Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
- import numpy as np
- import matplotlib.pyplot as plt
# Synthetic data: y = 0.1*x + 0.3 plus Gaussian noise.
# The regression below should recover a ~= 0.1 and b ~= 0.3.
n = 1000
x = np.random.normal(0, 0.55, n)
y = 0.1 * x + 0.3 + np.random.normal(0, 0.03, n)
# Scatter, not a line plot: the points are in random order, so plt.plot
# would connect them with line segments and draw an unreadable tangle.
plt.scatter(x, y, s=2)
plt.show()
def linear_regression(x, y, alpha=0.5, steps=8):
    """Fit y ~ a*x + b with gradient descent on mean squared error.

    Args:
        x: 1-D array of inputs.
        y: 1-D array of targets, same length as ``x``.
        alpha: learning rate for the gradient-descent optimizer.
        steps: number of gradient-descent updates to run.

    Returns:
        Tuple ``(a, b)`` — the fitted slope and intercept as scalars.
    """
    # Slope starts at a random value in [-1, 1); intercept starts at 0.
    a = tf.Variable(tf.random_uniform([1], -1, 1))
    b = tf.Variable(tf.zeros([1]))
    yh = a * x + b
    loss = tf.reduce_mean(tf.square(yh - y))
    train = tf.train.GradientDescentOptimizer(alpha).minimize(loss)
    # Context manager guarantees the session is released even if a
    # run step raises (the original leaked an unclosed Session).
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(steps):
            sess.run(train)
        # Fetch both variables in a single run call.
        ah, bh = sess.run([a, b])
    return ah[0], bh[0]
# Recover the slope and intercept of the synthetic data and report them.
a, b = linear_regression(x, y)
print(f"a: {a:f}")
print(f"b: {b:f}")
Add Comment
Please, Sign In to add comment