import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# set up dummy data
n = 1000
x = np.random.normal(0, 0.55, n)
y = 0.1*x + 0.3 + np.random.normal(0, 0.03, n)  # want to find a=0.1 and b=0.3
plt.plot(x, y, '.')  # scatter of the noisy samples
plt.show()

def linear_regression(x, y, alpha=0.5, steps=8):
    # initialize a: random slope between -1 and 1
    a = tf.Variable(tf.random_uniform([1], -1, 1))
    # initialize b: 0
    b = tf.Variable(tf.zeros([1]))

    # linear model and mean-squared-error loss
    yh = a*x + b
    loss = tf.reduce_mean(tf.square(yh - y))

    # each run of `train` applies one gradient-descent update
    train = tf.train.GradientDescentOptimizer(alpha).minimize(loss)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    for i in range(steps):
        sess.run(train)
        # print(i, sess.run(a), sess.run(b))

    # read the fitted parameters back out of the session
    ah = sess.run(a)
    bh = sess.run(b)

    return ah[0], bh[0]

a, b = linear_regression(x, y)

print("a: %f" % a)
print("b: %f" % b)
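
# Optional sanity check (not part of the TensorFlow graph above): NumPy's
# polyfit gives the closed-form least-squares fit, so the gradient-descent
# estimates should come out close to these values.
a_ls, b_ls = np.polyfit(x, y, 1)
print("least-squares a: %f" % a_ls)
print("least-squares b: %f" % b_ls)

# Plot the noisy data together with the fitted line.
plt.plot(x, y, '.', label="data")
plt.plot(x, a*x + b, 'r', label="fit")
plt.legend()
plt.show()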