Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Linear Regression Ex 1
# makes up some data in two dimensions, and then fits a line to it.
import tensorflow as tf
import numpy as np

# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b

# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Before starting, initialize the variables. We will 'run' this first.
init = tf.global_variables_initializer()

# Launch the graph. Use a context manager so the session is always
# closed (the original created a bare tf.Session() and leaked it).
with tf.Session() as sess:
    sess.run(init)
    # Fit the line: one gradient-descent step per iteration.
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run(W), sess.run(b))
# Learns best fit is W: [0.1], b: [0.3]
# Linear Regression Ex 2 with plotting
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

number_of_points = 200
x_point = []
y_point = []
a = 0.22
b = 0.78

# Generate noisy samples scattered around the line y = a*x + b.
for i in range(number_of_points):
    x = np.random.normal(0.0, 0.5)
    y = a*x + b + np.random.normal(0.0, 0.1)
    x_point.append([x])
    y_point.append([y])

# Show the raw input data before fitting.
plt.plot(x_point, y_point, 'o', label='Input Data')
plt.legend()
plt.show()

# Model: y = A * x + B, with A initialized uniformly in [-1, 1).
A = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
B = tf.Variable(tf.zeros([1]))
y = A * x_point + B

# Mean squared error between predictions and the observed points.
cost_function = tf.reduce_mean(tf.square(y - y_point))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(cost_function)

# tf.initialize_all_variables() was deprecated and removed in TF 1.0;
# use tf.global_variables_initializer() (consistent with Ex 1 above).
model = tf.global_variables_initializer()

with tf.Session() as session:
    session.run(model)
    for step in range(0, 21):
        session.run(train)
        # Every 5 steps, plot the data and the current fitted line.
        if (step % 5) == 0:
            plt.plot(x_point, y_point, 'o',
                     label='step = {}'
                     .format(step))
            plt.plot(x_point,
                     session.run(A) *
                     x_point +
                     session.run(B))
            plt.legend()
            plt.show()
# Minimal TensorBoard example: train a single weight w toward the
# target 0.0 and log scalar summaries for every node in the graph.
x = tf.constant(1.0, name='input')
w = tf.Variable(0.8, name='weight')
# tf.mul was removed in TF 1.0; tf.multiply is the current name.
y = tf.multiply(w, x, name='output')
y_ = tf.constant(0.0, name='correct_value')
loss = tf.pow(y - y_, 2, name='loss')
train_step = tf.train.GradientDescentOptimizer(0.025).minimize(loss)

# tf.scalar_summary / tf.merge_all_summaries / tf.train.SummaryWriter
# were all renamed into the tf.summary namespace in TF 1.0.
for value in [x, w, y, y_, loss]:
    tf.summary.scalar(value.op.name, value)
summaries = tf.summary.merge_all()

# Context manager closes the session; close the writer explicitly so
# the event file is flushed to disk.
with tf.Session() as sess:
    summary_writer = tf.summary.FileWriter('log_simple_stats', sess.graph)
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        summary_writer.add_summary(sess.run(summaries), i)
        sess.run(train_step)
    summary_writer.close()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement