import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn import datasets

# Load the iris data: use sepal length, sepal width, and petal length as
# features (x) and predict petal width (y).
iris = datasets.load_iris()
x_vals = np.array([x[0:3] for x in iris.data])
y_vals = np.array([x[3] for x in iris.data])

sess = tf.Session()

# Make results reproducible.
seed = 2
tf.set_random_seed(seed)
np.random.seed(seed)

# Split the data into an 80/20 train/test split.
train_indices = np.random.choice(len(x_vals), round(len(x_vals) * 0.8),
                                 replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]

# Scale each feature column to the [0, 1] range (min-max normalization).
def normalize_cols(m):
    col_max = m.max(axis=0)
    col_min = m.min(axis=0)
    return (m - col_min) / (col_max - col_min)

x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test))

# Placeholders for a batch of features (3 columns) and targets (1 column).
batch_size = 50
x_data = tf.placeholder(shape=[None, 3], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Variables for a single hidden layer with 5 nodes and a 1-node output layer.
hidden_layer_nodes = 5
A1 = tf.Variable(tf.random_normal(shape=[3, hidden_layer_nodes]))
b1 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes]))
A2 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes, 1]))
b2 = tf.Variable(tf.random_normal(shape=[1]))

# Fully connected layers with ReLU activations.
hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data, A1), b1))
final_output = tf.nn.relu(tf.add(tf.matmul(hidden_output, A2), b2))

# Mean squared error loss, minimized with gradient descent.
loss = tf.reduce_mean(tf.square(y_target - final_output))
my_opt = tf.train.GradientDescentOptimizer(0.005)
train_step = my_opt.minimize(loss)
init = tf.global_variables_initializer()
sess.run(init)

# First we initialize the loss vectors for storage.
loss_vec = []
test_loss = []
for i in range(500):
    # First we select a random set of indices for the batch.
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    # We then select the training values.
    rand_x = x_vals_train[rand_index]
    rand_y = np.transpose([y_vals_train[rand_index]])
    # Now we run the training step.
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    # We save the training loss (stored as its square root, i.e. RMSE).
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(np.sqrt(temp_loss))
    # Finally, we run the test-set loss and save it.
    test_temp_loss = sess.run(loss, feed_dict={x_data: x_vals_test,
                                               y_target: np.transpose([y_vals_test])})
    test_loss.append(np.sqrt(test_temp_loss))
    if (i + 1) % 50 == 0:
        print('Generation: ' + str(i + 1) + '. Loss = ' + str(temp_loss))

# Plot the training and test loss over generations.
plt.plot(loss_vec, 'k-', label='Train Loss')
plt.plot(test_loss, 'r--', label='Test Loss')
plt.title('Loss (MSE) per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()