import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

'''
1. Feed the data forward:
   input -> weights -> hidden layer 1 -> activation ->
   weights -> hidden layer 2 -> activation ->
   weights -> output layer

2. Compute the cost (cross-entropy) to measure how far the predictions are
   from the true labels.
3. Minimize the cost with an optimizer (AdamOptimizer, SGD, AdaGrad, etc.).
'''
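
# For reference, the cross-entropy cost for one example with one-hot label y
# and softmax output p is H(y, p) = -sum_i y_i * log(p_i).
# tf.nn.softmax_cross_entropy_with_logits fuses the softmax and this sum into
# a single numerically stable op, which is why neural_network_model below
# returns raw logits rather than probabilities.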

mnist = input_data.read_data_sets("./temp/data", one_hot=True)

n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500

n_classes = 10
batch_size = 100

# Input: 28x28-pixel MNIST images, flattened to 784-element vectors.
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
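
# one_hot=True above means each label is a 10-element indicator vector, e.g.
# the digit 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]; this matches the
# 10-unit output layer and the argmax comparison used for accuracy below.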


def neural_network_model(data):
    # Each layer is a dict of weights and biases, initialized from a
    # standard normal distribution.
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}

    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}

    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}

    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    # (input * weights) + biases, then a ReLU activation, at each hidden layer.
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)

    # No activation on the output layer: these are the raw logits.
    output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])

    return output
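
# Shape walk-through for one batch (batch_size = 100), as a sanity check:
# data (100, 784) x (784, 500) + (500,) -> l1 (100, 500)
# l1   (100, 500) x (500, 500) + (500,) -> l2 (100, 500)
# l2   (100, 500) x (500, 500) + (500,) -> l3 (100, 500)
# l3   (100, 500) x (500, 10)  + (10,)  -> output (100, 10)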


def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost],
                                feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of',
                  hm_epochs, 'loss:', epoch_loss)

        # Compare the predicted digit (argmax of the logits) with the true
        # digit (argmax of the one-hot label) across the whole test set.
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))

        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('accuracy:', accuracy.eval(
            {x: mnist.test.images, y: mnist.test.labels}))


train_neural_network(x)
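
# Expected behavior: one loss line per epoch (the loss should drop sharply in
# the first few epochs), then a single accuracy figure computed over all
# 10,000 MNIST test images. Exact numbers vary from run to run because the
# weights and biases are drawn from tf.random_normal.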