from __future__ import print_function

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1

# Network Parameters
n_hidden_1 = 256  # 1st layer number of neurons
n_hidden_2 = 256  # 2nd layer number of neurons
n_input = 784     # MNIST data input (img shape: 28*28)
n_classes = 10    # MNIST total classes (0-9 digits)

# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}


# Create model
def multilayer_perceptron(x):
    # Hidden fully connected layer with 256 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Hidden fully connected layer with 256 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Construct model
logits = multilayer_perceptron(X)

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Initializing the variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,
                                                            Y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost={:.9f}".format(avg_cost))
    print("Optimization Finished!")

    # Test model
    pred = tf.nn.softmax(logits)  # Apply softmax to logits
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels}))