#!/usr/bin/env python
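# MNIST classifier: a fully connected network with two ReLU hidden layers,
# dropout on the input and hidden layers, TensorBoard summaries, and
# checkpoint save/restore. Written against the TensorFlow 0.x API.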

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os


def init_weights(shape, name):
    return tf.Variable(tf.random_normal(shape, stddev=0.01), name=name)


# This network is the same as the previous one except with an extra hidden layer + dropout
def model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden):
    # Add layer name scopes for better graph visualization
    with tf.name_scope("layer1"):
        X = tf.nn.dropout(X, p_keep_input)
        h = tf.nn.relu(tf.matmul(X, w_h))
    with tf.name_scope("layer2"):
        h = tf.nn.dropout(h, p_keep_hidden)
        h2 = tf.nn.relu(tf.matmul(h, w_h2))
    with tf.name_scope("layer3"):
        h2 = tf.nn.dropout(h2, p_keep_hidden)
        return tf.matmul(h2, w_o)

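# Shape flow through the model, given the weight shapes defined below:
#   X [batch, 784] -> h [batch, 625] -> h2 [batch, 625] -> logits [batch, 10]
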
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

X = tf.placeholder("float", [None, 784], name="X")
Y = tf.placeholder("float", [None, 10], name="Y")

w_h = init_weights([784, 625], "w_h")
w_h2 = init_weights([625, 625], "w_h2")
w_o = init_weights([625, 10], "w_o")

# Add histogram summaries for weights
tf.histogram_summary("w_h_summ", w_h)
tf.histogram_summary("w_h2_summ", w_h2)
tf.histogram_summary("w_o_summ", w_o)

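# Dropout keep probabilities are fed at runtime, so the same graph serves
# training (keep < 1.0) and evaluation (keep = 1.0); see the feed_dicts below.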
p_keep_input = tf.placeholder("float", name="p_keep_input")
p_keep_hidden = tf.placeholder("float", name="p_keep_hidden")

py_x = model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden)

with tf.name_scope("cost"):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
    train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
    # Add scalar summary for cost
    tf.scalar_summary("cost", cost)

with tf.name_scope("accuracy"):
    correct_pred = tf.equal(tf.argmax(Y, 1), tf.argmax(py_x, 1))  # Count correct predictions
    acc_op = tf.reduce_mean(tf.cast(correct_pred, "float"))  # Cast booleans to floats to average them
    # Add scalar summary for accuracy
    tf.scalar_summary("accuracy", acc_op)

# Set up the checkpoint directory
ckpt_dir = "./ckpt_dir"
if not os.path.exists(ckpt_dir):
    os.makedirs(ckpt_dir)

global_step = tf.Variable(0, name='global_step', trainable=False)

# Create the Saver after declaring all tf.Variables so it tracks every one.
saver = tf.train.Saver()

with tf.Session() as sess:
    # Create a log writer. Run 'tensorboard --logdir=./logs/nn_logs' to view.
    writer = tf.train.SummaryWriter("./logs/nn_logs", sess.graph)  # for TF 0.8
    merged = tf.merge_all_summaries()

    # You need to initialize all variables
    tf.initialize_all_variables().run()

    # Restore the latest checkpoint if one exists
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and ckpt.model_checkpoint_path:
        print(ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)  # restore all variables

    for i in range(100):
        # Train on mini-batches of 128 examples
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX) + 1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_input: 0.8, p_keep_hidden: 0.5})
        # Evaluate on the test set with dropout disabled
        summary, acc = sess.run([merged, acc_op], feed_dict={X: teX, Y: teY,
                                                             p_keep_input: 1.0, p_keep_hidden: 1.0})

        # Save model variables and a checkpoint
        global_step.assign(i).eval()  # Set global_step to the epoch index i
        saver.save(sess, ckpt_dir + "/model.ckpt", global_step=global_step)

        writer.add_summary(summary, i)  # Write the summary
        print(i, acc)  # Report the accuracy
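
# Note: the summary/initialization API above is TensorFlow 0.x. On TF 1.x the
# same calls were renamed; a sketch of the usual one-to-one migration (not part
# of the original script) is:
#
#   tf.summary.histogram("w_h_summ", w_h)       # was tf.histogram_summary
#   tf.summary.scalar("cost", cost)             # was tf.scalar_summary
#   merged = tf.summary.merge_all()             # was tf.merge_all_summaries
#   writer = tf.summary.FileWriter("./logs/nn_logs", sess.graph)  # was tf.train.SummaryWriter
#   tf.global_variables_initializer().run()     # was tf.initialize_all_variables
#   cost = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))  # keyword args required in TF 1.x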