Guest User

Untitled

a guest
Jun 22nd, 2018
87
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 3.21 KB | None | 0 0
  1. from __future__ import absolute_import
  2. from __future__ import division
  3. from __future__ import print_function
  4.  
  5. import numpy as np
  6. import tensorflow as tf
  7.  
# Emit INFO-level TensorFlow log messages (training progress, checkpoints).
tf.logging.set_verbosity(tf.logging.INFO)
  9.  
  10.  
  11. def model_fn(features, labels, mode):
  12. input_data = tf.reshape(features['x'], (-1, 28, 28, 1))
  13.  
  14. conv1 = tf.layers.conv2d(
  15. inputs=input_data,
  16. filters=32,
  17. kernel_size=(3, 3),
  18. padding='same',
  19. activation=tf.nn.relu
  20. )
  21. conv2 = tf.layers.conv2d(
  22. inputs=conv1,
  23. filters=32,
  24. kernel_size=(3, 3),
  25. padding='same',
  26. activation=tf.nn.relu
  27. )
  28. pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2, 2), strides=2)
  29.  
  30. conv3 = tf.layers.conv2d(
  31. inputs=pool1,
  32. filters=64,
  33. kernel_size=(3, 3),
  34. padding="same",
  35. activation=tf.nn.relu)
  36. conv4 = tf.layers.conv2d(
  37. inputs=conv3,
  38. filters=64,
  39. kernel_size=(3, 3),
  40. padding='same',
  41. activation=tf.nn.relu
  42. )
  43. pool2 = tf.layers.max_pooling2d(inputs=conv4, pool_size=(2, 2), strides=2)
  44.  
  45. pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
  46. dropout = tf.layers.dropout(
  47. inputs=pool2_flat, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)
  48. dense = tf.layers.dense(inputs=dropout, units=1024, activation=tf.nn.relu)
  49.  
  50. decode = tf.layers.dense(inputs=dense, units=pool2_flat.get_shape()[1])
  51. decode_reshape = tf.reshape(decode, [-1, 7, 7, 64])
  52.  
  53. deconv1 = tf.layers.conv2d_transpose(
  54. inputs=decode_reshape,
  55. filters=64,
  56. kernel_size=(3, 3),
  57. strides=1,
  58. padding='same',
  59. activation=tf.nn.relu)
  60. deconv2 = tf.layers.conv2d_transpose(
  61. inputs=deconv1,
  62. filters=32,
  63. kernel_size=(3, 3),
  64. strides=2,
  65. padding='same',
  66. activation=tf.nn.relu)
  67.  
  68. deconv3 = tf.layers.conv2d_transpose(
  69. inputs=deconv2,
  70. filters=32,
  71. kernel_size=(3, 3),
  72. strides=1,
  73. padding='same',
  74. activation=tf.nn.relu)
  75. deconv4 = tf.layers.conv2d_transpose(
  76. inputs=deconv3,
  77. filters=1,
  78. kernel_size=(3, 3),
  79. strides=2,
  80. padding='same',
  81. activation=tf.nn.relu)
  82.  
  83. reconstruct = deconv4
  84.  
  85. loss = tf.nn.l2_loss(input_data - reconstruct)
  86.  
  87. if mode == tf.estimator.ModeKeys.TRAIN:
  88. optimizer = tf.train.AdamOptimizer()
  89. train_op = optimizer.minimize(
  90. loss=loss,
  91. global_step=tf.train.get_global_step())
  92. return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
  93.  
  94. return tf.estimator.EstimatorSpec(
  95. mode=mode, loss=loss)
  96.  
  97.  
  98. def main(argv):
  99. mnist = tf.contrib.learn.datasets.load_dataset("mnist")
  100. train_data = mnist.train.images
  101. train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
  102. eval_data = mnist.test.images
  103. eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
  104.  
  105. mnist_classifier = tf.estimator.Estimator(
  106. model_fn=model_fn, model_dir="./log")
  107.  
  108. train_input_fn = tf.estimator.inputs.numpy_input_fn(
  109. x={"x": train_data},
  110. y=train_labels,
  111. batch_size=128,
  112. num_epochs=None,
  113. shuffle=True)
  114.  
  115. mnist_classifier.train(
  116. input_fn=train_input_fn,
  117. steps=20000)
  118.  
  119. eval_input_fn = tf.estimator.inputs.numpy_input_fn(
  120. x={"x": eval_data},
  121. y=eval_labels,
  122. num_epochs=1,
  123. shuffle=False)
  124. eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
  125. print(eval_results)
  126.  
  127.  
if __name__ == '__main__':
    # tf.app.run() parses command-line flags and dispatches to main(argv).
    tf.app.run()
Add Comment
Please, Sign In to add comment