Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- import argparse
- import sys
- import time
- import numpy as np
- import tensorflow as tf
- from tensorflow.python.tools import optimize_for_inference_lib
- from tensorflow.examples.tutorials.mnist import input_data
# Runtime configuration filled in by argparse in the __main__ guard.
FLAGS = None

# Training hyperparameters.
NUM_ITERS = 20   # number of gradient-descent steps
BATCH_SIZE = 4

# Native MNIST image dimensions.
MNIST_X = 28
MNIST_Y = 28
MNIST_CHANNELS = 1
MNIST_CLASSES = 10

# Up-scale the MNIST input to these sizes
# (each must be an integer multiple of the matching MNIST_* value,
# since the batch is enlarged with np.repeat).
INPUT_X = 784
INPUT_Y = 392
INPUT_CHANNELS = 3

# Network size settings
NUM_FEATURE_MAPS = 64
def conv(conv_input, num_channels, name):
    """Build one 3x3, stride-1, SAME-padded conv layer with ReLU activation.

    The layer's variables live under the scope ``conv_<name>``.  The
    resulting tensor is printed so the console shows each layer's
    name and shape as the graph is assembled.
    """
    with tf.variable_scope("conv_" + name):
        layer_out = tf.contrib.layers.convolution2d(
            conv_input,
            num_channels,
            activation_fn=tf.nn.relu,
            kernel_size=(3, 3),
            stride=1,
            padding="SAME",
        )
    print(layer_out)  # log layer name/shape during graph construction
    return layer_out
def model(network_input):
    """Build a 4-layer fully-convolutional network.

    Three feature-extraction conv layers of NUM_FEATURE_MAPS channels
    each, followed by a final conv that emits per-pixel class logits
    (MNIST_CLASSES channels, same spatial size as the input).
    """
    conv1 = conv(network_input, NUM_FEATURE_MAPS, "conv1")
    conv2 = conv(conv1, NUM_FEATURE_MAPS, "conv2")
    conv3 = conv(conv2, NUM_FEATURE_MAPS, "conv3")
    # BUG FIX: the final layer previously consumed conv2, which left
    # conv3 dead and silently dropped one layer from the network.
    result = conv(conv3, MNIST_CLASSES, "conv4")
    return result
def main(_):
    """Train the FCN on up-scaled MNIST, then export a frozen inference graph.

    MNIST 28x28x1 images are enlarged with np.repeat to
    INPUT_X x INPUT_Y x INPUT_CHANNELS to mimic realistic image sizes,
    and the one-hot labels are broadcast to a dense 2-D label map of
    the same spatial size.  After NUM_ITERS steps the variables are
    frozen into constants and written to ``graph.pb``.
    """
    print("Loading MNIST data")
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    print("Creating model")
    x = tf.placeholder(tf.float32, [None, INPUT_X, INPUT_Y, INPUT_CHANNELS])
    x = tf.identity(x, "input")  # named node so the frozen graph has a stable input
    y = model(x)
    y = tf.identity(y, "output")  # named node for graph freezing/optimization
    y_ = tf.placeholder(tf.float32, [None, INPUT_X, INPUT_Y, MNIST_CLASSES])

    print("Defining loss")
    # Per-pixel softmax cross-entropy over the last (class) dimension.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)

    print("Starting session")
    print("Batch size: %d, num_steps: %d" % (BATCH_SIZE, NUM_ITERS))
    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:
        tf.global_variables_initializer().run()
        print("Starting training")
        for i in range(NUM_ITERS):
            batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)
            batch_xs = np.reshape(
                batch_xs,
                newshape=[BATCH_SIZE, MNIST_X, MNIST_Y, MNIST_CHANNELS])
            # Just to make the MNIST data a bit larger to reflect more
            # realistic image sizes.
            # BUG FIX: use floor division — under Python 3, "/" yields a
            # float, and np.repeat raises TypeError for non-integer
            # repeat counts.
            batch_xs = np.repeat(batch_xs, INPUT_X // MNIST_X, axis=1)
            batch_xs = np.repeat(batch_xs, INPUT_Y // MNIST_Y, axis=2)
            batch_xs = np.repeat(batch_xs, INPUT_CHANNELS // MNIST_CHANNELS, axis=3)
            # Also make the output larger: turn the one-hot vector into a
            # 2-D label map of the same spatial size as the input.
            batch_ys = np.repeat(batch_ys[:, np.newaxis, :], INPUT_X, axis=1)
            batch_ys = np.repeat(batch_ys[:, :, np.newaxis, :], INPUT_Y, axis=2)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        print("Done training")

        print("Output graph to disk")
        # Freeze variables into constants, strip training-only nodes, and
        # serialize the inference graph.
        graph = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, ["output"])
        graph = optimize_for_inference_lib.optimize_for_inference(
            graph, ["input"], ["output"],
            placeholder_type_enum=tf.float32.as_datatype_enum)
        tf.train.write_graph(graph, ".", "graph.pb", as_text=False)
if __name__ == '__main__':
    # Parse only the flags we know; everything else is forwarded to
    # tf.app.run untouched so TensorFlow can handle its own flags.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--data_dir',
        type=str,
        default='/tmp/tensorflow/mnist/input_data',
        help='Directory for storing input data')
    FLAGS, remaining_args = arg_parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + remaining_args)
Add a comment
Please sign in to add a comment.