Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
- import scipy.misc as smp
- from tensorflow.examples.tutorials.mnist import input_data
def weight_variable(shape):
    """Return a trainable weight Variable of the given shape.

    Values are drawn from a truncated normal distribution (stddev 0.1),
    the usual small-random initialization for conv/FC kernels.
    """
    return tf.Variable(tf.truncated_normal(shape, stddev = 0.1))
def bias_variable(shape):
    """Return a trainable bias Variable of the given shape, filled with 0.1.

    A small positive constant keeps ReLU units initially active.
    """
    return tf.Variable(tf.constant(0.1, shape = shape))
def conv2d(x, W):
    """2-D convolution of `x` with kernel `W`, stride 1, zero ('SAME') padding.

    'SAME' padding preserves the spatial dimensions of the input.
    """
    return tf.nn.conv2d(x, W, strides = [1, 1, 1, 1], padding = 'SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2.

    'VALID' padding means no zero-padding; each spatial dimension is halved
    (28 -> 14 -> 7 for the MNIST pipeline in this file).
    """
    return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'VALID')
def network(loadNetwork):
    """Build a small MNIST conv net, then either train it or restore it.

    Architecture:
        conv(3x3, 8) -> conv(3x3, 16) -> 2x2 pool ->
        conv(3x3, 24) -> 2x2 pool -> FC(64) -> dropout ->
        FC(128) -> dropout -> FC(10) linear logits.

    Args:
        loadNetwork: if False, train from scratch for `numPasses` steps,
            checkpoint periodically, dump pooling activations and input
            images to disk, and print test accuracy. If True, restore the
            final checkpoint and only print test accuracy.

    Side effects: reads MNIST_data/, writes TensorBoard logs, checkpoints,
    and PNG images under ./images14, ./images7 and ./inputs (those
    directories are assumed to exist).
    """
    mnist = input_data.read_data_sets('MNIST_data', one_hot = True)

    # Placeholders: flat 784-pixel images and one-hot 10-class labels.
    x = tf.placeholder(tf.float32, shape = [None, 784])
    y_ = tf.placeholder(tf.float32, shape = [None, 10])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    netName = "mnist_network"

    with tf.name_scope("KeepProbability"):
        fckeep_prob = tf.placeholder(tf.float32)
        # NOTE(review): convkeep_prob is fed below but never wired into the
        # graph, so it has no effect on training; kept so existing feed
        # dicts and callers remain valid.
        convkeep_prob = tf.placeholder(tf.float32)

    with tf.name_scope("Conv1"):
        W_conv1 = weight_variable([3, 3, 1, 8])
        b_conv1 = bias_variable([8])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    with tf.name_scope("Conv2"):
        W_conv2 = weight_variable([3, 3, 8, 16])
        b_conv2 = bias_variable([16])
        h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)

    with tf.name_scope("Pooling1"):
        h_pool1 = max_pool_2x2(h_conv2)  # 28x28 -> 14x14

    with tf.name_scope("Conv3"):
        W_conv3 = weight_variable([3, 3, 16, 24])
        b_conv3 = bias_variable([24])
        h_conv3 = tf.nn.relu(conv2d(h_pool1, W_conv3) + b_conv3)

    with tf.name_scope("Pooling2"):
        h_pool2 = max_pool_2x2(h_conv3)  # 14x14 -> 7x7

    with tf.name_scope("FC1"):
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 24])
        W_fc1 = weight_variable([7 * 7 * 24, 64])
        b_fc1 = bias_variable([64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    with tf.name_scope("DropOut2"):
        h_fc1_drop = tf.nn.dropout(h_fc1, fckeep_prob)

    with tf.name_scope("FC2"):
        W_fc2 = weight_variable([64, 128])
        b_fc2 = bias_variable([128])
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    with tf.name_scope("DropOut3"):
        h_fc2_drop = tf.nn.dropout(h_fc2, fckeep_prob)

    with tf.name_scope("FC3"):
        W_fc3 = weight_variable([128, 10])
        b_fc3 = bias_variable([10])
        # Bug fix: the original wrapped this in tf.nn.relu, clamping all
        # negative logits to zero before softmax cross-entropy, which
        # destroys the gradient signal. Logits must stay linear.
        y_conv = tf.matmul(h_fc2_drop, W_fc3) + b_fc3

    with tf.name_scope("CrossEntropy"):
        # Legacy positional order for this TF version: (logits, labels).
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))

    with tf.name_scope("TrainingStep"):
        train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

    with tf.name_scope("Accuracy"):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.scalar_summary('accuracy', accuracy)
    tf.scalar_summary('cross_entropy', cross_entropy)

    sess = tf.InteractiveSession()
    merged = tf.merge_all_summaries()
    tensorLogDir = "./tensorLogs"
    train_writer = tf.train.SummaryWriter(tensorLogDir, sess.graph)
    sess.run(tf.initialize_all_variables())

    numPasses = 10000
    # Bug fix: use floor division so printNum is an int under Python 3
    # (true division would make it a float).
    printNum = numPasses // 10
    saver = tf.train.Saver()

    if not loadNetwork:
        for i in range(numPasses):
            batch = mnist.train.next_batch(100)
            if i % printNum == 0:
                # Evaluate on the current batch with dropout disabled.
                train_accuracy = accuracy.eval(
                    feed_dict={
                        x: batch[0],
                        y_: batch[1],
                        fckeep_prob: 1.0,
                        convkeep_prob: 1.0
                    })
                print("%s: step %d, training accuracy %g"%(netName, i, train_accuracy))
                saver.save(sess, "./saved/{txt}".format(txt = netName), global_step = i)
            # One optimization step; also fetch the pooling activations so
            # the last batch's feature maps can be dumped after the loop.
            _, pool1, pool2, summary = sess.run(
                [train_step,
                 h_pool1,
                 h_pool2,
                 merged],
                feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    fckeep_prob: 0.5,
                    convkeep_prob: 0.8
                })
            train_writer.add_summary(summary, i)
        saver.save(sess, "./saved/{txt}".format(txt = netName), global_step = numPasses)

        # Dump every feature map of the last batch: pool1 is 14x14,
        # pool2 is 7x7 (hence the directory names).
        for i in range(pool1.shape[0]):
            for j in range(pool1.shape[3]):
                img = smp.toimage(pool1[i,:,:,j])
                smp.imsave("./images14/img{i}_{j}.png".format(
                    i = i,
                    j = j
                ), img)
        for i in range(pool2.shape[0]):
            for j in range(pool2.shape[3]):
                img = smp.toimage(pool2[i,:,:,j])
                smp.imsave("./images7/img{i}_{j}.png".format(
                    i = i,
                    j = j
                ), img)
        # Save the raw input images of the last batch for comparison.
        i = 0
        for data in batch[0]:
            img = smp.toimage(data.reshape((28, 28)))
            smp.imsave("./inputs/img{i}.png".format(
                i = i,
            ), img)
            i += 1
        print("test accuracy %g"%accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels,
            fckeep_prob: 1.0,
            convkeep_prob: 1.0
        }))
    else:
        # Restore the checkpoint written after the full training run.
        saver.restore(sess, "./saved/{txt}-{np}".format(
            txt = netName,
            np = numPasses
        ))
        print("Model restored.")
        print("Starting tests...")
        print("test accuracy %g"%accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels,
            fckeep_prob: 1.0,
            convkeep_prob: 1.0
        }))
    print(netName, "finished!")
def trainNetwork():
    """Train the MNIST network from scratch, then report completion."""
    network(loadNetwork = False)
    print("Network successfully trained.")
def testNetwork():
    """Restore the saved MNIST network and evaluate it on the test set."""
    network(loadNetwork = True)
def main():
    """Script entry point: trains the network.

    Swap the two calls below to evaluate a previously saved model instead.
    """
    trainNetwork()
    #testNetwork()


if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement