Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- from tensorflow.examples.tutorials.mnist import input_data
- import tensorflow as tf
def weight_variable(shape):
    """Return a trainable weight Variable of the given shape.

    Values are drawn from a truncated normal distribution with
    standard deviation 0.1 (the usual small-random-init for conv nets).
    """
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Return a trainable bias Variable of the given shape, filled with 0.1.

    A small positive constant keeps ReLU-style units from starting dead.
    """
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of `x` with filter bank `W`, stride 1, 'SAME' padding.

    Output spatial size equals input spatial size.
    """
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 ('SAME' padding): halves height and width."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Build a one-conv-layer MNIST classifier (TF1 graph mode) and evaluate
# its mean cross-entropy over the training set.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.InteractiveSession()

# Placeholders: flattened 28x28 grayscale images and one-hot class labels.
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])

# Conv (5x5, 1 -> `filters` channels) + 2x2 max-pool:
# 28x28x1 -> 28x28xfilters -> 14x14xfilters.
filters = 4
W_conv = weight_variable([5, 5, 1, filters])
b_conv = bias_variable([filters])
# NOTE(review): there is no non-linearity here; the stray parentheses in
# the original suggest a tf.nn.relu(...) may have been dropped — confirm
# before adding one, as it changes the model.
h_conv = conv2d(x_image, W_conv) + b_conv
h_pool = max_pool_2x2(h_conv)

# Flatten the pooled feature map and map straight to the 10 class logits.
s = 14 * 14 * filters
h_pool_flat = tf.reshape(h_pool, [-1, s])
classes = 10
W_fc = weight_variable([s, classes])
b_fc = bias_variable([classes])
y_conv = tf.matmul(h_pool_flat, W_fc) + b_fc

# Mean softmax cross-entropy loss. Keyword arguments are required:
# the original positional call (y_conv, y_) relied on the pre-1.0
# (logits, labels) order and raises an error on TF >= 1.0.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))

# initialize_all_variables() was deprecated in TF 0.12 and removed in TF2;
# global_variables_initializer() is the drop-in TF1 replacement.
sess.run(tf.global_variables_initializer())

# Evaluate the loss over the first n training examples.
# NOTE(review): despite its name, this value is the cross-entropy loss of
# an untrained model, not an accuracy; the name is preserved for any
# downstream references.
n = 55000
train_accuracy = cross_entropy.eval(
    feed_dict={x: mnist.train.images[:n, :], y_: mnist.train.labels[:n, :]})
Add Comment
Please, Sign In to add comment