Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
- import tensorflow as tf
- from tensorflow.examples.tutorials.mnist import input_data
- import matplotlib.pyplot as plt
- import numpy as np
# Fix both RNGs so repeated runs produce the same results.
np.random.seed(1)        # numpy-side reproducibility
tf.set_random_seed(1)    # TF graph-level reproducibility
BATCH_SIZE = 64          # minibatch size for the CNN classifier
LR = 0.001               # Adam learning rate for the CNN
# CNN classifier graph: flat 784-pixel input -> two conv/pool stages -> 10-way logits.
tf_x = tf.placeholder(tf.float32, [None, 28*28]) / 255.   # pixels rescaled to (0, 1)
image = tf.reshape(tf_x, [-1, 28, 28, 1])                 # (batch, height, width, channel)
tf_y = tf.placeholder(tf.int32, [None, 10])               # one-hot class labels

c1 = tf.layers.conv2d(image, 16, 5, 1, 'same', activation=tf.nn.relu)  # -> (28, 28, 16)
p1 = tf.layers.max_pooling2d(c1, 2, 2)                                 # -> (14, 14, 16)
c2 = tf.layers.conv2d(p1, 32, 5, 1, 'same', activation=tf.nn.relu)     # -> (14, 14, 32)
p2 = tf.layers.max_pooling2d(c2, 2, 2)                                 # -> (7, 7, 32)
flat_feats = tf.reshape(p2, [-1, 7*7*32])                              # flatten for the dense readout
output = tf.layers.dense(flat_feats, 10)                               # class logits

loss = tf.losses.softmax_cross_entropy(onehot_labels=tf_y, logits=output)
train_op = tf.train.AdamOptimizer(LR).minimize(loss)
# tf.metrics.accuracy returns (value, update_op); index [1] keeps the update op,
# so each evaluation folds the fed batch into a running accuracy total.
accuracy = tf.metrics.accuracy(
    labels=tf.argmax(tf_y, axis=1), predictions=tf.argmax(output, axis=1))[1]
# Autoencoder hyper parameters
BATCH_SIZE2 = 64
LR2 = 0.002          # Adam learning rate for the autoencoder
N_TEST_IMG2 = 5      # number of digits shown when reconstruction plotting is enabled

# MNIST digits; labels are loaded one-hot so they can feed tf_y directly.
mnist = input_data.read_data_sets('./mnist', one_hot=True)
test_x = mnist.test.images[:5500]
test_y = mnist.test.labels[:5500]

# Accumulators for the final bottleneck-size vs accuracy plot.
x = np.array([])
y = np.array([])
# Sweep autoencoder bottleneck sizes 1..32. For each size: retrain the CNN
# classifier from scratch, train an autoencoder with that bottleneck, and
# record the CNN's accuracy on the autoencoder's reconstructions.
for dim in range(1, 33):
    print('Bottleneck size is now {}'.format(dim))
    # Autoencoder input placeholder; MNIST pixel values already lie in (0, 1).
    tf_x2 = tf.placeholder(tf.float32, [None, 28*28])
    # encoder: 784 -> 256 -> 128 -> 64 -> dim
    en0 = tf.layers.dense(tf_x2, 256, tf.nn.tanh)
    en1 = tf.layers.dense(en0, 128, tf.nn.tanh)
    en2 = tf.layers.dense(en1, 64, tf.nn.tanh)
    encoded = tf.layers.dense(en2, dim)          # bottleneck code of size `dim`
    # decoder: dim -> 64 -> 128 -> 256 -> 784
    de0 = tf.layers.dense(encoded, 64, tf.nn.tanh)
    de1 = tf.layers.dense(de0, 128, tf.nn.tanh)
    de2 = tf.layers.dense(de1, 256, tf.nn.tanh)
    decoded = tf.layers.dense(de2, 28*28, tf.nn.sigmoid)  # reconstruction in (0, 1)

    loss2 = tf.losses.mean_squared_error(labels=tf_x2, predictions=decoded)
    train = tf.train.AdamOptimizer(LR2).minimize(loss2)

    # NOTE(review): every iteration adds a fresh autoencoder sub-graph to the
    # single default graph (which also hosts the CNN, so the graph cannot simply
    # be reset here); the default graph therefore grows across the 32 iterations.
    sess = tf.Session()
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())  # local vars back the accuracy metric
    sess.run(init_op)  # initialize var in graph

    # Train the CNN classifier from scratch for this bottleneck size.
    for step in range(2600):
        b_x, b_y = mnist.train.next_batch(BATCH_SIZE)
        _, loss_ = sess.run([train_op, loss], {tf_x: b_x, tf_y: b_y})
        if step % 100 == 0:
            accuracy_ = sess.run(accuracy, {tf_x: test_x, tf_y: test_y})
            print('Step:', step, '| train loss: %.4f' %
                  loss_, '| test accuracy: %.2f' % accuracy_)

    # Full test set used for scoring the reconstructions.
    test_data = mnist.test.images
    test_label = mnist.test.labels

    # Reset the running accuracy metric before scoring reconstructions.
    sess.run(tf.local_variables_initializer())
    # Train the autoencoder; periodically run the CNN on its reconstructions.
    for step in range(8000):
        b_x, b_y = mnist.train.next_batch(BATCH_SIZE2)
        _, encoded_, decoded_, loss2_ = sess.run(
            [train, encoded, decoded, loss2], {tf_x2: b_x})
        if step == 0 or step % 200 == 199:
            decoded_data = sess.run(decoded, {tf_x2: test_data})
            # NOTE(review): tf.metrics.accuracy is a *running* metric, so this
            # value is averaged over all evaluations since the reset above —
            # confirm that is intended rather than per-evaluation accuracy.
            accuracy_ = sess.run(accuracy, feed_dict={
                tf_x: decoded_data, tf_y: test_label})
            print('train loss: %.4f' %
                  loss2_, ' | test accuracy: %.4f' % accuracy_)

    # Record the last accuracy reached with this bottleneck size.
    x = np.append(x, dim)
    y = np.append(y, accuracy_)
    # FIX: release the session's resources. Previously a new Session was
    # opened on every iteration and never closed, leaking 32 sessions.
    sess.close()

# Accuracy on reconstructions as a function of bottleneck size.
plt.plot(x, y)
plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement