Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #!/usr/bin/env python2
- # -*- coding: utf-8 -*-
- """
- Created on Sun May 20 15:13:38 2018
- @author: akber
- """
- import tensorflow as tf
- import numpy as np
- import matplotlib.pyplot as plt
- from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST into /tmp/data; labels are one-hot (needed for the Y placeholder shape).
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Training hyper-parameters.
learning_rate = 0.01  # NOTE(review): unused — the optimizer below hardcodes 1e-4
num_steps = 480*10  # total SGD steps per lambda value
batch_size = 125  # training minibatch size
test_batch_size = 100  # evaluation minibatch size
display_step = 480  # print loss every this many steps
examples_to_show = 10  # NOTE(review): unused in this script
# Network Parameters
num_input = 784 # MNIST data input (img shape: 28*28)
training_loss=[]  # NOTE(review): unused — the append below is commented out
error=[]  # per-batch test MSEs (re-bound inside the session loop)
perf = []  # mean test MSE, one entry per lambda in `rate`
# Sweep of DCT-penalty weights (lambda) to try: 1e-4 .. 1e0.
rate = np.array([10**-4, 10**-3, 10**-2, 10**-1, 10**-0])
# Graph inputs: flattened images, one-hot labels (never read by the model),
# the DCT-penalty weight, and a train(1)/test(other) flag.
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder(tf.float32, [None, 10])
beta = tf.placeholder(tf.float32, shape=())
data_type = tf.placeholder(tf.int16, shape=())
def weight_variable(shape, name):
    """Create a weight Variable drawn from a truncated normal (stddev 0.1)."""
    # Same initializer the MNIST tutorial uses.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def bias_variable(shape, name):
    """Create a bias Variable filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)
def fc_layer(previous, input_size, output_size, name):
    """Affine (fully connected) layer: previous @ W + b, no activation."""
    weights = weight_variable([input_size, output_size], name)
    biases = bias_variable([output_size], name)
    # Both variables share `name`; TF uniquifies duplicate variable names.
    return tf.matmul(previous, weights) + biases
def autoencoder(x, y, b, dtype):
    """Build a 784-300-60-30-60-300-784 autoencoder graph.

    Args:
        x: flattened image batch, shape [N, 784].
        y: label placeholder — accepted but never used by the graph.
        b: scalar weight on the DCT-domain penalty term.
        dtype: train/test flag forwarded to dct2 to pick the batch size.

    Returns:
        (loss, mse, out, code): total training loss, plain reconstruction
        MSE, the reconstruction, and the 30-dim bottleneck activations.
    """
    # Encoder: tanh hidden layers down to a linear 30-unit bottleneck.
    enc1 = tf.nn.tanh(fc_layer(x, 28 * 28, 300, "auto"))
    enc2 = tf.nn.tanh(fc_layer(enc1, 300, 60, "auto"))
    code = fc_layer(enc2, 60, 30, "auto")
    # Decoder mirrors the encoder; relu output keeps pixels non-negative.
    dec1 = tf.nn.tanh(fc_layer(code, 30, 60, "auto"))
    dec2 = tf.nn.tanh(fc_layer(dec1, 60, 300, "auto"))
    out = tf.nn.relu(fc_layer(dec2, 300, 28 * 28, "auto"))
    # Pixel-space MSE plus a b-weighted MSE between the 2-D DCTs of input
    # and reconstruction.
    mse = tf.reduce_mean(tf.squared_difference(x, out))
    dct_penalty = tf.reduce_mean(tf.squared_difference(dct2(x, dtype), dct2(out, dtype)))
    loss = mse + b * dct_penalty
    return loss, mse, out, code
def dct2(x, dtype):
    """Return the flattened 2-D DCT of every 28x28 image in the batch.

    Args:
        x: float tensor of flattened images, shape [N, 784].
        dtype: flag selecting the loop bound: 1 -> batch_size, otherwise
            test_batch_size. The bound must be a Python int because the
            per-image loop is unrolled at graph-construction time.

    Returns:
        Tensor of shape [N, 784] with each row the flattened 2-D DCT of
        the corresponding input image.
    """
    # NOTE(review): this function is called once at graph-build time with
    # the `data_type` placeholder, so `dtype == 1` is evaluated on a
    # Tensor, not on the fed value — confirm the intended batch size is
    # actually selected.
    n = batch_size if dtype == 1 else test_batch_size
    coeffs = []
    for i in range(n):
        img = tf.reshape(x[i, :], [28, 28])
        # Separable 2-D DCT: 1-D DCT along one axis, transpose, 1-D DCT
        # along the other, transpose back.
        d = tf.spectral.dct(tf.transpose(tf.spectral.dct(tf.transpose(img))))
        coeffs.append(tf.reshape(d, [-1]))
    # BUG FIX: the original ended with a bare `T = tf.reshape` and had no
    # return statement, so dct2() yielded None and graph construction
    # failed inside autoencoder(). Concatenate the per-image rows and
    # reshape back to [N, 784].
    return tf.reshape(tf.concat(coeffs, 0), [n, 28 * 28])
# Build the graph once; beta (lambda) and data_type are fed per sess.run.
loss, mse, output, latent = autoencoder(X, Y, beta, data_type)
# and we use the Adam Optimizer for training
optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)  # NOTE(review): ignores `learning_rate` defined above
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
#init2 = tf.initializers.variables(var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope="auto"))
with tf.Session() as sess:
    # Sweep the DCT-penalty weight lambda; retrain from scratch per value.
    for t in rate:
        # Re-initialize all variables so each lambda starts fresh.
        # (BUG FIX: the original also ran `init` once before this loop,
        # which was immediately redone here — the extra init is removed.)
        sess.run(init)
        for step in range(1, num_steps + 1):
            # Only images drive the loss; labels feed Y, which the graph
            # never reads.
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # One optimizer step; also fetch the loss for logging.
            _, l = sess.run([optimizer, loss],
                            feed_dict={X: batch_x, Y: batch_y, beta: t, data_type: 1})
            if step % display_step == 0 or step == 1:
                print('Step %i: Minibatch Loss: %f' % (step, l))
        # Evaluate plain reconstruction MSE on the test set (beta=0 so the
        # DCT penalty does not contribute).
        n = 100
        error = []
        for _ in range(n):
            batch_x, batch_y = mnist.test.next_batch(test_batch_size)
            # BUG FIX: the original fed the stale *training* batch_y here;
            # feed the matching test labels instead (Y is unused by `mse`,
            # but the feed should at least be consistent).
            l = sess.run(mse, feed_dict={X: batch_x, Y: batch_y, beta: 0, data_type: 0})
            error.append(l)
        perf.append(sum(error) / n)
# Plot mean test MSE against the DCT-penalty weight on a log-x axis.
plt.semilogx(rate, perf)
plt.xlabel(r'$\lambda$')  # raw string keeps the TeX escape unambiguous
plt.ylabel('MSE')
# BUG FIX: the original wrote `plt.show` without parentheses — the
# function was never called, so the figure never appeared.
plt.show()
Add Comment
Please, Sign In to add comment