Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- ############ Imports ############
- import numpy as np
- import matplotlib.pyplot as plt
- import tensorflow as tf
- from PIL import Image
- import time
- import pdb
############ Initializations ############
batch_size = 50
num_classes = 10    # MNIST digit classes; discriminator outputs 1 + num_classes (extra "fake" class)
channels = 1        # grayscale images
height = 64
width = 64
# MNIST was resized to 64 * 64 for discriminator and generator architecture fitting
latent = 100        # dimensionality of the generator latent vector z
epsilon = 1e-7      # small constant, presumably for numerically safe log/div in a loss -- not used in this chunk, TODO confirm
labeled_rate = 0.2 # For initial testing
hash_bit = 32       # length of the hash code: sizes the `tik` buffer and the outputs_Encoder loop below
- ############ Importing MNIST data ############
def get_data():
    """Load MNIST via the TF1 tutorial helper and return the dataset handle.

    Labels are one-hot encoded; reshape=[] keeps images in their 2-D
    image shape instead of flattening them to 784-vectors. Data is
    downloaded to /tmp/data/ on first use.
    """
    from tensorflow.examples.tutorials.mnist import input_data
    dataset = input_data.read_data_sets("/tmp/data/", one_hot=True, reshape=[])
    return dataset
- ############ Normalizing data ############
- # Scaling in range (-1,1) for generator tanh output
def scale(x):
    """Linearly map data from [0, 1] to [-1, 1] to match the generator's tanh output."""
    centered = x - 0.5
    return centered / 0.5
# Fixed batch of latent vectors sampled once at graph-build time.
# NOTE(review): tf.random_uniform defaults to the range [0, 1); DCGAN-style
# generators usually sample z from [-1, 1) -- confirm the intended range.
# NOTE(review): the literal 100 duplicates the `latent` constant above.
batch_z = tf.random_uniform([batch_size, 1 , 1 , 100], name='random_z')
# Feed-time placeholder for latent vectors (unused in this chunk).
z = tf.placeholder(tf.float32, shape = [None, 1, 1, latent], name = 'z')
- """Discriminator and Generator architecture should mirror each other"""
- ############ Defining Discriminator ############
def discriminator(x, dropout_rate = 0., is_training = True, reuse = False):
    """Map a ?*64*64*1 image batch to features, logits and probabilities over 1 + num_classes.

    Args:
        x: image batch of shape ?*64*64*1 (MNIST resized to 64x64).
        dropout_rate: forwarded as the 2nd positional argument of tf.nn.dropout,
            which in TF1 is keep_prob, NOT a drop rate -- NOTE(review): the
            default 0. would be an invalid keep_prob, and callers passing 0.7
            actually KEEP 70% of activations; confirm intended semantics.
        is_training: toggles batch-norm between batch statistics and moving averages.
        reuse: reuse the 'Discriminator' variable scope so real and fake
            passes share weights.

    Returns:
        (flatten, logits_D, out_D): globally average-pooled conv4 features
        (used for the feature-matching loss), raw class logits, and their softmax.
    """
    # input x -> n+1 classes
    with tf.variable_scope('Discriminator', reuse = reuse):
        # x = ?*64*64*1
        print('Discriminator architecture: ')
        #Layer 1
        conv1 = tf.layers.conv2d(x, 128, kernel_size = [4,4], strides = [2,2],
                                 padding = 'same', activation = tf.nn.leaky_relu, name = 'conv1') # ?*32*32*128
        print(conv1.shape)
        #No batch-norm for input layer
        dropout1 = tf.nn.dropout(conv1, dropout_rate)  # NOTE(review): 2nd arg is keep_prob in TF1
        #Layer2
        conv2 = tf.layers.conv2d(dropout1, 256, kernel_size = [4,4], strides = [2,2],
                                 padding = 'same', activation = tf.nn.leaky_relu, name = 'conv2') # ?*16*16*256
        batch2 = tf.layers.batch_normalization(conv2, training = is_training)
        dropout2 = tf.nn.dropout(batch2, dropout_rate)
        print(conv2.shape)
        #Layer3 (stride 4 shrinks 16x16 -> 4x4)
        conv3 = tf.layers.conv2d(dropout2, 512, kernel_size = [4,4], strides = [4,4],
                                 padding = 'same', activation = tf.nn.leaky_relu, name = 'conv3') # ?*4*4*512
        batch3 = tf.layers.batch_normalization(conv3, training = is_training)
        dropout3 = tf.nn.dropout(batch3, dropout_rate)
        print(conv3.shape)
        # Layer 4 ('valid' 3x3 on 4x4 input -> 2x2 spatial)
        conv4 = tf.layers.conv2d(dropout3, 1024, kernel_size=[3,3], strides=[1,1],
                                 padding='valid',activation = tf.nn.leaky_relu, name='conv4') # ?*2*2*1024
        # No batch-norm as this layer's op will be used in feature matching loss
        # No dropout as feature matching needs to be definite on logits
        print(conv4.shape)
        # Layer 5
        # Note: Applying Global average pooling over the 2x2 spatial dims
        flatten = tf.reduce_mean(conv4, axis = [1,2])
        logits_D = tf.layers.dense(flatten, (1 + num_classes))
        out_D = tf.nn.softmax(logits_D)
    return flatten,logits_D,out_D
- ############ Defining Generator ############
def generator(z, dropout_rate = 0., is_training = True, reuse = False):
    """Map latent vectors z (?*1*1*latent) to tanh images of shape ?*64*64*1.

    Mirrors the discriminator: 4 transposed convolutions upsample
    1x1 -> 4x4 -> 16x16 -> 32x32 -> 64x64.

    Args:
        z: latent batch of shape ?*1*1*latent.
        dropout_rate: forwarded as tf.nn.dropout's 2nd positional argument,
            which in TF1 is keep_prob -- NOTE(review): default 0. would be
            invalid; the call site below passes 0.7 (keeps 70%). Confirm.
        is_training: toggles batch-norm between batch statistics and moving averages.
        reuse: reuse the 'Generator' variable scope.

    Returns:
        Generated image batch in (-1, 1) via tanh, shape ?*64*64*1.
    """
    # input latent z -> image x
    with tf.variable_scope('Generator', reuse = reuse):
        print('\n Generator architecture: ')
        #Layer 1 ('valid' 4x4 deconv on a 1x1 input -> 4x4)
        deconv1 = tf.layers.conv2d_transpose(z, 512, kernel_size = [4,4],
                                             strides = [1,1], padding = 'valid',
                                             activation = tf.nn.relu, name = 'deconv1') # ?*4*4*512
        batch1 = tf.layers.batch_normalization(deconv1, training = is_training)
        dropout1 = tf.nn.dropout(batch1, dropout_rate)  # NOTE(review): 2nd arg is keep_prob in TF1
        print(deconv1.shape)
        #Layer 2 (stride 4: 4x4 -> 16x16)
        deconv2 = tf.layers.conv2d_transpose(dropout1, 256, kernel_size = [4,4],
                                             strides = [4,4], padding = 'same',
                                             activation = tf.nn.relu, name = 'deconv2')# ?*16*16*256
        batch2 = tf.layers.batch_normalization(deconv2, training = is_training)
        dropout2 = tf.nn.dropout(batch2, dropout_rate)
        print(deconv2.shape)
        #Layer 3 (stride 2: 16x16 -> 32x32)
        deconv3 = tf.layers.conv2d_transpose(dropout2, 128, kernel_size = [4,4],
                                             strides = [2,2], padding = 'same',
                                             activation = tf.nn.relu, name = 'deconv3')# ?*32*32*128
        batch3 = tf.layers.batch_normalization(deconv3, training = is_training)
        dropout3 = tf.nn.dropout(batch3, dropout_rate)
        print(deconv3.shape)
        #Output layer (stride 2: 32x32 -> 64x64, single channel)
        deconv4 = tf.layers.conv2d_transpose(dropout3, 1, kernel_size = [4,4],
                                             strides = [2,2], padding = 'same',
                                             activation = None, name = 'deconv4')# ?*64*64*1
        out = tf.nn.tanh(deconv4)
        print(deconv4.shape)
    return out
# Build a generated image batch from the fixed random latent vectors.
# NOTE(review): 0.7 reaches tf.nn.dropout as keep_prob in TF1 (keeps 70%).
dd = generator (batch_z, dropout_rate = 0.7)
############ Defining Encoder ############
# Buffer presumably meant to collect per-bit encoder outputs
# (batch, 1 + num_classes, hash_bit); never written in this chunk -- TODO confirm/remove.
tik = np.empty(shape=(batch_size,(1 + num_classes),hash_bit))
##out_D1 = np.empty(shape=(None,None,hash_bit),dtype=object)
#tik = []
def Encoder(x, dropout_rate = 0., is_training = True, reuse = False):
    """Encode a ?*64*64*1 image batch into 1 + num_classes sigmoid outputs.

    Architecturally identical to `discriminator` except for the sigmoid head
    (independent per-unit activations, consistent with producing hash bits
    rather than a class distribution).

    Args:
        x: image batch of shape ?*64*64*1.
        dropout_rate: forwarded as tf.nn.dropout's 2nd positional argument,
            which in TF1 is keep_prob -- NOTE(review): default 0. would be
            invalid; the call site passes 0.7 (keeps 70%). Confirm.
        is_training: toggles batch-norm between batch statistics and moving averages.
        reuse: reuse the 'Encoder' variable scope. NOTE(review): calling this
            twice with reuse=False raises a ValueError in TF1 (scope already
            contains variables) -- see outputs_Encoder below.

    Returns:
        out_D: sigmoid activations of shape ?*(1 + num_classes).
    """
    # input x -> n+1 classes
    with tf.variable_scope('Encoder', reuse = reuse):
        # x = ?*64*64*1
        print('Encoder architecture: ')
        #Layer 1
        conv1 = tf.layers.conv2d(x, 128, kernel_size = [4,4], strides = [2,2],
                                 padding = 'same', activation = tf.nn.leaky_relu, name = 'conv1') # ?*32*32*128
        print(conv1.shape)
        #No batch-norm for input layer
        dropout1 = tf.nn.dropout(conv1, dropout_rate)  # NOTE(review): 2nd arg is keep_prob in TF1
        #Layer2
        conv2 = tf.layers.conv2d(dropout1, 256, kernel_size = [4,4], strides = [2,2],
                                 padding = 'same', activation = tf.nn.leaky_relu, name = 'conv2') # ?*16*16*256
        batch2 = tf.layers.batch_normalization(conv2, training = is_training)
        dropout2 = tf.nn.dropout(batch2, dropout_rate)
        print(conv2.shape)
        #Layer3 (stride 4 shrinks 16x16 -> 4x4)
        conv3 = tf.layers.conv2d(dropout2, 512, kernel_size = [4,4], strides = [4,4],
                                 padding = 'same', activation = tf.nn.leaky_relu, name = 'conv3') # ?*4*4*512
        batch3 = tf.layers.batch_normalization(conv3, training = is_training)
        dropout3 = tf.nn.dropout(batch3, dropout_rate)
        print(conv3.shape)
        # Layer 4 ('valid' 3x3 on 4x4 input -> 2x2 spatial)
        conv4 = tf.layers.conv2d(dropout3, 1024, kernel_size=[3,3], strides=[1,1],
                                 padding='valid',activation = tf.nn.leaky_relu, name='conv4') # ?*2*2*1024
        # No batch-norm as this layer's op will be used in feature matching loss
        # No dropout as feature matching needs to be definite on logits
        print(conv4.shape)
        # Layer 5
        # Note: Applying Global average pooling over the 2x2 spatial dims
        flatten = tf.reduce_mean(conv4, axis = [1,2])
        logits_D = tf.layers.dense(flatten, (1 + num_classes))
        out_D = tf.nn.sigmoid(logits_D, name ='sigmoid')
    return out_D
- #out_D = Encoder(dd , dropout_rate = 0.7)
def outputs_Encoder(output_G):
    """Run the Encoder on a generated image batch and return its sigmoid outputs.

    Bug fixed: the original looped `hash_bit` times calling
    Encoder(..., reuse=False) on each iteration, which (a) raises a
    ValueError in TF1 on the second call because the 'Encoder' variable
    scope already contains variables, and (b) even if it succeeded, the
    loop variable was unused and only the last (identical) result was
    returned. A single call builds the same graph and returns the same
    tensor.

    Args:
        output_G: generator output batch, shape ?*64*64*1.

    Returns:
        Encoder sigmoid outputs of shape ?*(1 + num_classes).
    """
    # NOTE(review): 0.7 reaches tf.nn.dropout as keep_prob in TF1 (keeps 70%).
    return Encoder(output_G, dropout_rate = 0.7)
# Encoder outputs for the generated batch `dd`.
out_D1 = outputs_Encoder(dd)
#tik[k] = out_D
# tik[:][:][k].append(out_D1.eval())
# i = 0
- with tf.Session() as sess:
- sess.run(tf.global_variables_initializer())
- t = sess.run(out_D1
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement