############ Imports ############

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from PIL import Image
import time
import pdb

############ Initializations ############

batch_size = 50
num_classes = 10
channels = 1
height = 64
width = 64
# MNIST images are resized to 64 x 64 to fit the discriminator and generator architectures
latent = 100
epsilon = 1e-7
labeled_rate = 0.2 # For initial testing
hash_bit = 32

############ Importing MNIST data ############

def get_data():
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True, reshape=[])
    return mnist

############ Normalizing data ############

# Scaling to the range (-1, 1) to match the generator's tanh output
def scale(x):
    # normalize data
    x = (x - 0.5) / 0.5
    return x
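# Sketch (not part of the original paste): scale() assumes pixel values already in [0, 1];
# this is a quick range check showing the output lands in (-1, 1), matching the tanh range.
# In actual training the batch would come from get_data().train.next_batch(batch_size)
# and be resized to 64 x 64 before this step.
demo_batch = scale(np.random.uniform(0., 1., size = (batch_size, height, width, channels)).astype(np.float32))
print('scaled batch range:', demo_batch.min(), demo_batch.max())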
batch_z = tf.random_uniform([batch_size, 1, 1, latent], name = 'random_z')
z = tf.placeholder(tf.float32, shape = [None, 1, 1, latent], name = 'z')
  35. """Discriminator and Generator architecture should mirror each other"""
  36.  
  37. ############ Defining Discriminator ############
  38.  
  39. def discriminator(x, dropout_rate = 0., is_training = True, reuse = False):
  40.     # input x -> n+1 classes
  41.    
  42.     with tf.variable_scope('Discriminator', reuse = reuse):
  43.      
  44.       # x = ?*64*64*1
  45.      
  46.       print('Discriminator architecture: ')
  47.       #Layer 1
  48.       conv1 = tf.layers.conv2d(x, 128, kernel_size = [4,4], strides = [2,2],
  49.                               padding = 'same', activation = tf.nn.leaky_relu, name = 'conv1') # ?*32*32*128
  50.       print(conv1.shape)
  51.       #No batch-norm for input layer
  52.       dropout1 = tf.nn.dropout(conv1, dropout_rate)
  53.      
  54.       #Layer2
  55.       conv2 = tf.layers.conv2d(dropout1, 256, kernel_size = [4,4], strides = [2,2],
  56.                               padding = 'same', activation = tf.nn.leaky_relu, name = 'conv2') # ?*16*16*256
  57.       batch2 = tf.layers.batch_normalization(conv2, training = is_training)
  58.       dropout2 = tf.nn.dropout(batch2, dropout_rate)
  59.       print(conv2.shape)
  60.      
  61.       #Layer3
  62.       conv3 = tf.layers.conv2d(dropout2, 512, kernel_size = [4,4], strides = [4,4],
  63.                               padding = 'same', activation = tf.nn.leaky_relu, name = 'conv3') # ?*4*4*512
  64.       batch3 = tf.layers.batch_normalization(conv3, training = is_training)
  65.       dropout3 = tf.nn.dropout(batch3, dropout_rate)
  66.       print(conv3.shape)
  67.        
  68.       # Layer 4
  69.       conv4 = tf.layers.conv2d(dropout3, 1024, kernel_size=[3,3], strides=[1,1],
  70.                                padding='valid',activation = tf.nn.leaky_relu, name='conv4') # ?*2*2*1024
  71.       # No batch-norm as this layer's op will be used in feature matching loss
  72.       # No dropout as feature matching needs to be definite on logits
  73.       print(conv4.shape)
  74.      
  75.       # Layer 5
  76.       # Note: Applying Global average pooling
  77.        
  78.       flatten = tf.reduce_mean(conv4, axis = [1,2])
  79.       logits_D = tf.layers.dense(flatten, (1 + num_classes))
  80.       out_D = tf.nn.softmax(logits_D)
  81.        
  82.     return flatten,logits_D,out_D
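# Sketch (not part of the original paste): the paste never calls discriminator();
# x_real below is a hypothetical placeholder for real images, added only to
# illustrate the expected input and output shapes of the network above.
x_real = tf.placeholder(tf.float32, shape = [None, height, width, channels], name = 'x_real')
D_features, D_logits, D_probs = discriminator(x_real, dropout_rate = 0.7)
print(D_features.shape, D_logits.shape) # (?, 1024) and (?, 11) with num_classes = 10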

############ Defining Generator ############

def generator(z, dropout_rate = 0., is_training = True, reuse = False):
    # input latent z -> image x

    with tf.variable_scope('Generator', reuse = reuse):
      print('\n Generator architecture: ')

      # Layer 1
      deconv1 = tf.layers.conv2d_transpose(z, 512, kernel_size = [4,4],
                                           strides = [1,1], padding = 'valid',
                                           activation = tf.nn.relu, name = 'deconv1') # ?*4*4*512
      batch1 = tf.layers.batch_normalization(deconv1, training = is_training)
      dropout1 = tf.nn.dropout(batch1, dropout_rate)
      print(deconv1.shape)

      # Layer 2
      deconv2 = tf.layers.conv2d_transpose(dropout1, 256, kernel_size = [4,4],
                                           strides = [4,4], padding = 'same',
                                           activation = tf.nn.relu, name = 'deconv2') # ?*16*16*256
      batch2 = tf.layers.batch_normalization(deconv2, training = is_training)
      dropout2 = tf.nn.dropout(batch2, dropout_rate)
      print(deconv2.shape)

      # Layer 3
      deconv3 = tf.layers.conv2d_transpose(dropout2, 128, kernel_size = [4,4],
                                           strides = [2,2], padding = 'same',
                                           activation = tf.nn.relu, name = 'deconv3') # ?*32*32*128
      batch3 = tf.layers.batch_normalization(deconv3, training = is_training)
      dropout3 = tf.nn.dropout(batch3, dropout_rate)
      print(deconv3.shape)

      # Output layer
      deconv4 = tf.layers.conv2d_transpose(dropout3, 1, kernel_size = [4,4],
                                           strides = [2,2], padding = 'same',
                                           activation = None, name = 'deconv4') # ?*64*64*1
      out = tf.nn.tanh(deconv4)
      print(deconv4.shape)

    return out
dd = generator(batch_z, dropout_rate = 0.7)

############ Defining Encoder ############

tik = np.empty(shape = (batch_size, (1 + num_classes), hash_bit))
##out_D1 = np.empty(shape=(None,None,hash_bit),dtype=object)
#tik = []

def Encoder(x, dropout_rate = 0., is_training = True, reuse = False):
  # input x -> n+1 classes
  # Note: dropout_rate is passed straight to tf.nn.dropout below, so it acts as keep_prob
  with tf.variable_scope('Encoder', reuse = reuse):

    # x = ?*64*64*1

    print('Encoder architecture: ')

    # Layer 1
    conv1 = tf.layers.conv2d(x, 128, kernel_size = [4,4], strides = [2,2],
                             padding = 'same', activation = tf.nn.leaky_relu, name = 'conv1') # ?*32*32*128
    print(conv1.shape)
    # No batch-norm for the input layer
    dropout1 = tf.nn.dropout(conv1, dropout_rate)

    # Layer 2
    conv2 = tf.layers.conv2d(dropout1, 256, kernel_size = [4,4], strides = [2,2],
                             padding = 'same', activation = tf.nn.leaky_relu, name = 'conv2') # ?*16*16*256
    batch2 = tf.layers.batch_normalization(conv2, training = is_training)
    dropout2 = tf.nn.dropout(batch2, dropout_rate)
    print(conv2.shape)

    # Layer 3
    conv3 = tf.layers.conv2d(dropout2, 512, kernel_size = [4,4], strides = [4,4],
                             padding = 'same', activation = tf.nn.leaky_relu, name = 'conv3') # ?*4*4*512
    batch3 = tf.layers.batch_normalization(conv3, training = is_training)
    dropout3 = tf.nn.dropout(batch3, dropout_rate)
    print(conv3.shape)

    # Layer 4
    conv4 = tf.layers.conv2d(dropout3, 1024, kernel_size = [3,3], strides = [1,1],
                             padding = 'valid', activation = tf.nn.leaky_relu, name = 'conv4') # ?*2*2*1024
    # No batch-norm as this layer's output is used in the feature matching loss
    # No dropout as feature matching needs deterministic logits
    print(conv4.shape)

    # Layer 5
    # Note: applying global average pooling

    flatten = tf.reduce_mean(conv4, axis = [1,2])
    logits_D = tf.layers.dense(flatten, (1 + num_classes))
    out_D = tf.nn.sigmoid(logits_D, name = 'sigmoid')

  return out_D
#out_D = Encoder(dd, dropout_rate = 0.7)

def outputs_Encoder(output_G):
  for k in range(hash_bit):
    # Reuse the Encoder variables after the first call, otherwise repeated calls
    # raise a variable-scope conflict; only the last output is kept here
    # (the commented-out tik lines below were meant to collect all hash_bit outputs)
    out_D1 = Encoder(output_G, dropout_rate = 0.7, reuse = (k > 0))
  return out_D1

out_D1 = outputs_Encoder(dd)
#tik[k] = out_D
# tik[:][:][k].append(out_D1.eval())
# i = 0
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  t = sess.run(out_D1)
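  # t holds the Encoder's sigmoid outputs for the generated batch; with the
  # definitions above its shape works out to (batch_size, 1 + num_classes)
  print(t.shape)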