Advertisement
Guest User

Untitled

a guest
Dec 10th, 2018
73
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 5.88 KB | None | 0 0
  1. ################################
  2. ## imports ##
  3. ################################
  4.  
  5. import numpy as np
  6. import matplotlib.pyplot as plt
  7. from keras.models import Sequential
  8. from keras.layers import Dense, Flatten, Reshape, Dropout, Conv2D, BatchNormalization
  9. import keras.layers.advanced_activations as adac
  10. from tqdm import tqdm
np.random.seed(7)  # fixed seed so data generation and training are reproducible

# set up loss storage vector
# losses["d"] / losses["g"] hold one entry per training batch; each entry is
# the [loss, accuracy] pair returned by Keras train_on_batch (metrics=['accuracy']).
losses = {"d":[], "g":[]}
  15.  
#################################
##          functions          ##
#################################
  19.  
  20. def plot_loss(losses):
  21. # display.clear_output(wait=True)
  22. # display.display(plt.gcf())
  23. plt.figure(figsize=(10,8))
  24. plt.plot(np.transpose(losses["d"])[0], label=('discriminitive loss'))
  25. plt.plot(np.transpose(losses["g"])[0], label=('generative loss'), linestyle ='dashed')
  26. plt.legend()
  27. plt.show()
  28. plt.plot(np.transpose(losses["d"])[1], label=('discriminitive Acc'))
  29. plt.plot(np.transpose(losses["g"])[1], label=('generative Acc'), linestyle ='dashed')
  30. plt.legend()
  31. plt.show()
  32.  
  33.  
  34. def plot_data(X,n_ex=16,dim=(4,4), figsize=(10,10) ):
  35. to_plot = []
  36. for i in range(n_ex):
  37. to_plot.append(X[np.random.randint(0,len(X))])
  38.  
  39. plt.figure(figsize=figsize)
  40. for i in range(len(to_plot)):
  41. plt.subplot(dim[0],dim[1],i+1)
  42. plt.plot(to_plot[i][0])
  43. plt.axis('off')
  44. plt.tight_layout()
  45. plt.show()
  46.  
  47. def plot_gen(n_ex=16,dim=(4,4), figsize=(10,10) ):
  48. noise = np.random.uniform(0,1,size=[n_ex,100])
  49. generated_images = Generator.predict(noise)
  50. Values = Discriminator.predict(generated_images)
  51. #generated_images = [Reconstruct(i) for i in generated_images]
  52.  
  53. plt.figure(figsize=figsize)
  54. for i in range(len(generated_images)):
  55. plt.subplot(dim[0],dim[1],i+1)
  56. plt.plot(generated_images[i][0])
  57. plt.axis('off')
  58. plt.tight_layout()
  59. plt.show()
  60.  
  61. def make_trainable(net, val):
  62. net.trainable = val
  63. for l in net.layers:
  64. l.trainable = val
  65.  
  66. def generate_good_data(nb_vect,nb_points):
  67. data=[]
  68. for i in range(nb_vect):
  69. Mat = np.zeros((1,nb_points))
  70. a = np.random.rand(1)
  71. for j in range(nb_points):
  72. Mat[0][j]= (a*(((j/nb_points)-0.5)**2))
  73. data.append(Mat)
  74. return(data)
  75.  
  76. def generate_bad_data(nb_vect,nb_points):
  77. data = []
  78. for i in range(nb_vect):
  79. Mat = np.zeros((1,nb_points))
  80. a = np.random.rand(1)
  81. for j in range(nb_points):
  82. Mat[0][j]=(np.random.rand(1))
  83. data.append(Mat)
  84. return(data)
  85.  
  86. def train_for_n(X,Y,nb_epoch=100, plt_frq=25,BATCH_SIZE=32):
  87. for e in tqdm(range(nb_epoch)):
  88.  
  89. # Make generative points
  90. coord1 = np.random.randint(0,X.shape[0]-BATCH_SIZE)
  91. coord2 = coord1+BATCH_SIZE
  92. dat = X[coord1:coord2]
  93. noise_gen = np.random.uniform(0,1,size=[BATCH_SIZE,100])
  94. datbad = Generator.predict(noise_gen)
  95.  
  96. # Train discriminator on generated images
  97. Xtemp = np.concatenate((dat, datbad))
  98. n = int(Xtemp.shape[0]/2)
  99. ybad = np.zeros([n])
  100. yTrue = np.ones([n])
  101. Y = np.concatenate((yTrue,ybad))
  102.  
  103. make_trainable(Discriminator, True)
  104. d_loss = Discriminator.train_on_batch(Xtemp,Y)
  105. losses["d"].append(d_loss)
  106.  
  107. # train Generator-Discriminator stack on input noise to non-generated output class
  108. noise_tr = np.random.uniform(0,1,size=[BATCH_SIZE,100])
  109. y2 = np.ones([BATCH_SIZE])
  110.  
  111. make_trainable(Discriminator,False)
  112. g_loss = gan.train_on_batch(noise_tr, y2 )
  113. losses["g"].append(g_loss)
  114.  
  115. # Updates plots
  116. if e%plt_frq==plt_frq-1:
  117. plot_loss(losses)
  118. plot_gen()
  119.  
##################################
##        Discriminator         ##
##################################

# Binary real/fake classifier: takes a (1, 100) curve, outputs P(real).
model = Sequential()
# Dropout probability shared by both hidden layers.
# NOTE(review): 0.95 is an unusually aggressive dropout rate — confirm intentional.
droprate = 0.95
model.add(Dense(100,input_shape=(1,100),activation='relu'))
model.add(Dropout(droprate))
model.add(Dense(1000,activation='relu'))
model.add(Dropout(droprate))
model.add(Flatten())
# Single sigmoid unit: probability that the input sample is real.
model.add(Dense(1,activation='sigmoid'))
model.summary()

Discriminator = model

# Compile model
Discriminator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Discriminator.summary()
  139.  
##############################
##        Generator         ##
##############################

# Generator: maps a 100-d uniform noise vector to a (1, 100) curve.
droprate = 0  # dropout effectively disabled in the generator
model = Sequential()

model.add(Dense(25,input_dim=100,activation='relu'))
model.add(Dropout(droprate))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(400,activation='relu'))
model.add(Dropout(droprate))
# 400 units reshaped into a 20x20 single-channel "image" for the conv layer.
model.add(Reshape((20,20,1)))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256,(3,3),activation='relu'))
model.add(Dropout(droprate))
model.add(Flatten())
model.add(Dense(100,activation='tanh'))
model.add(Dropout(droprate))
# Final shape matches the discriminator's (1, 100) input.
model.add(Reshape((1,100)))

Generator = model

# Compile model
# NOTE(review): the Generator is only trained through the stacked `gan` model
# below; this standalone compile appears needed only for predict()/summary().
Generator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Generator.summary()
  165.  
  166.  
#######################
##        Gan        ##
#######################

# Freeze the discriminator inside the stacked model so that training `gan`
# updates only the generator's weights (the discriminator is re-enabled
# separately during its own training phase).
make_trainable(Discriminator, False)
# Build stacked GAN model: noise -> Generator -> Discriminator -> P(real).
gan = Sequential()
gan.add(Generator)
gan.add(Discriminator)

gan.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
gan.summary()
  178.  
  179.  
###########################
##         main          ##
###########################

# 500 "real" parabola samples and 500 uniform-noise samples, 100 points each.
dat = np.array(generate_good_data(500,100))
datbad = np.array(generate_bad_data(500,100))

plot_data(dat)

# Labeled dataset for discriminator pre-training: real = 1, fake = 0.
X = np.concatenate((dat,datbad),axis=0)
yTrue = np.ones((dat.shape[0]))
ybad = np.zeros((dat.shape[0]))
Y = np.concatenate((yTrue,ybad))

#Optional pre-train discriminator
make_trainable(Discriminator, True)
Discriminator.fit(X, Y, epochs=2, batch_size=100)

# Adversarial training. NOTE: with plt_frq == nb_epoch the diagnostic plots
# are drawn only once, after the final batch. train_for_n rebuilds labels per
# batch, so the Y passed here is not used inside it.
train_for_n(X,Y,nb_epoch=3000, plt_frq=3000,BATCH_SIZE=100)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement