Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- ################################
- ## imports ##
- ################################
- import numpy as np
- import matplotlib.pyplot as plt
- from keras.models import Sequential
- from keras.layers import Dense, Flatten, Reshape, Dropout, Conv2D, BatchNormalization
- import keras.layers.advanced_activations as adac
- from tqdm import tqdm
# Fix the NumPy RNG seed so data generation and training are reproducible.
np.random.seed(7)
# set up loss storage vector
# Per-batch training history: "d" = discriminator, "g" = stacked GAN.
# Each appended entry is the [loss, accuracy] pair from train_on_batch.
losses = {"d":[], "g":[]}
#################################
##          functions          ##
#################################
def plot_loss(losses):
    """Plot the recorded loss curves, then the accuracy curves.

    Args:
        losses: dict with keys "d" and "g"; each value is a list of
            [loss, accuracy] pairs as returned by train_on_batch.
    """
    # display.clear_output(wait=True)
    # display.display(plt.gcf())
    # Column 0 of the transposed history holds the loss values.
    # (Fixed legend typo: "discriminitive" -> "discriminative".)
    plt.figure(figsize=(10, 8))
    plt.plot(np.transpose(losses["d"])[0], label=('discriminative loss'))
    plt.plot(np.transpose(losses["g"])[0], label=('generative loss'), linestyle='dashed')
    plt.legend()
    plt.show()
    # Column 1 holds the accuracy values.
    plt.plot(np.transpose(losses["d"])[1], label=('discriminative Acc'))
    plt.plot(np.transpose(losses["g"])[1], label=('generative Acc'), linestyle='dashed')
    plt.legend()
    plt.show()
def plot_data(X, n_ex=16, dim=(4, 4), figsize=(10, 10)):
    """Draw n_ex randomly chosen vectors from X in a dim[0] x dim[1] grid."""
    # Pick n_ex samples uniformly at random (with replacement).
    samples = [X[np.random.randint(0, len(X))] for _ in range(n_ex)]
    plt.figure(figsize=figsize)
    for idx, sample in enumerate(samples, start=1):
        plt.subplot(dim[0], dim[1], idx)
        plt.plot(sample[0])
        plt.axis('off')
    plt.tight_layout()
    plt.show()
def plot_gen(n_ex=16, dim=(4, 4), figsize=(10, 10)):
    """Sample n_ex noise vectors, run the Generator, and plot its outputs.

    Relies on the module-level `Generator` model. (The original also ran
    an unused Discriminator.predict on the samples -- a dead forward pass
    that has been removed.)
    """
    noise = np.random.uniform(0, 1, size=[n_ex, 100])
    generated_images = Generator.predict(noise)
    #generated_images = [Reconstruct(i) for i in generated_images]
    plt.figure(figsize=figsize)
    for i in range(len(generated_images)):
        plt.subplot(dim[0], dim[1], i + 1)
        # Each generated sample has shape (1, 100); plot the single row.
        plt.plot(generated_images[i][0])
        plt.axis('off')
    plt.tight_layout()
    plt.show()
def make_trainable(net, val):
    """Enable or disable training for `net` and every one of its layers.

    The flag is set on the model itself and on each layer, since Keras
    reads per-layer `trainable` flags when (re)compiling.
    """
    for target in [net] + list(net.layers):
        target.trainable = val
def generate_good_data(nb_vect, nb_points):
    """Build nb_vect "real" samples of shape (1, nb_points).

    Each sample is the parabola-like curve a * ((j/nb_points - 0.5)**2)
    for j in [0, nb_points), with a random scale a drawn from U[0, 1).
    """
    # The curve shape is the same for every sample; compute it once.
    base = (np.arange(nb_points) / nb_points - 0.5) ** 2
    data = []
    for _ in range(nb_vect):
        scale = np.random.rand(1)
        data.append((scale * base).reshape(1, nb_points))
    return data
def generate_bad_data(nb_vect, nb_points):
    """Build nb_vect "fake" samples of shape (1, nb_points).

    Each sample is pure uniform noise in [0, 1). The original drew an
    unused per-vector scale `a` (dead code, removed) and filled each
    element with a separate rand(1) call; one vectorized draw per sample
    yields the same distribution.
    """
    data = []
    for _ in range(nb_vect):
        data.append(np.random.rand(1, nb_points))
    return data
def train_for_n(X, Y, nb_epoch=100, plt_frq=25, BATCH_SIZE=32):
    """Alternate discriminator / generator updates for nb_epoch batches.

    Uses the module-level `Generator`, `Discriminator`, `gan`, and `losses`.

    Args:
        X: pool of real samples; batches are contiguous random slices.
        Y: UNUSED -- kept only for backward compatibility. The original
           silently overwrote this parameter with per-batch labels; the
           labels are now built in a local variable instead.
        nb_epoch: number of batch updates to run.
        plt_frq: plot losses and generated samples every plt_frq epochs.
        BATCH_SIZE: number of real (and of fake) samples per batch.
    """
    for e in tqdm(range(nb_epoch)):
        # Take a contiguous random slice of real data and generate fakes.
        start = np.random.randint(0, X.shape[0] - BATCH_SIZE)
        real_batch = X[start:start + BATCH_SIZE]
        noise_gen = np.random.uniform(0, 1, size=[BATCH_SIZE, 100])
        fake_batch = Generator.predict(noise_gen)

        # Train the discriminator: real samples labeled 1, fakes labeled 0.
        X_batch = np.concatenate((real_batch, fake_batch))
        n = int(X_batch.shape[0] / 2)
        y_batch = np.concatenate((np.ones([n]), np.zeros([n])))
        make_trainable(Discriminator, True)
        d_loss = Discriminator.train_on_batch(X_batch, y_batch)
        losses["d"].append(d_loss)

        # Train the generator through the frozen discriminator: the stack
        # is rewarded when fakes are classified as real (label 1).
        noise_tr = np.random.uniform(0, 1, size=[BATCH_SIZE, 100])
        y_real = np.ones([BATCH_SIZE])
        make_trainable(Discriminator, False)
        g_loss = gan.train_on_batch(noise_tr, y_real)
        losses["g"].append(g_loss)

        # Periodic progress plots.
        if e % plt_frq == plt_frq - 1:
            plot_loss(losses)
            plot_gen()
##################################
##        Discriminator         ##
##################################
# Binary classifier: (1, 100) input vector -> probability the sample is real.
model = Sequential()
# NOTE(review): 0.95 dropout is unusually aggressive -- it zeroes 95% of
# activations at train time; confirm this is intentional.
droprate = 0.95
model.add(Dense(100,input_shape=(1,100),activation='relu'))
model.add(Dropout(droprate))
model.add(Dense(1000,activation='relu'))
model.add(Dropout(droprate))
model.add(Flatten())
# Single sigmoid unit: P(real).
model.add(Dense(1,activation='sigmoid'))
model.summary()
Discriminator = model
# Compile model
Discriminator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Discriminator.summary()
##############################
##        Generator         ##
##############################
# NOTE(review): droprate 0 makes every Dropout layer below a no-op.
droprate = 0
# Maps a 100-d uniform noise vector to a (1, 100) output vector.
model = Sequential()
model.add(Dense(25,input_dim=100,activation='relu'))
model.add(Dropout(droprate))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(400,activation='relu'))
model.add(Dropout(droprate))
# 400 units reshaped into a 20x20 single-channel "image" for the conv layer.
model.add(Reshape((20,20,1)))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256,(3,3),activation='relu'))
model.add(Dropout(droprate))
model.add(Flatten())
# tanh squashes the output to [-1, 1]; reshaped to (1, 100) to match the
# shape of the real data samples.
model.add(Dense(100,activation='tanh'))
model.add(Dropout(droprate))
model.add(Reshape((1,100)))
Generator = model
# Compile model (the generator is only ever trained through `gan`, but
# compiling keeps .predict()/.summary() usable standalone).
Generator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Generator.summary()
#######################
##        Gan        ##
#######################
# Freeze the discriminator BEFORE compiling the stack, so gan.train_on_batch
# updates only the generator's weights. The standalone Discriminator model
# was compiled earlier while trainable, so it still learns via its own
# train_on_batch calls.
make_trainable(Discriminator, False)
# Build stacked GAN model
gan = Sequential()
gan.add(Generator)
gan.add(Discriminator)
gan.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
gan.summary()
###########################
##          main         ##
###########################
# Build a balanced dataset: 500 "real" parabola-like vectors and 500
# uniform-noise vectors, each of shape (1, 100).
dat = np.array(generate_good_data(500,100))
datbad = np.array(generate_bad_data(500,100))
plot_data(dat)
X = np.concatenate((dat,datbad),axis=0)
# Label reals 1 and fakes 0. Size each label vector from its OWN data
# array: the original sized both from dat.shape[0], which only worked
# because the two sets happened to be the same length.
yTrue = np.ones((dat.shape[0]))
ybad = np.zeros((datbad.shape[0]))
Y = np.concatenate((yTrue,ybad))
#Optional pre-train discriminator
make_trainable(Discriminator, True)
Discriminator.fit(X, Y, epochs=2, batch_size=100)
# plt_frq == nb_epoch, so progress plots appear once, on the final epoch.
train_for_n(X,Y,nb_epoch=3000, plt_frq=3000,BATCH_SIZE=100)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement