Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Third-party imports for the autoencoder experiments below.
# (Rewritten one-per-line; the original used backslash continuations.)
from keras.datasets import mnist
from keras.layers import (
    BatchNormalization,
    Conv2D,
    Dense,
    GaussianNoise,
    GlobalAveragePooling2D,
    Input,
    MaxPool2D,
    Reshape,
    UpSampling2D,
)
from keras.models import Model
from keras.optimizers import Adam

import numpy as np
import matplotlib.pyplot as plt
# Load MNIST and keep a small subset for fast experimentation.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Conv2D layers expect a trailing channel axis: (28, 28) -> (28, 28, 1).
# BUG FIX: the original expanded x_train only, leaving x_test as
# (n, 28, 28), which breaks any later predict() on test images.
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)

samples = 2000
x_train = x_train[:samples, ...]
x_test = x_test[:samples, ...]
y_train = y_train[:samples]
y_test = y_test[:samples]

# Print shapes rather than dumping millions of raw pixel values to stdout.
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
act_func = 'selu'
aec_dim_num = 2  # dimensionality of the latent space (2-D for easy plotting)

# Encoder: noisy input -> three conv/pool stages -> global pool -> 2-D code.
# GaussianNoise regularises; tanh bounds the latent code to [-1, 1].
encoder_layers = [
    GaussianNoise(1),
    BatchNormalization(),
    Conv2D(32, (7, 7), padding='same', activation=act_func),
    MaxPool2D(2, 2),
    BatchNormalization(),
    Conv2D(64, (5, 5), padding='same', activation=act_func),
    MaxPool2D(2, 2),
    BatchNormalization(),
    Conv2D(128, (3, 3), padding='same', activation=act_func),
    GlobalAveragePooling2D(),
    Dense(aec_dim_num, activation='tanh'),
]
# Decoder: 2-D code -> 1x1x128 feature map -> upsample 1->7->14->28
# -> single-channel sigmoid image in [0, 1].
decoder_layers = [
    Dense(128, activation=act_func),
    BatchNormalization(),
    Reshape((1, 1, 128)),
    UpSampling2D((7, 7)),
    Conv2D(32, (3, 3), padding='same', activation=act_func),
    BatchNormalization(),
    UpSampling2D((2, 2)),
    Conv2D(32, (5, 5), padding='same', activation=act_func),
    BatchNormalization(),
    UpSampling2D((2, 2)),
    Conv2D(32, (7, 7), padding='same', activation=act_func),
    BatchNormalization(),
    Conv2D(1, (3, 3), padding='same', activation='sigmoid'),
]
lrng_rate = 0.0002

# One shared Input feeds both the encoder sub-model and the full autoencoder.
input_encoder = Input(x_train.shape[1:])
input_aec = input_encoder

tensor = input_encoder
for enc_layer in encoder_layers:
    tensor = enc_layer(tensor)
output_encoder = tensor

# A second Input lets the decoder run standalone on latent codes.
input_decoder = Input(output_encoder.shape[1:])
dec_tensor = input_decoder

# Apply the SAME decoder layer objects along both paths so the standalone
# decoder shares weights with the autoencoder's decoding half.
for dec_layer in decoder_layers:
    tensor = dec_layer(tensor)
    dec_tensor = dec_layer(dec_tensor)

print(tensor)
print(dec_tensor)

output_aec = tensor
output_decoder = dec_tensor
# Three views over the same weights: full autoencoder, encoder, decoder.
autoencoder = Model(inputs=input_aec, outputs=output_aec)
encoder = Model(inputs=input_encoder, outputs=output_encoder)
decoder = Model(inputs=input_decoder, outputs=dec_tensor)

autoencoder.compile(optimizer=Adam(lrng_rate), loss='binary_crossentropy')

# BUG FIX: binary_crossentropy against a sigmoid output expects targets in
# [0, 1]; the original trained on raw uint8 pixels (0-255). The unused
# `x_train_scaled` earlier shows scaling was intended — apply it here.
x_train_scaled = x_train / 255.0
autoencoder.fit(x=x_train_scaled, y=x_train_scaled,
                epochs=10, batch_size=256)
# Scatter the 2-D latent codes, one colour per digit class.
fig, ax = plt.subplots(1, 1, figsize=(20, 16))
for digit in range(10):
    selector = y_train == digit
    codes = encoder.predict(x_train[selector, ...])
    ax.scatter(codes[:, 0], codes[:, 1])
ax.legend(list(range(10)))
# Sweep a num x num grid over the latent square [-limit, limit)^2 and
# decode each point, visualising the learned manifold.
num = 15
limit = 0.6

fig, ax = plt.subplots(num, num, figsize=(20, 16))

# BUG FIX: np.arange with a float step can produce num+1 samples due to
# floating-point rounding, overflowing the num x num subplot grid.
# np.linspace with endpoint=False yields exactly `num` evenly spaced
# values matching the intended arange(-limit, limit, 2*limit/num).
X_vals = np.linspace(-limit, limit, num, endpoint=False)
Y_vals = np.linspace(-limit, limit, num, endpoint=False)

for i, x in enumerate(X_vals):
    for j, y in enumerate(Y_vals):
        latent_point = np.array([[x, y]])
        img = np.squeeze(decoder.predict(x=latent_point))
        # Flip the row index so larger y values appear toward the top.
        ax[-j - 1, i].imshow(img, cmap='jet')
        ax[-j - 1, i].axis('off')
# Re-imports for the denoising-autoencoder section (redundant with the
# imports at the top of the file, but kept so this section remains
# self-contained if copied out on its own).
from keras.layers import Conv2D, MaxPool2D, Input, UpSampling2D, GaussianNoise
from keras.models import Model
from keras.optimizers import Adam
act_func = 'selu'


def _conv(filters):
    # All encoder convolutions share the same 3x3 same-padded shape.
    return Conv2D(filters, (3, 3), padding='same', activation=act_func)


# Denoising encoder: corrupt the input, then reduce 28 -> 14 -> 7 spatially.
encoder_layers = [
    GaussianNoise(1),
    _conv(32),
    MaxPool2D(2, 2),
    _conv(64),
    MaxPool2D(2, 2),
    _conv(128),
]
# Denoising decoder: upsample 7 -> 14 -> 28 and project back to a
# single-channel sigmoid image in [0, 1].
decoder_layers = [
    UpSampling2D((2, 2)),
    Conv2D(32, (3, 3), padding='same', activation=act_func),
    UpSampling2D((2, 2)),
    Conv2D(32, (3, 3), padding='same', activation=act_func),
    Conv2D(1, (3, 3), padding='same', activation='sigmoid'),
]
lrng_rate = 0.0001

# Chain encoder + decoder into a single denoising-autoencoder graph.
tensor = autoencoder_input = Input(x_train.shape[1:])
for layer in encoder_layers + decoder_layers:
    tensor = layer(tensor)

autoencoder = Model(inputs=autoencoder_input, outputs=tensor)
autoencoder.compile(optimizer=Adam(lrng_rate), loss='binary_crossentropy')

# BUG FIX: scale pixels to [0, 1] before fitting — binary_crossentropy
# with a sigmoid output is meaningless for raw 0-255 targets.
x_train_scaled = x_train / 255.0
autoencoder.fit(x=x_train_scaled, y=x_train_scaled,
                epochs=10, batch_size=256)
# Take ten held-out digits and corrupt them with salt-and-pepper noise.
# BUG FIX: cast to float32 first — the in-place `/= 255` below raises a
# TypeError on a uint8 array (true division cannot be stored in place
# into an integer dtype).
test_photos = x_test[10:20, ...].astype('float32')
noisy_test_photos = test_photos.copy()

mask = np.random.randn(*test_photos.shape)
white = mask > 1    # ~16% of pixels forced to white
black = mask < -1   # ~16% of pixels forced to black
noisy_test_photos[white] = 255
noisy_test_photos[black] = 0
noisy_test_photos /= 255  # rescale the noisy batch to [0, 1] for the model
def show_pictures(arrs):
    """Render every image in `arrs` side by side on one figure row.

    Parameters
    ----------
    arrs : ndarray of shape (n, H, W) or (n, H, W, 1)
        Batch of grayscale images; a trailing channel axis is squeezed.
    """
    arr_cnt = arrs.shape[0]
    fig, axes = plt.subplots(1, arr_cnt,
                             figsize=(5 * arr_cnt, arr_cnt))
    # BUG FIX: for a single image plt.subplots returns a bare Axes object,
    # not an array, which would break the zip below.
    axes = np.atleast_1d(axes)
    for axis, pic in zip(axes, arrs):
        axis.imshow(pic.squeeze(), cmap='gray')
# BUG FIX: noisy_test_photos was already rescaled to [0, 1] above, so the
# original's extra `/255` before predict double-scaled the input (and the
# trailing `*255` then produced near-uniform output). The reshape pins the
# (batch, 28, 28, 1) layout the conv net expects regardless of whether the
# test images still carry a channel axis.
cleaned_images = autoencoder.predict(
    noisy_test_photos.reshape(-1, 28, 28, 1)) * 255

show_pictures(test_photos)
show_pictures(noisy_test_photos)
# BUG FIX: the denoised result was computed but never displayed.
show_pictures(cleaned_images)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement