si_lab10 (mikolajmki, Dec 8th, 2022)

from keras.layers import (Conv2D, MaxPool2D,
                          GlobalAveragePooling2D, Dense,
                          Input, Reshape, UpSampling2D,
                          BatchNormalization, GaussianNoise)
from keras.models import Model
from keras.optimizers import Adam
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Add a channel dimension and scale pixels to [0, 1] to match the sigmoid
# output and binary cross-entropy loss used below.
x_train = np.expand_dims(x_train, axis=-1).astype('float32') / 255
x_test = np.expand_dims(x_test, axis=-1).astype('float32') / 255

# Keep a small subset to speed up training
samples = 2000
x_train = x_train[:samples]
x_test = x_test[:samples]
y_train = y_train[:samples]
y_test = y_test[:samples]

print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
act_func = 'selu'
aec_dim_num = 2  # size of the 2-D latent (bottleneck) code

# Encoder: noised, normalised input -> three conv/pool blocks -> 2-D code in [-1, 1]
encoder_layers = [
    GaussianNoise(1),
    BatchNormalization(),
    Conv2D(32, (7, 7), padding='same', activation=act_func),
    MaxPool2D(2, 2),
    BatchNormalization(),
    Conv2D(64, (5, 5), padding='same', activation=act_func),
    MaxPool2D(2, 2),
    BatchNormalization(),
    Conv2D(128, (3, 3), padding='same', activation=act_func),
    GlobalAveragePooling2D(),
    Dense(aec_dim_num, activation='tanh'),
]
# Decoder: 2-D code -> 1x1x128 feature map -> upsampled back to a 28x28x1 image
decoder_layers = [
    Dense(128, activation=act_func),
    BatchNormalization(),
    Reshape((1, 1, 128)),
    UpSampling2D((7, 7)),
    Conv2D(32, (3, 3), padding='same', activation=act_func),
    BatchNormalization(),
    UpSampling2D((2, 2)),
    Conv2D(32, (5, 5), padding='same', activation=act_func),
    BatchNormalization(),
    UpSampling2D((2, 2)),
    Conv2D(32, (7, 7), padding='same', activation=act_func),
    BatchNormalization(),
    Conv2D(1, (3, 3), padding='same', activation='sigmoid'),
]
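# Decoder shape walkthrough, for reference: (2,) -> Dense -> (128,) -> Reshape
# -> (1, 1, 128) -> UpSampling2D((7, 7)) -> (7, 7, 128) -> Conv2D -> (7, 7, 32)
# -> UpSampling2D -> (14, 14, 32) -> UpSampling2D -> (28, 28, 32)
# -> final Conv2D -> (28, 28, 1), matching the MNIST input shape.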

lrng_rate = 0.0002

# Wire up the layer graph: one input for the encoder/full autoencoder and a
# second input so the same decoder layers can be reused as a standalone model.
tensor = input_aec = input_encoder = Input(x_train.shape[1:])
for layer in encoder_layers:
    tensor = layer(tensor)
output_encoder = tensor

dec_tensor = input_decoder = Input(output_encoder.shape[1:])
for layer in decoder_layers:
    tensor = layer(tensor)
    dec_tensor = layer(dec_tensor)

print(tensor)
print(dec_tensor)
output_aec = tensor
output_decoder = dec_tensor

autoencoder = Model(inputs=input_aec, outputs=output_aec)
encoder = Model(inputs=input_encoder, outputs=output_encoder)
decoder = Model(inputs=input_decoder, outputs=output_decoder)
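
# Optional sanity check (a convenience sketch, not needed by the rest of the
# script): print the layer/shape summaries of the three models.
autoencoder.summary()
encoder.summary()
decoder.summary()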

autoencoder.compile(optimizer=Adam(lrng_rate), loss='binary_crossentropy')
autoencoder.fit(x=x_train, y=x_train, epochs=10, batch_size=256)
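
# A minimal reconstruction check (a sketch; `sample_imgs` and `recons` are
# illustrative names, not from the original script): run a few training digits
# through the trained autoencoder and show originals above reconstructions.
sample_imgs = x_train[:8]
recons = autoencoder.predict(sample_imgs)
fig, axes = plt.subplots(2, 8, figsize=(16, 4))
for col in range(8):
    axes[0, col].imshow(sample_imgs[col].squeeze(), cmap='gray')  # original
    axes[1, col].imshow(recons[col].squeeze(), cmap='gray')       # reconstruction
    axes[0, col].axis('off')
    axes[1, col].axis('off')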

# Scatter the 2-D latent codes of the training digits, one colour per class
fig, ax = plt.subplots(1, 1, figsize=(20, 16))
for i in range(10):
    digits = y_train == i
    needed_imgs = x_train[digits, ...]
    preds = encoder.predict(needed_imgs)
    ax.scatter(preds[:, 0], preds[:, 1])
ax.legend(list(range(10)))

# Decode a regular grid of latent points to visualise how the generated digits
# change across the 2-D latent space
num = 15
limit = 0.6
step = limit * 2 / num
fig, ax = plt.subplots(num, num, figsize=(20, 16))
X_vals = np.arange(-limit, limit, step)
Y_vals = np.arange(-limit, limit, step)
for i, x in enumerate(X_vals):
    for j, y in enumerate(Y_vals):
        test_in = np.array([[x, y]])
        output = decoder.predict(x=test_in)
        output = np.squeeze(output)
        ax[-j - 1, i].imshow(output, cmap='jet')
        ax[-j - 1, i].axis('off')

# Part 2: a denoising autoencoder that keeps spatial feature maps
# (no dense bottleneck), trained to reconstruct clean digits from noised input.
from keras.layers import Conv2D, MaxPool2D, Input, UpSampling2D, GaussianNoise
from keras.models import Model
from keras.optimizers import Adam

act_func = 'selu'
encoder_layers = [
    GaussianNoise(1),
    Conv2D(32, (3, 3), padding='same', activation=act_func),
    MaxPool2D(2, 2),
    Conv2D(64, (3, 3), padding='same', activation=act_func),
    MaxPool2D(2, 2),
    Conv2D(128, (3, 3), padding='same', activation=act_func),
]
decoder_layers = [
    UpSampling2D((2, 2)),
    Conv2D(32, (3, 3), padding='same', activation=act_func),
    UpSampling2D((2, 2)),
    Conv2D(32, (3, 3), padding='same', activation=act_func),
    Conv2D(1, (3, 3), padding='same', activation='sigmoid'),
]
lrng_rate = 0.0001
tensor = autoencoder_input = Input(x_train.shape[1:])
for layer in encoder_layers + decoder_layers:
    tensor = layer(tensor)
autoencoder = Model(inputs=autoencoder_input, outputs=tensor)
autoencoder.compile(optimizer=Adam(lrng_rate), loss='binary_crossentropy')
autoencoder.fit(x=x_train, y=x_train, epochs=10, batch_size=256)
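
# Optional check (a sketch; `test_loss` is an illustrative name, not from the
# original script): reconstruction loss on clean held-out test images.
test_loss = autoencoder.evaluate(x=x_test, y=x_test, batch_size=256)
print('clean test reconstruction loss:', test_loss)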

# Corrupt a few test images with salt-and-pepper noise (pixels are in [0, 1])
test_photos = x_test[10:20, ...].copy()
noisy_test_photos = test_photos.copy()
mask = np.random.randn(*test_photos.shape)
white = mask > 1   # salt pixels
black = mask < -1  # pepper pixels
noisy_test_photos[white] = 1.0
noisy_test_photos[black] = 0.0

def show_pictures(arrs):
    arr_cnt = arrs.shape[0]
    fig, axes = plt.subplots(1, arr_cnt, figsize=(5 * arr_cnt, arr_cnt))
    for axis, pic in zip(axes, arrs):
        axis.imshow(pic.squeeze(), cmap='gray')

cleaned_images = autoencoder.predict(noisy_test_photos)
show_pictures(test_photos)
show_pictures(noisy_test_photos)
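
# Natural follow-up (a sketch): display the denoised outputs and render all figures.
show_pictures(cleaned_images)
plt.show()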