# Variational autoencoder in Keras: builds an encoder, a decoder, and a
# compiled end-to-end model trained with an MSE reconstruction term plus a
# KL-divergence regularizer on the latent code.
from keras import backend as K
from keras.layers import Input, Dense, Lambda
from keras.layers.merge import concatenate
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.losses import mean_squared_error


def variational_autoencoder(n_input_features, latent_space_size=64, hlayer_size=256,
                            lr=1.0e-3, kl_weight=0.1):

    # Encoder: two shared dense layers feeding separate mu and log-sigma heads.
    encoder_input = Input(shape=[n_input_features])
    encoder_seq = Sequential()
    encoder_seq.add(Dense(hlayer_size, activation='relu', input_shape=[n_input_features]))
    encoder_seq.add(Dense(hlayer_size, activation='relu'))

    encoder_hidden = encoder_seq(encoder_input)
    encoder_mu = Dense(latent_space_size, activation='linear')(encoder_hidden)
    encoder_log_sigma = Dense(latent_space_size, activation='linear')(encoder_hidden)

    def _sample_z(args):
        """Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, 1)."""
        mu, log_sigma = args
        eps = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
        return mu + K.exp(log_sigma / 2) * eps

    encoder_output = Lambda(_sample_z)([encoder_mu, encoder_log_sigma])

    # Decoder: mirrors the encoder and maps z back to feature space.
    decoder_input = Input(shape=[latent_space_size])

    decoder_seq = Sequential()
    decoder_seq.add(Dense(hlayer_size, activation='relu', input_shape=[latent_space_size]))
    decoder_seq.add(Dense(hlayer_size, activation='relu'))
    decoder_seq.add(Dense(n_input_features, activation='linear'))

    # Standalone encoder/decoder models, plus the end-to-end model. The full
    # model also emits mu and log-sigma so the loss can compute the KL term.
    encoder_model = Model(inputs=encoder_input, outputs=encoder_output)
    decoder_model = Model(inputs=decoder_input, outputs=decoder_seq(decoder_input))
    full_model = Model(inputs=encoder_input,
                       outputs=concatenate([encoder_mu, encoder_log_sigma,
                                            decoder_seq(encoder_output)]))

    def _vae_loss(y_true, model_output):
        """Calculate loss = reconstruction loss + KL loss for each example in the minibatch."""
        encoder_mu = model_output[:, 0:latent_space_size]
        encoder_log_sigma = model_output[:, latent_space_size:latent_space_size * 2]
        y_pred = model_output[:, latent_space_size * 2:]

        # E[log P(X|z)] - we model P(X_i|z) as a Gaussian, so the
        # reconstruction term reduces to mean squared error. For Bernoulli
        # outputs, use binary cross-entropy instead:
        # recon = K.sum(K.binary_crossentropy(y_true, y_pred), axis=1)
        recon = mean_squared_error(y_true, y_pred)

        # D_KL(Q(z|X) || P(z)); closed form since both distributions are Gaussian.
        kl = 0.5 * K.sum(K.exp(encoder_log_sigma) + K.square(encoder_mu)
                         - 1. - encoder_log_sigma, axis=1)

        return recon + kl_weight * kl

    full_model.compile(optimizer=Adam(lr=lr), loss=_vae_loss)

    return encoder_model, decoder_model, full_model
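

# Minimal usage sketch (not part of the original paste): a smoke test on
# random data showing the expected call pattern. The array X, its shape,
# and the training settings below are illustrative assumptions.
if __name__ == '__main__':
    import numpy as np

    X = np.random.rand(1024, 784).astype('float32')

    encoder, decoder, vae = variational_autoencoder(n_input_features=784)

    # The custom loss slices mu, log-sigma and the reconstruction out of the
    # concatenated model output, so the fit() target is just the input itself.
    vae.fit(X, X, batch_size=64, epochs=2)

    # Round-trip a few examples through the latent space.
    z = encoder.predict(X[:10])
    x_reconstructed = decoder.predict(z)
    print(z.shape, x_reconstructed.shape)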