Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# build encoder model
def encoder_model(inputs, hidden_dims=None, latent=None):
    """Build the encoder half of the autoencoder.

    Args:
        inputs: Keras ``Input`` tensor feeding the encoder.
        hidden_dims: optional sequence of hidden-layer widths. Defaults to the
            module-level ``(intermediate_dim_1, intermediate_dim_2,
            intermediate_dim_3)`` so the original call ``encoder_model(inputs)``
            behaves exactly as before.
        latent: optional bottleneck width; defaults to module-level
            ``latent_dim``.

    Returns:
        A Keras ``Model`` mapping ``inputs`` to the latent encoding.
    """
    # Fall back to the module globals the original implementation read
    # directly; resolved at call time, so the script's __main__ block can
    # still define them after this function is defined.
    if hidden_dims is None:
        hidden_dims = (intermediate_dim_1, intermediate_dim_2, intermediate_dim_3)
    if latent is None:
        latent = latent_dim

    x = inputs
    for width in hidden_dims:
        x = Dense(width, activation='relu')(x)
    # Linear (no activation) bottleneck layer, as in the original.
    encoded = Dense(latent)(x)
    return Model(inputs, encoded, name='encoder')
# build decoder model
def decoder_model(hidden_dims=None, latent=None, output_dim=None):
    """Build the decoder half of the autoencoder (mirror of the encoder).

    Args:
        hidden_dims: optional sequence of hidden-layer widths, ordered from
            the latent side outward. Defaults to the module-level
            ``(intermediate_dim_3, intermediate_dim_2, intermediate_dim_1)``
            so the original zero-argument call behaves exactly as before.
        latent: optional bottleneck width; defaults to module-level
            ``latent_dim``.
        output_dim: optional reconstruction width; defaults to module-level
            ``original_dim``.

    Returns:
        A Keras ``Model`` mapping latent vectors to reconstructions.
    """
    # Resolve defaults from the module globals the original read directly.
    if hidden_dims is None:
        hidden_dims = (intermediate_dim_3, intermediate_dim_2, intermediate_dim_1)
    if latent is None:
        latent = latent_dim
    if output_dim is None:
        output_dim = original_dim

    latent_inputs = Input(shape=(latent,), name='z_space')
    x = latent_inputs
    for width in hidden_dims:
        x = Dense(width, activation='relu')(x)
    # Linear output layer, as in the original.
    decoded = Dense(output_dim)(x)

    # instantiate decoder model
    return Model(latent_inputs, decoded, name='decoder')
if __name__ == '__main__':
    # Hold out 20% of the samples for validation. Cp_inputs/X_all are assumed
    # to be defined earlier in the file (not visible here) — TODO confirm.
    x_trn, x_val, y_trn, y_val = train_test_split(
        Cp_inputs, X_all, test_size=0.2, shuffle=True, random_state=0)

    # Flatten each sample to a vector of length original_dim.
    original_dim = x_trn.shape[1]
    x_trn = np.reshape(x_trn, [-1, original_dim])
    x_val = np.reshape(x_val, [-1, original_dim])

    input_shape = (original_dim,)
    inputs = Input(shape=input_shape, name='encoder_input')

    # Define intermediate layer dimensions and latent layer dimension.
    intermediate_dim_1 = 256
    intermediate_dim_2 = 128
    intermediate_dim_3 = 64
    latent_dim = 3

    # Training hyper-parameters.
    epochs = 200
    batch_size = 128

    encoder = encoder_model(inputs)
    decoder = decoder_model()

    # Instantiate the full autoencoder: input -> latent -> reconstruction.
    outputs = decoder(encoder(inputs))
    ae = Model(inputs, outputs, name='ae_mlp')

    # BUG FIX: the original both called ae.add_loss(mse(inputs, outputs)) and
    # compiled with loss='mse' while fitting on (x_trn, x_trn), so the
    # reconstruction error was counted TWICE (doubling the effective
    # gradient). A single MSE loss via compile() is sufficient.
    # BUG FIX: `lr` is deprecated/removed in TF 2.x — use `learning_rate`.
    opt = tf.keras.optimizers.Adam(learning_rate=0.001)
    ae.compile(optimizer=opt, loss='mse')

    history = ae.fit(x_trn, x_trn,
                     epochs=epochs,
                     batch_size=batch_size,
                     validation_data=(x_val, x_val))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement