Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Convolutional autoencoder for 28x28x1 (MNIST-style) grayscale images.
# Encoder compresses 28x28 down to a 4x4x16 latent view; decoder reconstructs
# a 28x28x1 image with sigmoid output, trained with MSE reconstruction loss.
# (Paste artifacts — leading "- " list markers — removed so the script is
# valid Python again.)

# input layer
input_layer = Input(shape=(28, 28, 1))

# encoding architecture: 28x28 -> 14x14 -> 7x7 -> 4x4 (each MaxPool halves,
# padding='same' rounds 7/2 up to 4)
encoded_layer1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_layer)
encoded_layer1 = MaxPool2D((2, 2), padding='same')(encoded_layer1)
encoded_layer2 = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded_layer1)
encoded_layer2 = MaxPool2D((2, 2), padding='same')(encoded_layer2)
encoded_layer3 = Conv2D(16, (3, 3), activation='relu', padding='same')(encoded_layer2)
latent_view = MaxPool2D((2, 2), padding='same')(encoded_layer3)

# decoding architecture: 4x4 -> 8x8 -> 16x16 -> 14x14 -> 28x28
decoded_layer1 = Conv2D(16, (3, 3), activation='relu', padding='same')(latent_view)
decoded_layer1 = UpSampling2D((2, 2))(decoded_layer1)
decoded_layer2 = Conv2D(32, (3, 3), activation='relu', padding='same')(decoded_layer1)
decoded_layer2 = UpSampling2D((2, 2))(decoded_layer2)
# NOTE: default 'valid' padding here is deliberate, not an omission:
# it trims 16x16 -> 14x14 so the final UpSampling2D restores exactly 28x28.
decoded_layer3 = Conv2D(64, (3, 3), activation='relu')(decoded_layer2)
decoded_layer3 = UpSampling2D((2, 2))(decoded_layer3)
output_layer = Conv2D(1, (3, 3), padding='same', activation='sigmoid')(decoded_layer3)

# compile the model
model = Model(input_layer, output_layer)
model.compile(optimizer='adam', loss='mse')

# run the model, stopping early once val_loss has not improved for 10 epochs.
# verbose corrected from 5 to 1: Keras callback verbosity modes are 0 or 1.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
                               verbose=1, mode='auto')
# assumes train_x_n / val_x_n are noisy inputs and x_train / x_test are the
# clean reconstruction targets (denoising-autoencoder setup) — the mixed
# naming (train_x_n vs x_train) is defined outside this snippet; TODO confirm
# the pairing against where these arrays are built.
history = model.fit(train_x_n, x_train, epochs=20, batch_size=2048,
                    validation_data=(val_x_n, x_test),
                    callbacks=[early_stopping])
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement