# define CNN + GRU model
import numpy as np
from keras.models import Sequential
from keras.layers import (Conv2D, GRU, Dense, Dropout, Reshape,
                          BatchNormalization, LeakyReLU)

# NOTE: output_dim (number of model outputs) is assumed to be defined elsewhere.

model = Sequential()

# Conv2D input shape per sample: (rows, cols, channels) -> here a 120x40 single-channel image
model.add(Conv2D(
    filters=50,
    kernel_size=(5, 5),
    input_shape=(120, 40, 1),
    padding='same', kernel_initializer='TruncatedNormal'))
model.add(LeakyReLU())
model.add(BatchNormalization())  # normalizes the layer activations
model.add(Dropout(0.2))
print('L1 Output:', model.output_shape)

model.add(Conv2D(
    filters=25,
    kernel_size=(3, 3),
    padding='same', kernel_initializer='TruncatedNormal'))
model.add(LeakyReLU())
model.add(BatchNormalization())
model.add(Dropout(0.2))
print('L2 Output:', model.output_shape)

model.add(Conv2D(
    filters=10,
    kernel_size=(1, 1),
    padding='same', kernel_initializer='TruncatedNormal'))
model.add(LeakyReLU())
model.add(BatchNormalization())
model.add(Dropout(0.2))
print('L3 Output:', model.output_shape)

# Collapse the (120, 40, 10) feature maps into 120 timesteps of 10*40 = 400 features each
model.add(Reshape((120, 10 * 40)))
print('Reshape Output:', model.output_shape)

# GRU input should be: (batch_size, timesteps, input_dim)
model.add(GRU(10, return_sequences=False, activation='sigmoid'))
model.add(Dropout(0.2))
print('GRU Output:', model.output_shape)

#model.add(Flatten())
#print('Flatten Output:', model.output_shape)

model.add(Dense(10))
model.add(LeakyReLU())
model.add(Dropout(0.2))
print('Dense(10) Output:', model.output_shape)

model.add(Dense(units=output_dim, kernel_initializer='TruncatedNormal'))
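
# --- Minimal usage sketch (an assumption, not part of the original paste) ---
# Illustrates how this model might be compiled and trained on random data with
# the expected input shape (samples, 120, 40, 1); the optimizer, loss, batch
# size and epoch count below are placeholder choices, not the author's settings.
model.compile(optimizer='adam', loss='mse')
model.summary()

X_dummy = np.random.rand(32, 120, 40, 1).astype('float32')              # 32 fake samples
y_dummy = np.random.rand(32, model.output_shape[-1]).astype('float32')  # matches the final Dense layer
model.fit(X_dummy, y_dummy, batch_size=8, epochs=1, verbose=1)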