Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import numpy as np
import tensorflow as tf
# Use the Keras bundled with TensorFlow rather than the standalone `keras`
# package: mixing the two can cause version-mismatch errors, and the rest of
# this script builds the model through `tf.keras`.
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Explicit imports instead of `from grammar import *` — these are the only
# grammar helpers this script uses.
from grammar import (
    embedded_reber_generate,
    embedded_reber_get_targets,
    get_network_targets,
    str_to_num,
)
def generate_data(num_samples):
    """Generate `num_samples` embedded Reber grammar strings with targets.

    Args:
        num_samples: number of sequences to generate.

    Returns:
        A 3-tuple of NumPy arrays:
        - x_data: per-sample integer-encoded input sequences (variable
          length, hence dtype=object).
        - y_data: per-sample network targets (same variable lengths).
        - data_len: length of each generated grammar string (int array).
    """
    x_data = []
    y_data = []
    data_len = []
    for _ in range(num_samples):
        er_grammar = embedded_reber_generate()
        er_target = embedded_reber_get_targets(er_grammar)
        x_data.append(str_to_num(er_grammar))
        y_data.append(get_network_targets(er_target))
        data_len.append(len(er_grammar))
    # The sequences have different lengths, so the first two arrays are
    # ragged; NumPy >= 1.24 raises ValueError when building a ragged array
    # unless dtype=object is requested explicitly. Callers pad these with
    # pad_sequences afterwards, which accepts object arrays/lists of lists.
    return (
        np.array(x_data, dtype=object),
        np.array(y_data, dtype=object),
        np.array(data_len),
    )
if __name__ == '__main__':
    # Task 1: Generate 5000 training samples, 500 validation and 500 test samples from the embedded Reber grammar
    x_train, y_train, train_len = generate_data(num_samples=5000)
    x_val, y_val, val_len = generate_data(num_samples=500)
    x_test, y_test, test_len = generate_data(num_samples=500)

    # Add padding: every split is padded (post) to the longest sequence
    # observed in any of the three splits.
    pad_len = max(np.max(train_len), np.max(val_len), np.max(test_len))
    x_train, y_train, x_val, y_val, x_test, y_test = (
        pad_sequences(split, maxlen=pad_len, padding='post')
        for split in (x_train, y_train, x_val, y_val, x_test, y_test)
    )

    # ============================================================================================
    # TRAIN THE NETWORK
    # ============================================================================================
    tf.keras.backend.clear_session()

    # Hyper-parameters
    hidden_units = 128
    batch_size = 32
    learning_rate = 0.001

    # Embedding (7-dim output, index 0 reserved for padding via mask_zero)
    # -> LSTM over the full sequence -> per-timestep sigmoid over 7 units.
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(input_dim=8, output_dim=7,
                                  input_length=len(x_train[0]),
                                  mask_zero=True),
        tf.keras.layers.LSTM(units=hidden_units, activation='relu',
                             return_sequences=True),
        tf.keras.layers.Dense(7, activation='sigmoid'),
    ])
    model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer=tf.keras.optimizers.Adam(learning_rate),
                  metrics=['accuracy'])
    hist = model.fit(x_train, y_train,
                     validation_data=(x_val, y_val),
                     batch_size=batch_size, epochs=10)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement