Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
def create_model(x_train_ts, y_train_ts, x_test_ts, y_test_ts):
    """Build, train and evaluate a stateful LSTM for a hyperas/TPE search.

    The ``{{choice(...)}}`` / ``{{uniform(...)}}`` markers are hyperas
    template expressions, expanded by ``optim.minimize`` before execution.

    Parameters are ignored and re-fetched from ``build_data()``; presumably
    hyperas requires this exact signature — TODO confirm against the caller.

    Returns:
        dict with the keys hyperopt expects: ``loss`` (negated best
        validation accuracy, since hyperopt minimizes), ``status`` and
        ``model`` (the trained Keras model).
    """
    # NOTE(review): incoming arguments are immediately overwritten here.
    x_train_ts, y_train_ts, x_test_ts, y_test_ts = build_data()

    lstm_model = Sequential()
    # Stateful LSTM needs a fixed batch shape: (batch_size, timesteps, data_dim).
    lstm_model.add(LSTM({{choice([50, 100, 150])}},
                        batch_input_shape=(BATCH_SIZE, TIME_STEPS, x_train_ts.shape[2]),
                        dropout=0.2, recurrent_dropout=0.2,
                        stateful=True, return_sequences=True,
                        kernel_initializer='random_uniform'))
    # Optionally stack a second LSTM layer.
    if {{choice(['one_lstm', 'two_lstm'])}} == 'two_lstm':
        lstm_model.add(LSTM({{choice([30, 60, 80])}}, dropout={{choice([0.1, 0.2, 0.3])}}))
    # Optionally insert a hidden Dense layer before the output.
    if {{choice(['one_dense', 'two_dense'])}} == 'two_dense':
        lstm_model.add(Dense({{choice([10, 20])}}, activation='relu'))
    lstm_model.add(Dense(1, activation='sigmoid'))

    # BUG FIX: the original bounds were uniform(000.1, 0.1) == uniform(0.1, 0.1),
    # a degenerate interval that always yielded lr = 0.1 and never searched the
    # learning rate at all. Search the usual 1e-3 .. 1e-1 range instead.
    if {{choice(['sgd', 'rms'])}} == 'rms':
        optimizer = optimizers.RMSprop(lr={{uniform(0.001, 0.1)}})
    else:
        optimizer = optimizers.SGD(lr={{uniform(0.001, 0.1)}}, decay=1e-6,
                                   momentum=0.9, nesterov=True)

    # BUG FIX: history.history['val_acc'] is read below, but accuracy was never
    # tracked — without metrics=['accuracy'] that key does not exist and the
    # lookup raises KeyError. (Loss stays MSE; binary_crossentropy was the
    # original author's noted alternative.)
    lstm_model.compile(loss='mean_squared_error', optimizer=optimizer,
                       metrics=['accuracy'])  # binary_crossentropy

    history = lstm_model.fit(x_train_ts, y_train_ts,
                             epochs={{choice([20, 40, 60, 70])}},
                             verbose=2, batch_size=BATCH_SIZE,
                             validation_data=(x_test_ts, y_test_ts),
                             callbacks=[LogMetrics(search_params, params, comb_no),
                                        csv_logger])

    best_val_acc = np.amax(history.history['val_acc'])
    print('Best validation acc of epoch:', best_val_acc)
    # BUG FIX: hyperopt MINIMIZES 'loss'. Returning the positive best accuracy
    # made the search favor the WORST models; negate it, as the original
    # comment ("if accuracy use '-' sign") already hinted.
    return {'loss': -best_val_acc, 'status': STATUS_OK, 'model': lstm_model}
# Run the TPE-driven hyperparameter search over create_model's template
# choices; returns the best parameter assignment and the best trained model.
best_run, best_model = optim.minimize(
    model=create_model,
    data=data_dummy,
    algo=tpe.suggest,
    max_evals=2000,
    trials=Trials(),
)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement