Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Chronological 70/30 split: the first 70% of rows train, the rest test.
# No shuffling — order is preserved because the data is a time series.
split_at = int(X.shape[0] * 0.70)
X_train, y_train = X[:split_at], y[:split_at]
X_test, y_test = X[split_at:], y[split_at:]
def create_model(activation_1='relu', activation_2='relu',
                 neurons_input=1, neurons_hidden_1=1,
                 optimizer='adam',
                 input_shape=(X_train.shape[1],)):
    """Build and compile a small feed-forward classifier.

    Two dense hidden layers feed a 2-unit softmax head, trained with
    sparse categorical cross-entropy.  Meant to be the ``build_fn`` of a
    ``KerasClassifier`` so layer widths and the optimizer can be searched.

    NOTE(review): ``input_shape`` defaults to the width of the global
    ``X_train`` captured at definition time — confirm that is intended.
    """
    def seeded_normal():
        # A fresh seeded initializer per layer, as in the original code,
        # so weight initialization stays reproducible.
        return RandomNormal(mean=0.0, stddev=0.05, seed=42)

    net = Sequential()
    net.add(Dense(neurons_input, activation=activation_1,
                  input_shape=input_shape,
                  kernel_initializer=seeded_normal(),
                  bias_initializer=seeded_normal()))
    net.add(Dense(neurons_hidden_1, activation=activation_2,
                  kernel_initializer=seeded_normal(),
                  bias_initializer=seeded_normal()))
    net.add(Dense(2, activation='softmax'))  # change to 1-sigmoid
    net.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer)
    return net
# Wrap the Keras model builder so scikit-learn's search utilities can
# drive it like an estimator.
clf = KerasClassifier(build_fn=create_model, epochs=100, verbose=0)

# Hyperparameter search space, sampled (not exhaustively gridded) by
# RandomizedSearchCV below.
param_grid = {
    'neurons_input': [20, 25, 30, 35],
    'neurons_hidden_1': [20, 25, 30, 35],
    'batch_size': [32, 60, 80, 100],
    'optimizer': ['adam'],
}

# Balanced class weights to counter label imbalance during fitting.
# FIX: modern scikit-learn requires keyword arguments here — the old
# positional call compute_class_weight('balanced', classes, y) raises
# a TypeError.
class_weights = compute_class_weight(class_weight='balanced',
                                     classes=np.unique(y_train),
                                     y=y_train)
class_weights = dict(enumerate(class_weights))

# Forward-chaining CV splits so a validation fold never precedes its
# training fold (appropriate for time-series data).
my_cv = TimeSeriesSplit(n_splits=5).split(X_train)

# FIX: with single-metric scoring, refit takes a boolean; the string form
# refit='accuracy' is only meaningful for multi-metric scoring.
rs_keras = RandomizedSearchCV(clf, param_grid, cv=my_cv, scoring='accuracy',
                              refit=True, verbose=3, n_jobs=1,
                              random_state=42)
rs_keras.fit(X_train, y_train, class_weight=class_weights)
# Carve a chronological validation set out of the training data:
# first 70% is fit on, last 30% drives early stopping.
val_size = int(0.70 * X_train.shape[0])
X_train_, X_val = X_train[:val_size], X_train[val_size:]
y_train_, y_val = y_train[:val_size], y_train[val_size:]

# Recompute balanced class weights on the reduced training split.
# FIX: keyword arguments are required by modern scikit-learn.
class_weights_ = compute_class_weight(class_weight='balanced',
                                      classes=np.unique(y_train_),
                                      y=y_train_)
class_weights_ = dict(enumerate(class_weights_))

# Final model: 25 -> 20 ReLU units, 2-way softmax head.  Seeded
# RandomNormal initializers keep weight initialization reproducible.
model = Sequential()
model.add(Dense(25, activation='relu', input_shape=(X_train.shape[1],),
                kernel_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42),
                bias_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42)))
model.add(Dense(20, activation='relu',
                kernel_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42),
                bias_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42)))
model.add(Dense(2, activation='softmax'))  # sigmoid gives worse results
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=k.optimizers.Adam(),
              metrics=['acc'])  # km.binary_f1_score()

# Stop when val_loss has not improved for 5 epochs.
# FIX: restore_best_weights=True rolls the model back to the best epoch;
# without it the model keeps the weights from 5 epochs past the optimum.
early_stopping_monitor = EarlyStopping(monitor='val_loss', mode='min',
                                       patience=5, restore_best_weights=True)
history = model.fit(X_train_, y_train_, epochs=2000, batch_size=100,
                    class_weight=class_weights_,
                    validation_data=(X_val, y_val),
                    callbacks=[early_stopping_monitor], verbose=1)
# Learning curves: training vs validation loss per epoch.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
# FIX: the second curve is the validation split (val_loss), not the
# test set — the old legend labeled it 'test'.
plt.legend(['train', 'val'], loc='upper left')
plt.show()
from sklearn.metrics import classification_report, confusion_matrix

# Predicted class = argmax over the 2 softmax probabilities.
# FIX: the old code called .round() before argmax — redundant for a
# 2-class softmax and wrong on a 0.5/0.5 tie, where banker's rounding
# maps both entries to 0 and argmax silently picks class 0.
y_pred = model.predict(X_test).argmax(axis=1)

# Per-class precision/recall/F1 plus the raw confusion matrix on the
# held-out chronological test set.
clfreport = classification_report(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
print("Test %s" % clfreport)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement