import numpy as np
import matplotlib.pyplot as plt
import keras as k
from keras.models import Sequential
from keras.layers import Dense
from keras.initializers import RandomNormal
from keras.callbacks import EarlyStopping
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import TimeSeriesSplit, RandomizedSearchCV
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import classification_report, confusion_matrix

# X and y are assumed to be defined upstream (feature matrix and binary labels).
# The split is chronological (no shuffling), matching TimeSeriesSplit below.
train_size = int(0.70 * X.shape[0])
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]

def create_model(activation_1='relu', activation_2='relu',
                 neurons_input=1, neurons_hidden_1=1,
                 optimizer='adam',
                 input_shape=(X_train.shape[1],)):
    # Two dense layers with seeded RandomNormal initializers for reproducibility,
    # then a 2-unit softmax head paired with sparse_categorical_crossentropy.
    model = Sequential()
    model.add(Dense(neurons_input, activation=activation_1, input_shape=input_shape,
                    kernel_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42),
                    bias_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42)))

    model.add(Dense(neurons_hidden_1, activation=activation_2,
                    kernel_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42),
                    bias_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42)))

    model.add(Dense(2, activation='softmax'))  # change to 1-sigmoid

    model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer)
    return model
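
# Optional sanity check (an addition, not in the original paste): build the model
# once with representative sizes and print its layer summary to confirm the
# builder runs; Model.summary() is standard Keras API.
create_model(neurons_input=25, neurons_hidden_1=20).summary()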

# Wrap the builder so scikit-learn's search utilities can treat it as an estimator.
clf = KerasClassifier(build_fn=create_model, epochs=100, verbose=0)

param_grid = {
    'neurons_input': [20, 25, 30, 35],
    'neurons_hidden_1': [20, 25, 30, 35],
    'batch_size': [32, 60, 80, 100],
    'optimizer': ['adam'],
}

# Balanced class weights offset label imbalance; keyword arguments are used
# because newer scikit-learn versions require them for compute_class_weight.
class_weights = compute_class_weight(class_weight='balanced',
                                     classes=np.unique(y_train), y=y_train)
class_weights = dict(enumerate(class_weights))

my_cv = TimeSeriesSplit(n_splits=5).split(X_train)
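
# Illustration (an addition, not in the original paste): TimeSeriesSplit yields
# expanding-window folds, so each fold validates on data strictly later than
# its training slice. A fresh splitter is used here so the my_cv generator
# consumed by RandomizedSearchCV below stays untouched.
for fold, (tr_idx, va_idx) in enumerate(TimeSeriesSplit(n_splits=5).split(X_train)):
    print("fold %d: train [0..%d], val [%d..%d]"
          % (fold, tr_idx[-1], va_idx[0], va_idx[-1]))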

# With a single scoring metric, refit takes a plain boolean.
rs_keras = RandomizedSearchCV(clf, param_distributions=param_grid, cv=my_cv,
                              scoring='accuracy', refit=True,
                              verbose=3, n_jobs=1, random_state=42)

rs_keras.fit(X_train, y_train, class_weight=class_weights)
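
# Quick inspection (an addition, not in the original paste): best_params_ and
# best_score_ are standard RandomizedSearchCV attributes once fit has run.
print("Best params:", rs_keras.best_params_)
print("Best CV accuracy: %.4f" % rs_keras.best_score_)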

# Carve a validation set out of the training data, again chronologically.
val_size = int(0.70 * X_train.shape[0])
X_train_, X_val = X_train[:val_size], X_train[val_size:]
y_train_, y_val = y_train[:val_size], y_train[val_size:]

class_weights_ = compute_class_weight(class_weight='balanced',
                                      classes=np.unique(y_train_), y=y_train_)
class_weights_ = dict(enumerate(class_weights_))

model = Sequential()
model.add(Dense(25, activation='relu', input_shape=(X_train.shape[1],),
                kernel_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42),
                bias_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42)))

model.add(Dense(20, activation='relu',
                kernel_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42),
                bias_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42)))

model.add(Dense(2, activation='softmax'))  # sigmoid gives worse results

model.compile(loss='sparse_categorical_crossentropy', optimizer=k.optimizers.Adam(),
              metrics=['acc'])  # km.binary_f1_score()

early_stopping_monitor = EarlyStopping(monitor='val_loss', mode='min', patience=5)

history = model.fit(X_train_, y_train_, epochs=2000, batch_size=100,
                    class_weight=class_weights_, validation_data=(X_val, y_val),
                    callbacks=[early_stopping_monitor], verbose=1)
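
# Optional check (an addition, not in the original paste): stopped_epoch is a
# standard EarlyStopping attribute; the best validation loss comes from history.
print("Stopped at epoch:", early_stopping_monitor.stopped_epoch)
print("Best val_loss: %.4f" % min(history.history['val_loss']))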

# Loss curves for the training and validation sets.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

# Softmax outputs are per-class probabilities; argmax picks the predicted class.
y_pred = model.predict(X_test)
y_pred = y_pred.argmax(axis=1)

clfreport = classification_report(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
print("Test %s" % clfreport)
print("Confusion matrix:\n%s" % cm)