Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
- # MLP
- from keras.models import Sequential
- from keras.layers import Dense, Dropout
- from keras.constraints import maxnorm
- from keras.utils import np_utils
- from keras import backend as K
- from keras.callbacks import TensorBoard
- from keras.wrappers.scikit_learn import KerasClassifier
- from sklearn.model_selection import StratifiedKFold, GridSearchCV
- import numpy as np
- import matplotlib.pyplot as plt
- import os
- import time
- import gc
def create_model(optimizer='adam', kernel_initializer='normal'):
    """Build and compile the MLP used for digit classification.

    The arguments are exposed (with defaults matching the original
    hard-coded values) so that GridSearchCV can tune them: param_grid
    keys must match these argument names exactly.

    Args:
        optimizer: Keras optimizer name passed to ``model.compile``.
        kernel_initializer: weight-initializer name for every Dense layer.

    Returns:
        A compiled ``keras.models.Sequential`` model.

    NOTE(review): relies on module-level ``num_inputs`` and ``num_classes``
    being defined before the wrapper first calls this function.
    """
    model = Sequential()
    # Two hidden layers with dropout for regularization.
    # NOTE(review): the original comment mentioned a kernel constraint
    # (maxnorm is imported at the top of the file) but none is applied here.
    model.add(Dense(37, input_dim=num_inputs,
                    kernel_initializer=kernel_initializer, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(20, kernel_initializer=kernel_initializer, activation='relu'))
    model.add(Dropout(0.2))
    # Softmax output over the class labels.
    model.add(Dense(num_classes, kernel_initializer=kernel_initializer,
                    activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    return model
# fix random seed for reproducibility
seed = 42
np.random.seed(seed)

# Load features (one row per sample) and integer class labels.
X_d = np.loadtxt('../datasets/mixed_extracted/digits_features.csv', delimiter=',')
y_d = np.loadtxt('../datasets/mixed_extracted/digits_target.csv', delimiter=',')

# define k-fold cross validation test harness.
# StratifiedKFold needs 1-D integer labels, so y_d is deliberately NOT
# one-hot encoded here: KerasClassifier one-hot encodes internally when
# the model's loss is categorical_crossentropy.
num_folds = 5
cv = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=seed)

# define network parameters (read by create_model via module globals)
num_inputs = X_d.shape[1]
num_classes = len(np.unique(y_d))

# Pass the build function itself — NOT the result of calling it — so the
# wrapper can construct a fresh model for every CV fold / parameter combo.
model = KerasClassifier(build_fn=create_model, verbose=0)

# grid search epochs, batch size, optimizer and kernel_initializer.
# Keys must match create_model's argument names ('optimizer', singular)
# plus the wrapper's own fit params (epochs, batch_size).
optimizers = ['rmsprop', 'adam']
init = ['glorot_uniform', 'normal', 'uniform']
epochs = [50, 100, 150]
batches = [5, 10, 20]
param_grid = dict(optimizer=optimizers, epochs=epochs,
                  batch_size=batches, kernel_initializer=init)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=cv,
                    refit=True, scoring='accuracy')

# Fit the model (runs the full grid search)
grid.fit(X_d, y_d)
print("[INFO] Training ended")

# summarize results
print("[INFO] Best parameters set found on development set:")
print()
print(grid.best_params_)
print()
print("[INFO] Grid scores on development set:")
print()
means = grid.cv_results_['mean_test_score']
stds = grid.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, grid.cv_results_['params']):
    # +/- two standard deviations ~= a 95% interval for the fold scores
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))

# Release Keras/TensorFlow graph memory now that the search is done.
gc.collect()
K.clear_session()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement