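"""Bayesian hyperparameter optimization (scikit-optimize's gp_minimize) of a
Keras CNN for SVHN digit classification: the learning rate, number and width
of dense layers, and dropout rate are tuned against validation accuracy."""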
import warnings
warnings.filterwarnings('ignore')

import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from sklearn.metrics import accuracy_score

from tensorflow.python.keras import backend as K
from tensorflow.python.keras.models import Sequential, load_model
from tensorflow.python.keras.layers import InputLayer, Reshape, MaxPooling2D
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten, Dropout
from tensorflow.python.keras.optimizers import Adam

from skopt import gp_minimize
from skopt.space import Real, Integer
from skopt.utils import use_named_args
from skopt.plots import plot_convergence
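# SVHN .mat files store images as (32, 32, 3, N); move the sample axis to the
# front so the arrays become (N, 32, 32, 3).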
data_train = scipy.io.loadmat('train_32x32.mat')
data_test = scipy.io.loadmat('test_32x32.mat')
X_train, y_train = data_train['X'].transpose(3, 0, 1, 2), data_train['y']
X_test, y_test = data_test['X'].transpose(3, 0, 1, 2), data_test['y']
y_train = np.asarray(y_train, dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)
# Convert from RGB to grayscale using the ITU-R BT.601 luma weights.
def rgb2gray(images):
    return np.expand_dims(np.dot(images, [0.2990, 0.5870, 0.1140]), axis=3)

X_train = rgb2gray(X_train).astype(np.float32)
X_test = rgb2gray(X_test).astype(np.float32)
# Standardize with the training set's per-pixel mean and std; the same
# statistics are applied to the test set to avoid leakage.
train_mean = np.mean(X_train, axis=0)
train_std = np.std(X_train, axis=0)
X_train = (X_train - train_mean) / train_std
X_test = (X_test - train_mean) / train_std
validation_data = (X_test, y_test)

print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
print()
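# Search space: four hyperparameters. The learning rate and dropout rate use
# log-uniform priors; the dense-layer count and width are integers.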
dim_learning_rate = Real(low=1e-5, high=1e-1, prior='log-uniform', name='learning_rate')
dim_num_dense_layers = Integer(low=1, high=4, name='num_dense_layers')
dim_num_dense_nodes = Integer(low=40, high=1024, name='num_dense_nodes')
dim_num_dropout = Real(low=2e-1, high=4e-1, prior='log-uniform', name='num_dropout')

dimensions = [dim_learning_rate,
              dim_num_dense_layers,
              dim_num_dense_nodes,
              dim_num_dropout]

default_parameters = [1e-3, 1, 1024, 2e-1]
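# Build a compiled CNN: three conv/max-pool stages (32 -> 64 -> 128 filters),
# a tunable stack of dense+dropout layers, and an 11-way softmax (SVHN labels
# run 1-10, so class index 10 must be representable).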
def create_model(learning_rate, num_dense_layers,
                 num_dense_nodes, num_dropout):
    model = Sequential()
    model.add(InputLayer(input_shape=(32, 32, 1)))
    model.add(Reshape((32, 32, 1)))

    # receives [-1, 32, 32, 1], returns [-1, 16, 16, 32]
    model.add(Conv2D(filters=32, kernel_size=5, activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # receives [-1, 16, 16, 32], returns [-1, 8, 8, 64]
    model.add(Conv2D(filters=64, kernel_size=5, activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # receives [-1, 8, 8, 64], returns [-1, 4, 4, 128]
    model.add(Conv2D(filters=128, kernel_size=5, activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    model.add(Flatten())

    for i in range(num_dense_layers):
        name = 'layer_dense_{0}'.format(i + 1)
        model.add(Dense(units=num_dense_nodes, activation='relu', name=name))
        model.add(Dropout(rate=num_dropout))

    model.add(Dense(units=11, activation='softmax'))

    optimizer = Adam(lr=learning_rate)
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
path_best_model = 'best_model'
best_accuracy = 0.0
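# Objective for gp_minimize. @use_named_args unpacks a point from `dimensions`
# into keyword arguments. skopt minimizes, so the negated validation accuracy
# is returned; the best model seen so far is saved to disk.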
@use_named_args(dimensions=dimensions)
def fitness(learning_rate, num_dense_layers,
            num_dense_nodes, num_dropout):
    print('learning rate: {0:.1e}'.format(learning_rate))
    print('num_dense_layers:', num_dense_layers)
    print('num_dense_nodes:', num_dense_nodes)
    print('num_dropout:', num_dropout)
    print()

    model = create_model(learning_rate=learning_rate,
                         num_dense_layers=num_dense_layers,
                         num_dense_nodes=num_dense_nodes,
                         num_dropout=num_dropout)

    history = model.fit(x=X_train, y=y_train, epochs=1, batch_size=128,
                        validation_data=validation_data)

    # TF 1.x Keras logs this metric as 'val_acc'; newer versions use 'val_accuracy'.
    accuracy = history.history['val_acc'][-1]
    print()
    print("Accuracy: {0:.2%}".format(accuracy))
    print()

    global best_accuracy
    if accuracy > best_accuracy:
        model.save(path_best_model)
        best_accuracy = accuracy

    # Free the model and the TensorFlow graph between evaluations.
    del model
    K.clear_session()

    return -accuracy
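# Sanity-check the objective once at the default point before searching.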
fitness(x=default_parameters)
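# Run Bayesian optimization with the Expected Improvement acquisition
# function; n_calls=13 total objective evaluations, starting from x0.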
search_result = gp_minimize(func=fitness,
                            dimensions=dimensions,
                            acq_func='EI',
                            n_calls=13,
                            x0=default_parameters)

plot_convergence(search_result)
plt.show()

print()
print(search_result.x)
print()
print(search_result.fun)
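# Optional sketch: list every evaluated point with its (negated) validation
# accuracy, best first, using the x_iters/func_vals fields that gp_minimize
# returns on its OptimizeResult.
for loss, point in sorted(zip(search_result.func_vals, search_result.x_iters)):
    print(loss, point)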
# Best model: reload it, train once more, and evaluate on the test set.
model = load_model(path_best_model)
model.fit(X_train, y_train)

pred = model.predict(X_test)
pred = np.argmax(pred, axis=1)
pred = pred.reshape(-1, 1)  # match the (N, 1) shape of y_test

# Accuracy on the test set
print("Accuracy: {0:.2%}".format(accuracy_score(y_test, pred)))