import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt  # needed for plt.show() below

from tensorflow.python.keras import backend as K
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import InputLayer, Input
from tensorflow.python.keras.layers import Reshape, MaxPooling2D
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten, Dropout
from tensorflow.python.keras.callbacks import TensorBoard
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.models import load_model

import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
from skopt.utils import use_named_args
from skopt.plots import plot_convergence

import scipy.io

from sklearn.metrics import accuracy_score

data_train = scipy.io.loadmat('train_32x32.mat')
data_test = scipy.io.loadmat('test_32x32.mat')

# Reorder the SVHN .mat arrays from (32, 32, 3, N) to (N, 32, 32, 3)
X_train, y_train = data_train['X'].transpose(3, 0, 1, 2), data_train['y']
X_test, y_test = data_test['X'].transpose(3, 0, 1, 2), data_test['y']

y_train = np.asarray(y_train, dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)

# Converting from RGB to grayscale using the standard luminance weights
def rgb2gray(images):
    return np.expand_dims(np.dot(images, [0.2990, 0.5870, 0.1140]), axis=3)

X_train = rgb2gray(X_train).astype(np.float32)
X_test = rgb2gray(X_test).astype(np.float32)

# Normalizing

# Calculate the per-pixel mean of the training set
train_mean = np.mean(X_train, axis=0)

# Calculate the per-pixel std of the training set
train_std = np.std(X_train, axis=0)

# Normalize both sets with the training-set statistics
X_train = (X_train - train_mean) / train_std
X_test = (X_test - train_mean) / train_std

validation_data = (X_test, y_test)

print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
print()

# Hyperparameter search space for scikit-optimize
dim_learning_rate = Real(low=1e-5, high=1e-1, prior='log-uniform', name='learning_rate')
dim_num_dense_layers = Integer(low=1, high=4, name='num_dense_layers')
dim_num_dense_nodes = Integer(low=40, high=1024, name='num_dense_nodes')
dim_num_dropout = Real(low=2e-1, high=4e-1, prior='log-uniform', name='num_dropout')

dimensions = [dim_learning_rate,
              dim_num_dense_layers,
              dim_num_dense_nodes,
              dim_num_dropout]

default_parameters = [1e-3, 1, 1024, 2e-1]

def create_model(learning_rate, num_dense_layers,
                 num_dense_nodes, num_dropout):

    model = Sequential()

    model.add(InputLayer(input_shape=(32, 32, 1)))

    model.add(Reshape((32, 32, 1)))

    # receives [-1, 32, 32, 1]
    # returns  [-1, 16, 16, 32]
    model.add(Conv2D(filters=32, kernel_size=5, activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # receives [-1, 16, 16, 32]
    # returns  [-1, 8, 8, 64]
    model.add(Conv2D(filters=64, kernel_size=5, activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # receives [-1, 8, 8, 64]
    # returns  [-1, 4, 4, 128]
    model.add(Conv2D(filters=128, kernel_size=5, activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    model.add(Flatten())

    # Variable number of fully connected layers, each followed by dropout
    for i in range(num_dense_layers):
        name = 'layer_dense_{0}'.format(i + 1)
        model.add(Dense(units=num_dense_nodes, activation='relu', name=name))
        model.add(Dropout(rate=num_dropout))

    # SVHN labels run from 1 to 10, so 11 output units cover them with
    # sparse categorical cross-entropy
    model.add(Dense(units=11, activation='softmax'))

    optimizer = Adam(lr=learning_rate)

    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])

    return model

path_best_model = 'best_model'

best_accuracy = 0.0

@use_named_args(dimensions=dimensions)
def fitness(learning_rate, num_dense_layers,
            num_dense_nodes, num_dropout):

    print('learning rate: {0:.1e}'.format(learning_rate))
    print('num_dense_layers:', num_dense_layers)
    print('num_dense_nodes:', num_dense_nodes)
    print('num_dropout:', num_dropout)
    print()

    model = create_model(learning_rate=learning_rate,
                         num_dense_layers=num_dense_layers,
                         num_dense_nodes=num_dense_nodes,
                         num_dropout=num_dropout)

    history = model.fit(x=X_train, y=y_train, epochs=1, batch_size=128, validation_data=validation_data)

    # 'val_acc' is the history key in tf.keras 1.x; newer versions use 'val_accuracy'
    accuracy = history.history['val_acc'][-1]

    print()
    print("Accuracy: {0:.2%}".format(accuracy))
    print()

    global best_accuracy

    # Save the model whenever it improves on the best validation accuracy so far
    if accuracy > best_accuracy:
        model.save(path_best_model)
        best_accuracy = accuracy

    del model

    K.clear_session()

    # skopt minimizes, so return the negative accuracy
    return -accuracy

# Run once with the default hyperparameters before starting the search
fitness(x=default_parameters)

search_result = gp_minimize(func=fitness, dimensions=dimensions, acq_func='EI', n_calls=13, x0=default_parameters)

plot_convergence(search_result)
plt.show()

print()
print(search_result.x)

print()
print(search_result.fun)

# Best model

model = load_model(path_best_model)

model.fit(X_train, y_train)

pred = model.predict(X_test)
pred = np.argmax(pred, axis=1)
pred = pred.reshape(-1, 1)

# Accuracy on the test set
print("Accuracy: {0:.2%}".format(accuracy_score(y_test, pred)))