Advertisement
Guest User

Untitled

a guest
Oct 15th, 2019
140
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 3.80 KB | None | 0 0
  1. # IMPORTING ALL THE NECESSARY LIBRARIES
  2. from keras .models import Sequential
  3. from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
  4. from keras.utils import np_utils
  5. from keras.callbacks import EarlyStopping
  6.  
  7. import matplotlib.pyplot as plt
  8.  
  9. # IMPORTING THE CIFAR10 DATASET BUNDLED WITH KERAS
  10. from keras.datasets import cifar100
  11.  
  12. # SPLITTING THE DATASET INTO TRAINING AND TESTING DATASETS
  13. (X_train, y_train), (X_test, y_test) = cifar100.load_data(label_mode='fine')
  14.  
  15. '''
  16. SINCE WE ARE USING TENSORFLOW AS BACKEND, WE NEED TO RESHAPE THE FEATURE MATRIX AS BEING:
  17. 1) NUMBER OF SAMPLES, 2)NUMBER OF ROWS, 3)NUMBER OF COLUMNS, 4) NUMBER OF CHANNELS
  18. '''
  19. X_train = X_train.reshape(X_train.shape[0], 32, 32, 3)
  20. X_test = X_test.reshape(X_test.shape[0], 32, 32, 3)
  21.  
  22. # NORMALIZING INPUTS FROM 0-255 TO 0.0-1.0
  23. X_train = X_train.astype('float32')
  24. X_test = X_test.astype('float32')
  25. X_train /= 255
  26. X_test /= 255
  27.  
  28. # CONVERTING LABELS TO KERAS USABLE CATEGORICAL FORMAT
  29. y_train = np_utils.to_categorical(y_train, 100)
  30. y_test = np_utils.to_categorical(y_test, 100)
  31.  
# INITIATING THE SEQUENTIAL MODEL (LAYERS ARE STACKED LINEARLY, ONE AFTER ANOTHER)
classifier = Sequential()

'''
ADDING A CONVOLUTIONAL LAYER WITH:
1) NUMBER OF FILTERS TO BE USED FOR CONVOLUTION - 64
2) FILTER SIZE - (3,3)
3) INPUT SHAPE OF THE IMAGES (NUMBER OF ROWS, NUMBER OF COLUMNS, NUMBER OF CHANNELS)
4) AN ACTIVATION FUNCTION - ReLU
'''
classifier.add(Conv2D(64, (3, 3), activation = 'relu', input_shape = (32, 32, 3)))
# ADDING A POOLING LAYER MATRIX OF SIZE 2X2 (HALVES THE SPATIAL DIMENSIONS)
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# DROPPING OUT 25% OF RANDOM NODES DURING TRAINING TO AVOID OVERFITTING
classifier.add(Dropout(0.25))

'''
ADDING ONE MORE CONVOLUTIONAL LAYER (64 FILTERS), BUT THIS TIME THE INPUT OF
THIS LAYER WILL BE THE OUTPUT OF THE PREVIOUS LAYER, SO NO input_shape IS NEEDED
'''
classifier.add(Conv2D(64, (3, 3), activation = 'relu'))
# ADDING A POOLING LAYER MATRIX OF SIZE 2X2
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# DROPPING OUT RANDOM NODES
classifier.add(Dropout(0.25))

# ADDING A THIRD CONVOLUTIONAL LAYER WITH 128 FILTERS
# (NOTE: the original comment said 64, but the code uses 128)
classifier.add(Conv2D(128, (3, 3), activation = 'relu'))
# ADDING A POOLING LAYER MATRIX OF SIZE 2X2
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# DROPPING OUT RANDOM NODES
classifier.add(Dropout(0.25))

# FLATTENING THE OUTPUT OF THE PREVIOUS LAYER TO A 1D VECTOR FOR THE DENSE LAYERS
classifier.add(Flatten())

# ADDING A FULLY CONNECTED DENSE LAYER WITH INPUT BEING THE FLATTENED ARRAY
classifier.add(Dense(units = 128, activation = 'relu'))
# DROPPING OUT RANDOM NODES
classifier.add(Dropout(0.25))

# ADDING A SECOND FULLY CONNECTED DENSE LAYER FED BY THE PREVIOUS DENSE LAYER
classifier.add(Dense(units = 128, activation = 'relu'))
# DROPPING OUT RANDOM NODES
classifier.add(Dropout(0.25))

# ADDING A THIRD FULLY CONNECTED DENSE LAYER FED BY THE PREVIOUS DENSE LAYER
classifier.add(Dense(units = 128, activation = 'relu'))
# DROPPING OUT RANDOM NODES
classifier.add(Dropout(0.25))

'''
ADDING THE OUTPUT LAYER: A DENSE LAYER WITH EXACTLY AS MANY NODES AS THERE ARE
CATEGORIES (100 LABELS). NOTICE THE ACTIVATION FUNCTION HAS BEEN CHANGED TO A
SOFTMAX (NOT SIGMOID, AS THE ORIGINAL COMMENT CLAIMED) SO THE OUTPUTS FORM A
PROBABILITY DISTRIBUTION OVER THE 100 CLASSES
'''
classifier.add(Dense(units = 100, activation = 'softmax'))
  89.  
  90. # DECLARING NUMBER OF EPOCH AND THE BATCH SIZE
  91. epochs = 10000
  92. batch_size = 256
  93. VALIDATION_PATIENCE = 20
  94.  
  95. # COMPILING THE FINAL CREATED MODEL WITH AN OPTIMIZER, LOSS FUNCTION AND EVALUATION METRICS
  96. classifier.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy', metrics = ['accuracy'])
  97.  
  98. '''
  99. IMPLEMENTING A CONVERGENCE MONITOR SO THE THE MODEL AUTOMATICALLY STOPS THE ITERATION
  100. AS SOON AS THE CONVERGENCE IS ACHIEVED
  101. '''
  102. stopper = EarlyStopping(monitor='val_loss', patience=VALIDATION_PATIENCE)
  103.  
  104. # TRAINS THE MODEL ON TRAINING DATA BATCH-BY-BATCH
  105. classifier.fit(X_train, y_train, batch_size=100, callbacks=[stopper], validation_data = (X_test, y_test), epochs=epochs, shuffle = True)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement