# imports needed by this script (it targets the Keras 1.x API and assumes the
# Theano channels-first image ordering, matching the (1, rows, cols) shapes below)
import os
import time

import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split

from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils

t_start = time.time()  # used later to report the total running time

# input image dimensions
img_rows, img_cols = 187, 100

# preparing and importing the dataset
path1 = '/home/fatmasaid/new_flicker'               # folder of input images
path2 = '/home/fatmasaid/new_flicker_data_resized'  # folder for the resized copies

listing = os.listdir(path1)
listing.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))  # ascending numeric order
num_samples = len(listing)

print(num_samples)

for file in listing:
    im = Image.open(os.path.join(path1, file))
    img = im.resize((img_cols, img_rows))  # PIL's resize() expects (width, height)
    gray = img.convert('L')                # convert to grayscale
    gray.save(os.path.join(path2, file), "JPEG")

imlist = os.listdir(path2)
# sort the list in ascending numeric order by image name
imlist.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))

im1 = np.array(Image.open(os.path.join(path2, imlist[0])))  # open one image to get its size
m, n = im1.shape[0:2]  # height and width of the resized images
imnbr = len(imlist)    # number of images

# create a matrix holding all images, one flattened image per row
immatrix = np.array([np.array(Image.open(os.path.join(path2, im2))).flatten()
                     for im2 in imlist], 'f')

# divide the dataset into 2 classes
label = np.ones((num_samples,), dtype=int)

label[0:64878] = 0  # images with a low number of likes are labeled 0
label[64878:] = 1   # images with a high number of likes are labeled 1

data, Label = shuffle(immatrix, label, random_state=2)
train_data = [data, Label]

print(train_data[0].shape)  # the image matrix
print(train_data[1].shape)  # the corresponding labels
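
# (optional sanity check, not in the original script: the hard-coded 64878
# cut-off assumes the files are ordered by like count, so make sure the image
# matrix and label vector agree on the sample count before training)
assert immatrix.shape[0] == num_samples == len(label)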

# batch size to train with
batch_size = 16
# number of output classes
nb_classes = 2
# number of epochs to train
nb_epoch = 150  # each epoch runs about len(X_train)/batch_size weight updates

(X, Y) = (train_data[0], train_data[1])

# STEP 1: split X and Y into training and testing sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=4)

# restore each flattened row to a single-channel image: (samples, 1, rows, cols)
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# normalization: scale pixel values from [0, 255] to [0, 1]
X_train /= 255
X_test /= 255

# report the time spent preparing and importing the dataset
t_generateArray = time.time()
print('Generating Array Time:{}'.format(t_generateArray - t_start))

print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert the class vectors to binary class matrices (one-hot encoding)
Y_train = np_utils.to_categorical(Y_train, nb_classes)
Y_test = np_utils.to_categorical(Y_test, nb_classes)

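# (e.g. with nb_classes = 2, to_categorical turns label 0 into [1., 0.] and
# label 1 into [0., 1.], the target layout categorical_crossentropy expects)
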
# STEP 2: network structure (a VGG-style stack of 3x3 convolutions)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(1, img_rows, img_cols)))
model.add(Convolution2D(64, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', init='glorot_uniform'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', init='glorot_uniform'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', init='glorot_uniform'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', init='glorot_uniform'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', init='glorot_uniform'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', init='glorot_uniform'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(Flatten())
model.add(Dense(1024, activation='relu', init='glorot_uniform'))
model.add(Dropout(0.5))
model.add(Dense(1024, activation='relu', init='glorot_uniform'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))  # one output neuron per class

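# (for reference, not in the original script: each 2x2 max-pooling halves the
# spatial size with floor rounding, so a 187x100 input shrinks as
# 187x100 -> 93x50 -> 46x25 -> 23x12 -> 11x6 -> 5x3, and the Flatten() above
# receives 512 * 5 * 3 = 7680 features)
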
# load previously saved weights to resume training
# model.load_weights("Train_Test_weights.h5")

# STEP 3: learning target (categorical cross-entropy loss with an SGD optimizer)
sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
# adagrad = Adagrad(lr=0.01, epsilon=1e-08)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

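# (optional, not in the original script: print a per-layer overview of the
# network and its parameter counts before training)
model.summary()
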
# checkpoint: keep the weights of the epoch with the best validation accuracy
checkpointer = ModelCheckpoint(filepath='new_flicker_Train_Test_weights.h5', monitor='val_acc',
                               verbose=1, save_best_only=True, mode='max')

# training the model
print('Training Start....')
hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                 verbose=1, validation_data=(X_test, Y_test), callbacks=[checkpointer])

# model.save_weights('Train_Test_weights.h5')

# evaluate the model on the held-out test set
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])

# spot-check a few predictions against the true one-hot labels
print(model.predict_classes(X_test[2:10]))
print(Y_test[2:10])
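
# (optional extension, not in the original script: summarize the two-class
# performance with sklearn's confusion matrix; rows are true classes,
# columns are predicted classes)
from sklearn.metrics import confusion_matrix
Y_pred = model.predict_classes(X_test, verbose=0)
print(confusion_matrix(Y_test.argmax(axis=1), Y_pred))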

# drawing the training curves
print('drawing the training process...')
TrainERR = hist.history['loss']
ValidERR = hist.history['val_loss']
plt.figure(0)
plt.plot(TrainERR, 'b', label='TrainERR')
plt.plot(ValidERR, 'r', label='ValidERR')
plt.legend(loc='best')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.grid(True)
# plt.show()
plt.savefig('new_flicker_Train_Test_Loss&Epoch.png', dpi=150)

Trainer = hist.history['acc']
Valider = hist.history['val_acc']
plt.figure(1)
plt.plot(Trainer, 'b', label='Trainer')
plt.plot(Valider, 'r', label='Valider')
plt.legend(loc='best')
plt.xlabel('epoch')
plt.ylabel('Accuracy')
plt.grid(True)
# plt.show()
plt.savefig('new_flicker_Train_Test_Accuracy&Epoch.png', dpi=150)

t_end = time.time()

# report total running time and the dataset-preparation portion
print('Time:{}'.format(t_end - t_start))
print('Generating Array Time:{}'.format(t_generateArray - t_start))