# The model overfits around 20 epochs. The saved checkpoint model works well on the test set, giving just 18 false positives
# compared to 36 in other cases. The model is shallow compared to those used by other researchers. The batch size of 128 and a
# small learning rate were significant factors in reaching low losses. The next attempt will be to increase the learning rate and dropout.

# This was trained on the new dataset.

import os

from keras import backend as K
from keras import optimizers
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator

train_data_path = 'detection_data_1/train/'
validation_data_path = 'detection_data_1/val/'
test_data_path = 'detection_data_1/test/'

# Parameters
img_width, img_height = 150, 250

nb_train_samples = sum(len(files) for _, _, files in os.walk(train_data_path))
nb_validation_samples = sum(len(files) for _, _, files in os.walk(validation_data_path))
# print(nb_train_samples)
epochs = 100
batch_size = 128

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

# K.set_image_dim_ordering('th')

model = Sequential()
model.add(Conv2D(64, (7, 7), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))

model.add(Conv2D(64, (7, 7), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))

model.add(Conv2D(64, (7, 7), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))

model.add(Conv2D(128, (7, 7), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(128, (7, 7), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(128, (7, 7), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(256, (7, 7), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adam(lr=3e-4),
              metrics=['accuracy'])

from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint

checkpoint = ModelCheckpoint(filepath='checkpointORCA_adam-{epoch:02d}-{val_loss:.2f}.h5',
                             monitor='val_loss', verbose=0, save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=20, min_lr=1e-8)

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2)

# This is the augmentation configuration used for validation and testing:
# only rescaling.
test_datagen = ImageDataGenerator(rescale=1. / 255)

# Change the batch size according to your system RAM.
train_batchsize = 128
val_batchsize = 128

train_generator = train_datagen.flow_from_directory(
    train_data_path,
    target_size=(img_width, img_height),
    batch_size=train_batchsize,
    class_mode='binary',
    shuffle=True)

# train_generator.reset()
# validation_generator.reset()
validation_generator = test_datagen.flow_from_directory(
    validation_data_path,
    target_size=(img_width, img_height),
    batch_size=val_batchsize,
    class_mode='binary',
    shuffle=False)

# validation_generator.reset()

history = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[checkpoint, reduce_lr])
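
# Not part of the original script: a minimal sketch that plots the loss curves captured in
# `history`, to make the overfitting noted in the header comment visible. It assumes matplotlib
# is installed; the output filename is illustrative.
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('binary cross-entropy loss')
plt.legend()
plt.savefig('orca_detection_loss.png')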

# model.save_weights('orca_detection_3.h5')
model.save('OrcaCNN_detection_adam_15x25.h5')
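
# Not part of the original script: a hedged sketch for evaluating the best saved checkpoint on the
# held-out test set, to reproduce the false-positive count mentioned in the header comment. The
# checkpoint filename is illustrative, and it is assumed that flow_from_directory assigns the
# "no orca" folder to class index 0 and the "orca" folder to class index 1 (indices follow the
# alphabetical order of the folder names, so check test_generator.class_indices).
from keras.models import load_model
from sklearn.metrics import confusion_matrix

best_model = load_model('checkpointORCA_adam-33-0.10.h5')  # illustrative checkpoint name

test_generator = test_datagen.flow_from_directory(
    test_data_path,
    target_size=(img_width, img_height),
    batch_size=1,
    class_mode='binary',
    shuffle=False)

probs = best_model.predict_generator(test_generator, steps=test_generator.samples)
preds = (probs.ravel() > 0.5).astype(int)

# Rows are true classes and columns are predicted classes, so with the positive class at
# index 1 the entry cm[0, 1] is the number of false positives.
cm = confusion_matrix(test_generator.classes, preds)
print(cm)
print('False positives:', cm[0, 1])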
Found 22790 images belonging to 2 classes.
Found 16574 images belonging to 2 classes.
Epoch 1/100
178/178 [==============================] - 431s 2s/step - loss: 0.4748 - acc: 0.7393 - val_loss: 0.2205 - val_acc: 0.9122
Epoch 2/100
178/178 [==============================] - 401s 2s/step - loss: 0.2067 - acc: 0.9208 - val_loss: 0.1519 - val_acc: 0.9383
Epoch 3/100
178/178 [==============================] - 404s 2s/step - loss: 0.1670 - acc: 0.9355 - val_loss: 0.1493 - val_acc: 0.9413
Epoch 4/100
178/178 [==============================] - 403s 2s/step - loss: 0.1567 - acc: 0.9405 - val_loss: 0.1545 - val_acc: 0.9440
Epoch 5/100
178/178 [==============================] - 402s 2s/step - loss: 0.1401 - acc: 0.9467 - val_loss: 0.1239 - val_acc: 0.9508
Epoch 6/100
178/178 [==============================] - 400s 2s/step - loss: 0.1303 - acc: 0.9494 - val_loss: 0.1279 - val_acc: 0.9509
Epoch 7/100
178/178 [==============================] - 401s 2s/step - loss: 0.1286 - acc: 0.9505 - val_loss: 0.1196 - val_acc: 0.9536
Epoch 8/100
178/178 [==============================] - 400s 2s/step - loss: 0.1317 - acc: 0.9492 - val_loss: 0.1193 - val_acc: 0.9542
Epoch 9/100
178/178 [==============================] - 401s 2s/step - loss: 0.1213 - acc: 0.9534 - val_loss: 0.1264 - val_acc: 0.9519
Epoch 10/100
178/178 [==============================] - 400s 2s/step - loss: 0.1133 - acc: 0.9552 - val_loss: 0.1113 - val_acc: 0.9566
Epoch 11/100
178/178 [==============================] - 399s 2s/step - loss: 0.1126 - acc: 0.9572 - val_loss: 0.1048 - val_acc: 0.9594
Epoch 12/100
178/178 [==============================] - 399s 2s/step - loss: 0.1133 - acc: 0.9550 - val_loss: 0.1201 - val_acc: 0.9556
Epoch 13/100
178/178 [==============================] - 399s 2s/step - loss: 0.1121 - acc: 0.9564 - val_loss: 0.1085 - val_acc: 0.9579
Epoch 14/100
178/178 [==============================] - 399s 2s/step - loss: 0.1045 - acc: 0.9599 - val_loss: 0.1072 - val_acc: 0.9594
Epoch 15/100
178/178 [==============================] - 399s 2s/step - loss: 0.1036 - acc: 0.9590 - val_loss: 0.1111 - val_acc: 0.9579
Epoch 16/100
178/178 [==============================] - 396s 2s/step - loss: 0.1018 - acc: 0.9602 - val_loss: 0.1133 - val_acc: 0.9576
Epoch 17/100
178/178 [==============================] - 394s 2s/step - loss: 0.1016 - acc: 0.9589 - val_loss: 0.1139 - val_acc: 0.9555
Epoch 18/100
178/178 [==============================] - 394s 2s/step - loss: 0.1024 - acc: 0.9602 - val_loss: 0.1109 - val_acc: 0.9575
Epoch 19/100
178/178 [==============================] - 393s 2s/step - loss: 0.0962 - acc: 0.9624 - val_loss: 0.1094 - val_acc: 0.9590
Epoch 20/100
178/178 [==============================] - 394s 2s/step - loss: 0.0979 - acc: 0.9619 - val_loss: 0.1052 - val_acc: 0.9602
Epoch 21/100
178/178 [==============================] - 393s 2s/step - loss: 0.1014 - acc: 0.9611 - val_loss: 0.1063 - val_acc: 0.9589
Epoch 22/100
178/178 [==============================] - 393s 2s/step - loss: 0.0963 - acc: 0.9629 - val_loss: 0.1000 - val_acc: 0.9632
Epoch 23/100
178/178 [==============================] - 394s 2s/step - loss: 0.0918 - acc: 0.9642 - val_loss: 0.1106 - val_acc: 0.9601
Epoch 24/100
178/178 [==============================] - 394s 2s/step - loss: 0.0920 - acc: 0.9633 - val_loss: 0.1088 - val_acc: 0.9583
Epoch 25/100
178/178 [==============================] - 394s 2s/step - loss: 0.0942 - acc: 0.9632 - val_loss: 0.1140 - val_acc: 0.9568
Epoch 26/100
178/178 [==============================] - 394s 2s/step - loss: 0.0877 - acc: 0.9661 - val_loss: 0.1025 - val_acc: 0.9599
Epoch 27/100
178/178 [==============================] - 394s 2s/step - loss: 0.0848 - acc: 0.9660 - val_loss: 0.1240 - val_acc: 0.9566
Epoch 28/100
178/178 [==============================] - 394s 2s/step - loss: 0.0917 - acc: 0.9644 - val_loss: 0.1186 - val_acc: 0.9573
Epoch 29/100
178/178 [==============================] - 394s 2s/step - loss: 0.0841 - acc: 0.9679 - val_loss: 0.1086 - val_acc: 0.9568
Epoch 30/100
178/178 [==============================] - 393s 2s/step - loss: 0.0827 - acc: 0.9671 - val_loss: 0.1054 - val_acc: 0.9594
Epoch 31/100
178/178 [==============================] - 394s 2s/step - loss: 0.0859 - acc: 0.9671 - val_loss: 0.1010 - val_acc: 0.9619
Epoch 32/100
178/178 [==============================] - 395s 2s/step - loss: 0.0861 - acc: 0.9658 - val_loss: 0.1034 - val_acc: 0.9587
Epoch 33/100
178/178 [==============================] - 396s 2s/step - loss: 0.0792 - acc: 0.9702 - val_loss: 0.0979 - val_acc: 0.9613
Epoch 34/100
178/178 [==============================] - 395s 2s/step - loss: 0.0786 - acc: 0.9697 - val_loss: 0.1149 - val_acc: 0.9571
Epoch 35/100
178/178 [==============================] - 396s 2s/step - loss: 0.0805 - acc: 0.9686 - val_loss: 0.1038 - val_acc: 0.9601
Epoch 36/100
178/178 [==============================] - 397s 2s/step - loss: 0.0825 - acc: 0.9684 - val_loss: 0.1026 - val_acc: 0.9594
Epoch 37/100
178/178 [==============================] - 396s 2s/step - loss: 0.0874 - acc: 0.9654 - val_loss: 0.1283 - val_acc: 0.9556
Epoch 38/100
178/178 [==============================] - 395s 2s/step - loss: 0.0789 - acc: 0.9695 - val_loss: 0.1031 - val_acc: 0.9608
Epoch 39/100
178/178 [==============================] - 396s 2s/step - loss: 0.0727 - acc: 0.9728 - val_loss: 0.1166 - val_acc: 0.9571
Epoch 40/100
178/178 [==============================] - 394s 2s/step - loss: 0.0743 - acc: 0.9705 - val_loss: 0.1040 - val_acc: 0.9617
Epoch 41/100
178/178 [==============================] - 393s 2s/step - loss: 0.0706 - acc: 0.9708 - val_loss: 0.1103 - val_acc: 0.9577
Epoch 42/100
178/178 [==============================] - 394s 2s/step - loss: 0.0809 - acc: 0.9691 - val_loss: 0.1044 - val_acc: 0.9606
Epoch 43/100
178/178 [==============================] - 394s 2s/step - loss: 0.0733 - acc: 0.9718 - val_loss: 0.1070 - val_acc: 0.9591
Epoch 44/100
178/178 [==============================] - 395s 2s/step - loss: 0.0733 - acc: 0.9732 - val_loss: 0.1142 - val_acc: 0.9571
Epoch 45/100
178/178 [==============================] - 395s 2s/step - loss: 0.0715 - acc: 0.9724 - val_loss: 0.0987 - val_acc: 0.9642
Epoch 46/100
178/178 [==============================] - 395s 2s/step - loss: 0.0770 - acc: 0.9702 - val_loss: 0.1067 - val_acc: 0.9605
Epoch 47/100
178/178 [==============================] - 395s 2s/step - loss: 0.0667 - acc: 0.9745 - val_loss: 0.1050 - val_acc: 0.9596
Epoch 48/100
178/178 [==============================] - 393s 2s/step - loss: 0.0704 - acc: 0.9717 - val_loss: 0.1079 - val_acc: 0.9607
Epoch 49/100
178/178 [==============================] - 393s 2s/step - loss: 0.0701 - acc: 0.9717 - val_loss: 0.1074 - val_acc: 0.9601
Epoch 50/100
178/178 [==============================] - 394s 2s/step - loss: 0.0678 - acc: 0.9738 - val_loss: 0.1088 - val_acc: 0.9601
Epoch 51/100
178/178 [==============================] - 393s 2s/step - loss: 0.0659 - acc: 0.9743 - val_loss: 0.1066 - val_acc: 0.9587
Epoch 52/100
178/178 [==============================] - 393s 2s/step - loss: 0.0637 - acc: 0.9735 - val_loss: 0.1080 - val_acc: 0.9588
Epoch 53/100
178/178 [==============================] - 394s 2s/step - loss: 0.0599 - acc: 0.9768 - val_loss: 0.1099 - val_acc: 0.9614
Epoch 54/100
178/178 [==============================] - 395s 2s/step - loss: 0.0604 - acc: 0.9765 - val_loss: 0.1128 - val_acc: 0.9595
Epoch 55/100
178/178 [==============================] - 395s 2s/step - loss: 0.0614 - acc: 0.9765 - val_loss: 0.1093 - val_acc: 0.9605
Epoch 56/100
178/178 [==============================] - 395s 2s/step - loss: 0.0513 - acc: 0.9799 - val_loss: 0.1062 - val_acc: 0.9605
Epoch 57/100
178/178 [==============================] - 395s 2s/step - loss: 0.0519 - acc: 0.9803 - val_loss: 0.1151 - val_acc: 0.9593
Epoch 58/100
178/178 [==============================] - 394s 2s/step - loss: 0.0623 - acc: 0.9770 - val_loss: 0.1097 - val_acc: 0.9603
Epoch 59/100
178/178 [==============================] - 394s 2s/step - loss: 0.0517 - acc: 0.9788 - val_loss: 0.1201 - val_acc: 0.9580
Epoch 60/100
178/178 [==============================] - 393s 2s/step - loss: 0.0485 - acc: 0.9801 - val_loss: 0.1154 - val_acc: 0.9606
Epoch 61/100
178/178 [==============================] - 394s 2s/step - loss: 0.0514 - acc: 0.9799 - val_loss: 0.1190 - val_acc: 0.9601
Epoch 62/100
178/178 [==============================] - 394s 2s/step - loss: 0.0503 - acc: 0.9803 - val_loss: 0.1159 - val_acc: 0.9590
Epoch 63/100
178/178 [==============================] - 394s 2s/step - loss: 0.0466 - acc: 0.9826 - val_loss: 0.1105 - val_acc: 0.9606
Epoch 64/100
178/178 [==============================] - 394s 2s/step - loss: 0.0481 - acc: 0.9805 - val_loss: 0.1188 - val_acc: 0.9607
Epoch 65/100
178/178 [==============================] - 394s 2s/step - loss: 0.0454 - acc: 0.9822 - val_loss: 0.1147 - val_acc: 0.9616
Epoch 66/100
178/178 [==============================] - 394s 2s/step - loss: 0.0495 - acc: 0.9811 - val_loss: 0.1160 - val_acc: 0.9587
Epoch 67/100
178/178 [==============================] - 394s 2s/step - loss: 0.0472 - acc: 0.9816 - val_loss: 0.1106 - val_acc: 0.9620
Epoch 68/100
178/178 [==============================] - 394s 2s/step - loss: 0.0442 - acc: 0.9830 - val_loss: 0.1161 - val_acc: 0.9598
Epoch 69/100
178/178 [==============================] - 394s 2s/step - loss: 0.0426 - acc: 0.9835 - val_loss: 0.1163 - val_acc: 0.9594
Epoch 70/100
178/178 [==============================] - 395s 2s/step - loss: 0.0459 - acc: 0.9816 - val_loss: 0.1200 - val_acc: 0.9553
Epoch 71/100
178/178 [==============================] - 395s 2s/step - loss: 0.0406 - acc: 0.9841 - val_loss: 0.1301 - val_acc: 0.9541