{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/coea/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
" from ._conv import register_converters as _register_converters\n"
]
}
],
"source": [
"import tensorflow as tf\n",
"import os\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from keras.models import Sequential\n",
"from keras.layers import Conv2D\n",
"from keras.layers import MaxPooling2D\n",
"from keras.layers import Flatten\n",
"from keras.layers import Dense, Activation, Dropout\n",
"from keras.layers import BatchNormalization"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# Initialising the CNN\n",
"classifier = Sequential()\n",
"\n",
"# Convolution\n",
"classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3)))\n",
"classifier.add(BatchNormalization(momentum = 0.85, epsilon = 0.0001))\n",
"classifier.add(Activation('relu'))\n",
"\n",
"# Pooling\n",
"classifier.add(MaxPooling2D(pool_size = (2, 2)))\n",
"classifier.add(Dropout(0.2))\n",
"\n",
"# Adding a second convolutional layer\n",
"classifier.add(Conv2D(32, (3, 3)))\n",
"classifier.add(BatchNormalization(momentum = 0.85, epsilon = 0.0001))\n",
"classifier.add(Activation('relu'))\n",
"classifier.add(MaxPooling2D(pool_size = (2, 2)))\n",
"classifier.add(Dropout(0.2))\n",
"\n",
"# Flattening\n",
"classifier.add(Flatten())\n",
"\n",
"# Full connection\n",
"classifier.add(Dense(units = 128, activation = 'relu'))\n",
"classifier.add(Dense(units = 3, activation = 'softmax'))\n",
"\n",
"# Compiling the CNN\n",
"classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])"
]
},
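{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick check of the output shapes (compare with the `classifier.summary()` output further down): a 3x3 convolution without padding shrinks the 64x64 input to 62x62, 2x2 max pooling halves that to 31x31, the second convolution gives 29x29, and the second pooling gives 14x14. Flattening 14 x 14 x 32 feature maps yields 6272 values, so the first dense layer has 6272 x 128 + 128 = 802,944 parameters and the output layer has 128 x 3 + 3 = 387. Batch normalisation and dropout do not change these shapes."
]
},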
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Splitting the images and writing them into new train/test folders\n",
"from PIL import Image\n",
"from keras.preprocessing.image import load_img\n",
"import os, os.path\n",
"import scipy"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"imgs = []\n",
"path = \"/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/daisy/\"\n",
"for f in os.listdir(path):\n",
"    imgs.append(Image.open(path + f).convert('RGB'))\n",
"\n",
"# 70/30 train/test split\n",
"split = round(len(imgs) * 0.7)\n",
"for i in range(len(imgs)):\n",
"    if i < split:\n",
"        imgs[i].save('/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/train/daisy/%d.jpg' % i)\n",
"    else:\n",
"        imgs[i].save('/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/test/daisy/%d.jpg' % i)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"imgs = []\n",
"path = \"/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/rose/\"\n",
"for f in os.listdir(path):\n",
"    imgs.append(Image.open(path + f).convert('RGB'))\n",
"\n",
"# 70/30 train/test split\n",
"split = round(len(imgs) * 0.7)\n",
"for i in range(len(imgs)):\n",
"    if i < split:\n",
"        imgs[i].save('/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/train/rose/%d.jpg' % i)\n",
"    else:\n",
"        imgs[i].save('/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/test/rose/%d.jpg' % i)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"imgs = []\n",
"path = \"/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/sunflower/\"\n",
"for f in os.listdir(path):\n",
"    imgs.append(Image.open(path + f).convert('RGB'))\n",
"\n",
"# 70/30 train/test split\n",
"split = round(len(imgs) * 0.7)\n",
"for i in range(len(imgs)):\n",
"    if i < split:\n",
"        imgs[i].save('/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/train/sunflower/%d.jpg' % i)\n",
"    else:\n",
"        imgs[i].save('/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/test/sunflower/%d.jpg' % i)"
]
},
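{
"cell_type": "markdown",
"metadata": {},
"source": [
"The three cells above repeat the same split logic once per class. As a sketch, the whole split can be written as one helper looped over the class names, assuming the same `modified_flowers` directory layout and that the `train/<class>` and `test/<class>` folders already exist:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from PIL import Image\n",
"\n",
"BASE = '/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/'\n",
"\n",
"def split_class(name, train_frac = 0.7):\n",
"    # Load every image of one class, then write the first 70% to train/ and the rest to test/\n",
"    src = os.path.join(BASE, name)\n",
"    imgs = [Image.open(os.path.join(src, f)).convert('RGB') for f in os.listdir(src)]\n",
"    split = round(len(imgs) * train_frac)\n",
"    for i, img in enumerate(imgs):\n",
"        dest = 'train' if i < split else 'test'\n",
"        img.save(os.path.join(BASE, dest, name, '%d.jpg' % i))\n",
"\n",
"for name in ['daisy', 'rose', 'sunflower']:\n",
"    split_class(name)"
]
},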
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from keras.preprocessing.image import ImageDataGenerator\n",
"train_datagen = ImageDataGenerator(rescale = 1./255,\n",
"                                   shear_range = 0.2,\n",
"                                   zoom_range = 0.2,\n",
"                                   horizontal_flip = True)\n",
"\n",
"test_datagen = ImageDataGenerator(rescale = 1./255)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<keras.preprocessing.image.ImageDataGenerator at 0x7f52820ee1d0>"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_datagen"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 246 images belonging to 3 classes.\n"
]
}
],
"source": [
"training_set = train_datagen.flow_from_directory('/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/train',\n",
"                                                 target_size = (64, 64),\n",
"                                                 batch_size = 32,\n",
"                                                 class_mode = 'categorical')"
]
},
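{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (a small addition to the original flow), the generator's `class_indices` attribute shows which label index was assigned to each class folder:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(training_set.class_indices)"
]
},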
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 101 images belonging to 3 classes.\n"
]
}
],
"source": [
"test_set = test_datagen.flow_from_directory('/home/coea/HJ/Razorthink/modified_flowers/modified_flowers/test',\n",
"                                            target_size = (64, 64),\n",
"                                            batch_size = 32,\n",
"                                            class_mode = 'categorical')"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<keras_preprocessing.image.DirectoryIterator at 0x7f52820ee208>"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"test_set"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"_________________________________________________________________\n",
"Layer (type)                 Output Shape              Param #   \n",
"=================================================================\n",
"conv2d_1 (Conv2D)            (None, 62, 62, 32)        896       \n",
"_________________________________________________________________\n",
"max_pooling2d_1 (MaxPooling2 (None, 31, 31, 32)        0         \n",
"_________________________________________________________________\n",
"conv2d_2 (Conv2D)            (None, 29, 29, 32)        9248      \n",
"_________________________________________________________________\n",
"max_pooling2d_2 (MaxPooling2 (None, 14, 14, 32)        0         \n",
"_________________________________________________________________\n",
"flatten_1 (Flatten)          (None, 6272)              0         \n",
"_________________________________________________________________\n",
"dense_1 (Dense)              (None, 128)               802944    \n",
"_________________________________________________________________\n",
"dense_2 (Dense)              (None, 3)                 387       \n",
"=================================================================\n",
"Total params: 813,475\n",
"Trainable params: 813,475\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"classifier.summary()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"from PIL import Image\n",
"\n",
"# Expose PIL's Image module under the bare name 'Image' for code that still does `import Image`\n",
"sys.modules['Image'] = Image"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/home/coea/anaconda3/lib/python3.6/site-packages/PIL/Image.py\n"
]
}
],
"source": [
"from PIL import Image\n",
"print(Image.__file__)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING (theano.tensor.blas): We did not find a dynamic library in the library_dir of the library we use for blas. If you use ATLAS, make sure to compile it with dynamics library.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 1.0865 - acc: 0.3125 - val_loss: 1.2651 - val_acc: 0.4158\n",
"Epoch 2/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 1.2396 - acc: 0.5000 - val_loss: 1.0243 - val_acc: 0.4059\n",
"Epoch 3/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 1.1907 - acc: 0.4062 - val_loss: 1.0464 - val_acc: 0.4059\n",
"Epoch 4/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 1.1058 - acc: 0.3750 - val_loss: 1.0543 - val_acc: 0.5347\n",
"Epoch 5/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 1.0582 - acc: 0.5312 - val_loss: 1.0463 - val_acc: 0.4158\n",
"Epoch 6/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 1.0577 - acc: 0.4062 - val_loss: 1.0148 - val_acc: 0.4158\n",
"Epoch 7/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 1.0453 - acc: 0.4375 - val_loss: 0.9673 - val_acc: 0.5347\n",
"Epoch 8/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 1.0542 - acc: 0.4545 - val_loss: 0.9185 - val_acc: 0.6733\n",
"Epoch 9/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.9689 - acc: 0.6562 - val_loss: 0.8967 - val_acc: 0.6238\n",
"Epoch 10/100\n",
"1/1 [==============================] - 70s 70s/step - loss: 0.8869 - acc: 0.7500 - val_loss: 0.8914 - val_acc: 0.5644\n",
"Epoch 11/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.9077 - acc: 0.5625 - val_loss: 0.9271 - val_acc: 0.5248\n",
"Epoch 12/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.8872 - acc: 0.5312 - val_loss: 0.8416 - val_acc: 0.6139\n",
"Epoch 13/100\n",
"1/1 [==============================] - 91s 91s/step - loss: 0.8500 - acc: 0.6250 - val_loss: 0.7847 - val_acc: 0.6733\n",
"Epoch 14/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.7329 - acc: 0.8125 - val_loss: 0.8103 - val_acc: 0.6238\n",
"Epoch 15/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.8154 - acc: 0.6562 - val_loss: 0.8447 - val_acc: 0.6139\n",
"Epoch 16/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.7329 - acc: 0.6818 - val_loss: 0.8180 - val_acc: 0.6238\n",
"Epoch 17/100\n",
"1/1 [==============================] - 70s 70s/step - loss: 0.7606 - acc: 0.6562 - val_loss: 0.7729 - val_acc: 0.6832\n",
"Epoch 18/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.7154 - acc: 0.6875 - val_loss: 0.8620 - val_acc: 0.6238\n",
"Epoch 19/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.9581 - acc: 0.5938 - val_loss: 0.9412 - val_acc: 0.6139\n",
"Epoch 20/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.7176 - acc: 0.7188 - val_loss: 0.8494 - val_acc: 0.6238\n",
"Epoch 21/100\n",
"1/1 [==============================] - 70s 70s/step - loss: 0.8283 - acc: 0.6250 - val_loss: 0.7353 - val_acc: 0.6832\n",
"Epoch 22/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.7750 - acc: 0.6875 - val_loss: 0.7326 - val_acc: 0.6733\n",
"Epoch 23/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.5797 - acc: 0.7812 - val_loss: 0.7659 - val_acc: 0.6535\n",
"Epoch 24/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6883 - acc: 0.6818 - val_loss: 0.7641 - val_acc: 0.6535\n",
"Epoch 25/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6270 - acc: 0.7500 - val_loss: 0.7075 - val_acc: 0.6634\n",
"Epoch 26/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6236 - acc: 0.7500 - val_loss: 0.7081 - val_acc: 0.6931\n",
"Epoch 27/100\n",
"1/1 [==============================] - 119s 119s/step - loss: 0.5566 - acc: 0.8125 - val_loss: 0.8244 - val_acc: 0.6535\n",
"Epoch 28/100\n",
"1/1 [==============================] - 138s 138s/step - loss: 1.1191 - acc: 0.4688 - val_loss: 0.8391 - val_acc: 0.6436\n",
"Epoch 29/100\n",
"1/1 [==============================] - 144s 144s/step - loss: 0.7097 - acc: 0.6875 - val_loss: 0.7933 - val_acc: 0.6634\n",
"Epoch 30/100\n",
"1/1 [==============================] - 93s 93s/step - loss: 0.7841 - acc: 0.6875 - val_loss: 0.7083 - val_acc: 0.6832\n",
"Epoch 31/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6815 - acc: 0.6250 - val_loss: 0.6760 - val_acc: 0.6931\n",
"Epoch 32/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6354 - acc: 0.6818 - val_loss: 0.6901 - val_acc: 0.6634\n",
"Epoch 33/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6612 - acc: 0.6875 - val_loss: 0.6839 - val_acc: 0.6733\n",
"Epoch 34/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6294 - acc: 0.7188 - val_loss: 0.6765 - val_acc: 0.7129\n",
"Epoch 35/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.5649 - acc: 0.7812 - val_loss: 0.6851 - val_acc: 0.7426\n",
"Epoch 36/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.4238 - acc: 0.9375 - val_loss: 0.7046 - val_acc: 0.7327\n",
"Epoch 37/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.7942 - acc: 0.5312 - val_loss: 0.7033 - val_acc: 0.7327\n",
"Epoch 38/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6164 - acc: 0.7500 - val_loss: 0.6896 - val_acc: 0.7426\n",
"Epoch 39/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.7916 - acc: 0.7188 - val_loss: 0.6580 - val_acc: 0.7525\n",
"Epoch 40/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.6091 - acc: 0.7727 - val_loss: 0.6501 - val_acc: 0.7129\n",
"Epoch 41/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.4100 - acc: 0.8438 - val_loss: 0.6811 - val_acc: 0.6931\n",
"Epoch 42/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.5511 - acc: 0.7500 - val_loss: 0.7026 - val_acc: 0.7030\n",
"Epoch 43/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.5974 - acc: 0.7188 - val_loss: 0.6998 - val_acc: 0.6832\n",
"Epoch 44/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.5175 - acc: 0.7812 - val_loss: 0.6827 - val_acc: 0.7030\n",
"Epoch 45/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4769 - acc: 0.7812 - val_loss: 0.6675 - val_acc: 0.7228\n",
"Epoch 46/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.4497 - acc: 0.8125 - val_loss: 0.6664 - val_acc: 0.7129\n",
"Epoch 47/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.7728 - acc: 0.6875 - val_loss: 0.6743 - val_acc: 0.7327\n",
"Epoch 48/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.5609 - acc: 0.7273 - val_loss: 0.6850 - val_acc: 0.7525\n",
"Epoch 49/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.5247 - acc: 0.7500 - val_loss: 0.7158 - val_acc: 0.7426\n",
"Epoch 50/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4550 - acc: 0.9062 - val_loss: 0.7603 - val_acc: 0.7030\n",
"Epoch 51/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.4788 - acc: 0.7812 - val_loss: 0.7433 - val_acc: 0.7129\n",
"Epoch 52/100\n",
"1/1 [==============================] - 70s 70s/step - loss: 0.4931 - acc: 0.7812 - val_loss: 0.6769 - val_acc: 0.7525\n",
"Epoch 53/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6260 - acc: 0.7188 - val_loss: 0.6463 - val_acc: 0.7525\n",
"Epoch 54/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.7252 - acc: 0.6250 - val_loss: 0.6594 - val_acc: 0.6931\n",
"Epoch 55/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.3809 - acc: 0.7500 - val_loss: 0.6671 - val_acc: 0.6931\n",
"Epoch 56/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.5237 - acc: 0.7273 - val_loss: 0.6738 - val_acc: 0.7327\n",
"Epoch 57/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.3579 - acc: 0.8125 - val_loss: 0.7099 - val_acc: 0.7030\n",
"Epoch 58/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6069 - acc: 0.7188 - val_loss: 0.7007 - val_acc: 0.7129\n",
"Epoch 59/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6616 - acc: 0.6875 - val_loss: 0.6647 - val_acc: 0.7525\n",
"Epoch 60/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.5614 - acc: 0.8125 - val_loss: 0.6443 - val_acc: 0.7624\n",
"Epoch 61/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.3418 - acc: 0.8750 - val_loss: 0.6435 - val_acc: 0.7723\n",
"Epoch 62/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.2942 - acc: 0.9375 - val_loss: 0.6459 - val_acc: 0.7624\n",
"Epoch 63/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.5000 - acc: 0.7812 - val_loss: 0.6434 - val_acc: 0.7525\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 64/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4193 - acc: 0.8636 - val_loss: 0.6539 - val_acc: 0.7129\n",
"Epoch 65/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.3639 - acc: 0.7812 - val_loss: 0.6506 - val_acc: 0.7030\n",
"Epoch 66/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.5190 - acc: 0.8750 - val_loss: 0.6718 - val_acc: 0.7327\n",
"Epoch 67/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.3082 - acc: 0.9375 - val_loss: 0.7209 - val_acc: 0.7228\n",
"Epoch 68/100\n",
"1/1 [==============================] - 70s 70s/step - loss: 0.4515 - acc: 0.8125 - val_loss: 0.7280 - val_acc: 0.7228\n",
"Epoch 69/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4008 - acc: 0.9062 - val_loss: 0.7134 - val_acc: 0.7129\n",
"Epoch 70/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.3517 - acc: 0.8125 - val_loss: 0.7017 - val_acc: 0.7228\n",
"Epoch 71/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.6477 - acc: 0.7500 - val_loss: 0.6957 - val_acc: 0.6931\n",
"Epoch 72/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.6443 - acc: 0.6818 - val_loss: 0.7019 - val_acc: 0.7129\n",
"Epoch 73/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4497 - acc: 0.7812 - val_loss: 0.7011 - val_acc: 0.7030\n",
"Epoch 74/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4581 - acc: 0.7500 - val_loss: 0.6935 - val_acc: 0.7624\n",
"Epoch 75/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.5335 - acc: 0.8125 - val_loss: 0.6997 - val_acc: 0.7228\n",
"Epoch 76/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4006 - acc: 0.8438 - val_loss: 0.6766 - val_acc: 0.7525\n",
"Epoch 77/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4860 - acc: 0.9062 - val_loss: 0.6527 - val_acc: 0.7327\n",
"Epoch 78/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.2830 - acc: 0.9375 - val_loss: 0.6674 - val_acc: 0.7030\n",
"Epoch 79/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.3773 - acc: 0.8438 - val_loss: 0.6668 - val_acc: 0.6931\n",
"Epoch 80/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.5167 - acc: 0.7273 - val_loss: 0.6626 - val_acc: 0.7030\n",
"Epoch 81/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.2665 - acc: 0.9375 - val_loss: 0.6558 - val_acc: 0.7525\n",
"Epoch 82/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.4382 - acc: 0.8438 - val_loss: 0.6767 - val_acc: 0.7228\n",
"Epoch 83/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.3791 - acc: 0.8750 - val_loss: 0.7182 - val_acc: 0.7129\n",
"Epoch 84/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.5555 - acc: 0.7812 - val_loss: 0.7291 - val_acc: 0.7228\n",
"Epoch 85/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.2581 - acc: 0.9688 - val_loss: 0.7101 - val_acc: 0.7426\n",
"Epoch 86/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.3808 - acc: 0.8750 - val_loss: 0.6935 - val_acc: 0.7525\n",
"Epoch 87/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.2688 - acc: 0.8750 - val_loss: 0.6903 - val_acc: 0.7327\n",
"Epoch 88/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.5422 - acc: 0.7727 - val_loss: 0.6925 - val_acc: 0.7327\n",
"Epoch 89/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.2427 - acc: 0.8750 - val_loss: 0.7114 - val_acc: 0.7129\n",
"Epoch 90/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.4907 - acc: 0.8438 - val_loss: 0.7053 - val_acc: 0.7228\n",
"Epoch 91/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.4050 - acc: 0.8438 - val_loss: 0.6921 - val_acc: 0.7426\n",
"Epoch 92/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.3384 - acc: 0.8125 - val_loss: 0.7109 - val_acc: 0.7228\n",
"Epoch 93/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.3424 - acc: 0.8438 - val_loss: 0.6975 - val_acc: 0.7228\n",
"Epoch 94/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.2862 - acc: 0.9062 - val_loss: 0.7181 - val_acc: 0.7723\n",
"Epoch 95/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.5684 - acc: 0.7188 - val_loss: 0.6874 - val_acc: 0.7426\n",
"Epoch 96/100\n",
"1/1 [==============================] - 69s 69s/step - loss: 0.1996 - acc: 1.0000 - val_loss: 0.6827 - val_acc: 0.7228\n",
"Epoch 97/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.2910 - acc: 0.9062 - val_loss: 0.6624 - val_acc: 0.7327\n",
"Epoch 98/100\n",
"1/1 [==============================] - 68s 68s/step - loss: 0.3039 - acc: 0.9688 - val_loss: 0.6530 - val_acc: 0.7525\n",
"Epoch 99/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.2786 - acc: 0.9062 - val_loss: 0.6620 - val_acc: 0.7624\n",
"Epoch 100/100\n",
"1/1 [==============================] - 67s 67s/step - loss: 0.2878 - acc: 0.9062 - val_loss: 0.6901 - val_acc: 0.7228\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x7f52b597e4e0>"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"classifier.fit_generator(training_set,\n",
"                         steps_per_epoch = 1,\n",
"                         epochs = 100,\n",
"                         validation_data = test_set,\n",
"                         # note: 2000 validation steps cycle the small test set many times per epoch\n",
"                         validation_steps = 2000,\n",
"                         workers = 8)"
]
},
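{
"cell_type": "markdown",
"metadata": {},
"source": [
"To see where the training and validation curves diverge, the `History` object returned above can be plotted. This is a minimal sketch assuming matplotlib is available and that the `fit_generator` call is captured as `history = classifier.fit_generator(...)`; in this Keras version the metric keys are `acc` and `val_acc`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"\n",
"# Assumes the cell above was run as: history = classifier.fit_generator(...)\n",
"plt.plot(history.history['acc'], label = 'train acc')\n",
"plt.plot(history.history['val_acc'], label = 'val acc')\n",
"plt.xlabel('epoch')\n",
"plt.ylabel('accuracy')\n",
"plt.legend()\n",
"plt.show()"
]
},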
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## The model appears to start overfitting once training accuracy passes about 75%; the corresponding validation accuracy plateaus around 74%."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## For further improvement we can tune the batch normalisation and dropout layers, and we can also use more powerful models such as:\n",
"\n",
"### Residual Neural Network (ResNet)\n",
"### Inception Net\n",
"### Inception-ResNet (which can be used for transfer learning, i.e. a model already trained on the ImageNet dataset; see the sketch below)"
]
},
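{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal transfer-learning sketch, assuming internet access to download the ImageNet weights and generators rebuilt with a larger `target_size` (e.g. 200x200, since the pretrained base expects larger inputs than 64x64): freeze a pretrained ResNet50 base and train only a small head for the 3 flower classes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from keras.applications.resnet50 import ResNet50\n",
"from keras.models import Model\n",
"from keras.layers import Dense, GlobalAveragePooling2D\n",
"\n",
"# Pretrained convolutional base without the ImageNet classification head\n",
"base_model = ResNet50(weights = 'imagenet', include_top = False, input_shape = (200, 200, 3))\n",
"\n",
"# Freeze the base so that only the new head is trained\n",
"for layer in base_model.layers:\n",
"    layer.trainable = False\n",
"\n",
"# Small classification head for the 3 flower classes\n",
"x = GlobalAveragePooling2D()(base_model.output)\n",
"x = Dense(128, activation = 'relu')(x)\n",
"predictions = Dense(3, activation = 'softmax')(x)\n",
"\n",
"model = Model(inputs = base_model.input, outputs = predictions)\n",
"model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])"
]
}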
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}