  1. I0114 17:30:17.014989 86948 caffe.cpp:184] Using GPUs 0, 1
  2. I0114 17:30:18.139459 86948 solver.cpp:48] Initializing solver from parameters:
  3. test_iter: 10
  4. test_interval: 500
  5. base_lr: 0.001
  6. display: 500
  7. max_iter: 850000
  8. lr_policy: "fixed"
  9. gamma: 0.5
  10. momentum: 0.9
  11. weight_decay: 0.0005
  12. snapshot: 5000
  13. snapshot_prefix: "models/mv16f/mv16f1_"
  14. solver_mode: GPU
  15. device_id: 0
  16. net: "models/mv16f/mv_train1.prototxt"
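The solver settings above describe the whole run: a fixed learning rate of 0.001 with momentum 0.9 and weight decay 0.0005, a snapshot every 5000 iterations under the models/mv16f/mv16f1_ prefix, and a test pass of test_iter = 10 batches every 500 iterations. Since lr_policy is "fixed", the gamma value has no effect here. The test net defined further down uses batch_size 50, so each test pass covers 10 * 50 = 500 samples, which is the population behind the accuracy figures later in the log.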
  17. I0114 17:30:18.139631 86948 solver.cpp:91] Creating training net from net file: models/mv16f/mv_train1.prototxt
  18. I0114 17:30:18.140243 86948 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer data
  19. I0114 17:30:18.140266 86948 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
  20. I0114 17:30:18.140447 86948 net.cpp:49] Initializing net from parameters:
  21. name: "mv_16f1"
  22. state {
  23. phase: TRAIN
  24. }
  25. layer {
  26. name: "data"
  27. type: "HDF5Data"
  28. top: "data"
  29. top: "label"
  30. include {
  31. phase: TRAIN
  32. }
  33. hdf5_data_param {
  34. source: "/home/fe/anilil/caffe/models/mv16f/train.txt"
  35. batch_size: 150
  36. }
  37. }
  38. layer {
  39. name: "conv1"
  40. type: "Convolution"
  41. bottom: "data"
  42. top: "conv1"
  43. param {
  44. lr_mult: 1
  45. decay_mult: 1
  46. }
  47. param {
  48. lr_mult: 1
  49. decay_mult: 1
  50. }
  51. convolution_param {
  52. num_output: 64
  53. kernel_size: 3
  54. weight_filler {
  55. type: "xavier"
  56. }
  57. bias_filler {
  58. type: "xavier"
  59. }
  60. }
  61. }
  62. layer {
  63. name: "relu1"
  64. type: "ReLU"
  65. bottom: "conv1"
  66. top: "conv1"
  67. }
  68. layer {
  69. name: "pool1"
  70. type: "Pooling"
  71. bottom: "conv1"
  72. top: "pool1"
  73. pooling_param {
  74. pool: AVE
  75. kernel_size: 2
  76. stride: 1
  77. }
  78. }
  79. layer {
  80. name: "conv2"
  81. type: "Convolution"
  82. bottom: "pool1"
  83. top: "conv2"
  84. param {
  85. lr_mult: 1
  86. decay_mult: 1
  87. }
  88. param {
  89. lr_mult: 1
  90. decay_mult: 1
  91. }
  92. convolution_param {
  93. num_output: 128
  94. kernel_size: 3
  95. stride: 1
  96. weight_filler {
  97. type: "xavier"
  98. }
  99. bias_filler {
  100. type: "xavier"
  101. }
  102. }
  103. }
  104. layer {
  105. name: "relu2"
  106. type: "ReLU"
  107. bottom: "conv2"
  108. top: "conv2"
  109. }
  110. layer {
  111. name: "pool2"
  112. type: "Pooling"
  113. bottom: "conv2"
  114. top: "pool2"
  115. pooling_param {
  116. pool: AVE
  117. kernel_size: 2
  118. stride: 2
  119. }
  120. }
  121. layer {
  122. name: "conv3"
  123. type: "Convolution"
  124. bottom: "pool2"
  125. top: "conv3"
  126. param {
  127. lr_mult: 1
  128. decay_mult: 1
  129. }
  130. param {
  131. lr_mult: 1
  132. decay_mult: 1
  133. }
  134. convolution_param {
  135. num_output: 256
  136. kernel_size: 3
  137. stride: 1
  138. weight_filler {
  139. type: "xavier"
  140. }
  141. bias_filler {
  142. type: "xavier"
  143. }
  144. }
  145. }
  146. layer {
  147. name: "relu3"
  148. type: "ReLU"
  149. bottom: "conv3"
  150. top: "conv3"
  151. }
  152. layer {
  153. name: "pool3"
  154. type: "Pooling"
  155. bottom: "conv3"
  156. top: "pool3"
  157. pooling_param {
  158. pool: AVE
  159. kernel_size: 2
  160. stride: 2
  161. }
  162. }
  163. layer {
  164. name: "conv4"
  165. type: "Convolution"
  166. bottom: "pool3"
  167. top: "conv4"
  168. param {
  169. lr_mult: 1
  170. decay_mult: 1
  171. }
  172. param {
  173. lr_mult: 1
  174. decay_mult: 1
  175. }
  176. convolution_param {
  177. num_output: 256
  178. kernel_size: 3
  179. stride: 1
  180. weight_filler {
  181. type: "xavier"
  182. }
  183. bias_filler {
  184. type: "xavier"
  185. }
  186. }
  187. }
  188. layer {
  189. name: "relu4"
  190. type: "ReLU"
  191. bottom: "conv4"
  192. top: "conv4"
  193. }
  194. layer {
  195. name: "pool4"
  196. type: "Pooling"
  197. bottom: "conv4"
  198. top: "pool4"
  199. pooling_param {
  200. pool: AVE
  201. kernel_size: 2
  202. stride: 2
  203. }
  204. }
  205. layer {
  206. name: "conv5"
  207. type: "Convolution"
  208. bottom: "pool4"
  209. top: "conv5"
  210. param {
  211. lr_mult: 1
  212. decay_mult: 1
  213. }
  214. param {
  215. lr_mult: 1
  216. decay_mult: 1
  217. }
  218. convolution_param {
  219. num_output: 256
  220. kernel_size: 3
  221. stride: 1
  222. weight_filler {
  223. type: "xavier"
  224. }
  225. bias_filler {
  226. type: "xavier"
  227. }
  228. }
  229. }
  230. layer {
  231. name: "relu5"
  232. type: "ReLU"
  233. bottom: "conv5"
  234. top: "conv5"
  235. }
  236. layer {
  237. name: "pool5"
  238. type: "Pooling"
  239. bottom: "conv5"
  240. top: "pool5"
  241. pooling_param {
  242. pool: AVE
  243. kernel_size: 2
  244. stride: 2
  245. }
  246. }
  247. layer {
  248. name: "fc6"
  249. type: "InnerProduct"
  250. bottom: "pool5"
  251. top: "fc6"
  252. param {
  253. lr_mult: 1
  254. decay_mult: 1
  255. }
  256. param {
  257. lr_mult: 2
  258. decay_mult: 0
  259. }
  260. inner_product_param {
  261. num_output: 2048
  262. weight_filler {
  263. type: "xavier"
  264. }
  265. bias_filler {
  266. type: "xavier"
  267. }
  268. }
  269. }
  270. layer {
  271. name: "relu6"
  272. type: "ReLU"
  273. bottom: "fc6"
  274. top: "fc6"
  275. }
  276. layer {
  277. name: "fc7"
  278. type: "InnerProduct"
  279. bottom: "fc6"
  280. top: "fc7"
  281. param {
  282. lr_mult: 1
  283. decay_mult: 1
  284. }
  285. param {
  286. lr_mult: 2
  287. decay_mult: 0
  288. }
  289. inner_product_param {
  290. num_output: 2048
  291. weight_filler {
  292. type: "xavier"
  293. }
  294. bias_filler {
  295. type: "xavier"
  296. }
  297. }
  298. }
  299. layer {
  300. name: "relu7"
  301. type: "ReLU"
  302. bottom: "fc7"
  303. top: "fc7"
  304. }
  305. layer {
  306. name: "fc8"
  307. type: "InnerProduct"
  308. bottom: "fc7"
  309. top: "fc8"
  310. param {
  311. lr_mult: 1
  312. decay_mult: 1
  313. }
  314. param {
  315. lr_mult: 2
  316. decay_mult: 0
  317. }
  318. inner_product_param {
  319. num_output: 101
  320. weight_filler {
  321. type: "xavier"
  322. }
  323. bias_filler {
  324. type: "xavier"
  325. }
  326. }
  327. }
  328. layer {
  329. name: "loss"
  330. type: "SoftmaxWithLoss"
  331. bottom: "fc8"
  332. bottom: "label"
  333. top: "loss"
  334. }
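The HDF5Data layer above reads the list of files in train.txt; Caffe's HDF5Data layer looks inside each listed file for datasets named after its top blobs, here "data" and "label". As a rough, hypothetical sketch (paths, file count and sample counts are illustrative, not taken from this run), HDF5 files of the right layout and a matching list file could be written with h5py like this:

    import os
    import h5py
    import numpy as np

    # Hypothetical output location and counts; the real run lists 477 training files.
    out_dir = "/home/fe/anilil/caffe/models/mv16f/h5"
    os.makedirs(out_dir, exist_ok=True)
    num_files = 4
    clips_per_file = 100

    # Write to an example list file rather than the real train.txt.
    with open(os.path.join(out_dir, "train_example.txt"), "w") as listing:
        for i in range(num_files):
            # One sample is a 48-channel 58x58 volume, matching the logged
            # data top shape 150 48 58 58 (a batch of 150 such samples).
            data = np.random.rand(clips_per_file, 48, 58, 58).astype(np.float32)
            # Labels are class indices for the 101-way fc8/softmax.
            label = np.random.randint(0, 101, size=(clips_per_file, 1)).astype(np.float32)
            path = os.path.join(out_dir, "train_%03d.h5" % i)
            with h5py.File(path, "w") as f:
                # Dataset names must match the layer's top blob names.
                f.create_dataset("data", data=data)
                f.create_dataset("label", data=label)
            listing.write(path + "\n")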
  335. I0114 17:30:18.140599 86948 layer_factory.hpp:77] Creating layer data
  336. I0114 17:30:18.140625 86948 net.cpp:106] Creating Layer data
  337. I0114 17:30:18.140630 86948 net.cpp:411] data -> data
  338. I0114 17:30:18.140650 86948 net.cpp:411] data -> label
  339. I0114 17:30:18.140666 86948 hdf5_data_layer.cpp:79] Loading list of HDF5 filenames from: /home/fe/anilil/caffe/models/mv16f/train.txt
  340. I0114 17:30:18.140861 86948 hdf5_data_layer.cpp:93] Number of HDF5 files: 477
  341. I0114 17:30:18.141713 86948 hdf5.cpp:32] Datatype class: H5T_FLOAT
  342. I0114 17:30:19.121565 86948 net.cpp:150] Setting up data
  343. I0114 17:30:19.121650 86948 net.cpp:157] Top shape: 150 48 58 58 (24220800)
  344. I0114 17:30:19.121657 86948 net.cpp:157] Top shape: 150 1 (150)
  345. I0114 17:30:19.121660 86948 net.cpp:165] Memory required for data: 96883800
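The running "Memory required for data" counter is the cumulative size of every top blob allocated so far, at 4 bytes per single-precision value: at this point (24220800 + 150) * 4 = 96883800 bytes, and the figure keeps growing as each later layer is set up.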
  346. I0114 17:30:19.121668 86948 layer_factory.hpp:77] Creating layer conv1
  347. I0114 17:30:19.121704 86948 net.cpp:106] Creating Layer conv1
  348. I0114 17:30:19.121721 86948 net.cpp:454] conv1 <- data
  349. I0114 17:30:19.121743 86948 net.cpp:411] conv1 -> conv1
  350. I0114 17:30:19.253258 86948 net.cpp:150] Setting up conv1
  351. I0114 17:30:19.253304 86948 net.cpp:157] Top shape: 150 64 56 56 (30105600)
  352. I0114 17:30:19.253307 86948 net.cpp:165] Memory required for data: 217306200
  353. I0114 17:30:19.253322 86948 layer_factory.hpp:77] Creating layer relu1
  354. I0114 17:30:19.253340 86948 net.cpp:106] Creating Layer relu1
  355. I0114 17:30:19.253345 86948 net.cpp:454] relu1 <- conv1
  356. I0114 17:30:19.253350 86948 net.cpp:397] relu1 -> conv1 (in-place)
  357. I0114 17:30:19.253569 86948 net.cpp:150] Setting up relu1
  358. I0114 17:30:19.253577 86948 net.cpp:157] Top shape: 150 64 56 56 (30105600)
  359. I0114 17:30:19.253592 86948 net.cpp:165] Memory required for data: 337728600
  360. I0114 17:30:19.253595 86948 layer_factory.hpp:77] Creating layer pool1
  361. I0114 17:30:19.253607 86948 net.cpp:106] Creating Layer pool1
  362. I0114 17:30:19.253612 86948 net.cpp:454] pool1 <- conv1
  363. I0114 17:30:19.253615 86948 net.cpp:411] pool1 -> pool1
  364. I0114 17:30:19.253981 86948 net.cpp:150] Setting up pool1
  365. I0114 17:30:19.253993 86948 net.cpp:157] Top shape: 150 64 55 55 (29040000)
  366. I0114 17:30:19.253995 86948 net.cpp:165] Memory required for data: 453888600
  367. I0114 17:30:19.253998 86948 layer_factory.hpp:77] Creating layer conv2
  368. I0114 17:30:19.254022 86948 net.cpp:106] Creating Layer conv2
  369. I0114 17:30:19.254025 86948 net.cpp:454] conv2 <- pool1
  370. I0114 17:30:19.254031 86948 net.cpp:411] conv2 -> conv2
  371. I0114 17:30:19.256091 86948 net.cpp:150] Setting up conv2
  372. I0114 17:30:19.256105 86948 net.cpp:157] Top shape: 150 128 53 53 (53932800)
  373. I0114 17:30:19.256108 86948 net.cpp:165] Memory required for data: 669619800
  374. I0114 17:30:19.256117 86948 layer_factory.hpp:77] Creating layer relu2
  375. I0114 17:30:19.256122 86948 net.cpp:106] Creating Layer relu2
  376. I0114 17:30:19.256125 86948 net.cpp:454] relu2 <- conv2
  377. I0114 17:30:19.256131 86948 net.cpp:397] relu2 -> conv2 (in-place)
  378. I0114 17:30:19.256299 86948 net.cpp:150] Setting up relu2
  379. I0114 17:30:19.256310 86948 net.cpp:157] Top shape: 150 128 53 53 (53932800)
  380. I0114 17:30:19.256325 86948 net.cpp:165] Memory required for data: 885351000
  381. I0114 17:30:19.256327 86948 layer_factory.hpp:77] Creating layer pool2
  382. I0114 17:30:19.256363 86948 net.cpp:106] Creating Layer pool2
  383. I0114 17:30:19.256367 86948 net.cpp:454] pool2 <- conv2
  384. I0114 17:30:19.256372 86948 net.cpp:411] pool2 -> pool2
  385. I0114 17:30:19.256733 86948 net.cpp:150] Setting up pool2
  386. I0114 17:30:19.256744 86948 net.cpp:157] Top shape: 150 128 27 27 (13996800)
  387. I0114 17:30:19.256747 86948 net.cpp:165] Memory required for data: 941338200
  388. I0114 17:30:19.256750 86948 layer_factory.hpp:77] Creating layer conv3
  389. I0114 17:30:19.256760 86948 net.cpp:106] Creating Layer conv3
  390. I0114 17:30:19.256763 86948 net.cpp:454] conv3 <- pool2
  391. I0114 17:30:19.256770 86948 net.cpp:411] conv3 -> conv3
  392. I0114 17:30:19.260486 86948 net.cpp:150] Setting up conv3
  393. I0114 17:30:19.260499 86948 net.cpp:157] Top shape: 150 256 25 25 (24000000)
  394. I0114 17:30:19.260501 86948 net.cpp:165] Memory required for data: 1037338200
  395. I0114 17:30:19.260509 86948 layer_factory.hpp:77] Creating layer relu3
  396. I0114 17:30:19.260517 86948 net.cpp:106] Creating Layer relu3
  397. I0114 17:30:19.260520 86948 net.cpp:454] relu3 <- conv3
  398. I0114 17:30:19.260524 86948 net.cpp:397] relu3 -> conv3 (in-place)
  399. I0114 17:30:19.260691 86948 net.cpp:150] Setting up relu3
  400. I0114 17:30:19.260699 86948 net.cpp:157] Top shape: 150 256 25 25 (24000000)
  401. I0114 17:30:19.260713 86948 net.cpp:165] Memory required for data: 1133338200
  402. I0114 17:30:19.260716 86948 layer_factory.hpp:77] Creating layer pool3
  403. I0114 17:30:19.260725 86948 net.cpp:106] Creating Layer pool3
  404. I0114 17:30:19.260728 86948 net.cpp:454] pool3 <- conv3
  405. I0114 17:30:19.260733 86948 net.cpp:411] pool3 -> pool3
  406. I0114 17:30:19.261076 86948 net.cpp:150] Setting up pool3
  407. I0114 17:30:19.261086 86948 net.cpp:157] Top shape: 150 256 13 13 (6489600)
  408. I0114 17:30:19.261090 86948 net.cpp:165] Memory required for data: 1159296600
  409. I0114 17:30:19.261093 86948 layer_factory.hpp:77] Creating layer conv4
  410. I0114 17:30:19.261103 86948 net.cpp:106] Creating Layer conv4
  411. I0114 17:30:19.261106 86948 net.cpp:454] conv4 <- pool3
  412. I0114 17:30:19.261113 86948 net.cpp:411] conv4 -> conv4
  413. I0114 17:30:19.266593 86948 net.cpp:150] Setting up conv4
  414. I0114 17:30:19.266607 86948 net.cpp:157] Top shape: 150 256 11 11 (4646400)
  415. I0114 17:30:19.266610 86948 net.cpp:165] Memory required for data: 1177882200
  416. I0114 17:30:19.266616 86948 layer_factory.hpp:77] Creating layer relu4
  417. I0114 17:30:19.266621 86948 net.cpp:106] Creating Layer relu4
  418. I0114 17:30:19.266624 86948 net.cpp:454] relu4 <- conv4
  419. I0114 17:30:19.266630 86948 net.cpp:397] relu4 -> conv4 (in-place)
  420. I0114 17:30:19.266991 86948 net.cpp:150] Setting up relu4
  421. I0114 17:30:19.267014 86948 net.cpp:157] Top shape: 150 256 11 11 (4646400)
  422. I0114 17:30:19.267016 86948 net.cpp:165] Memory required for data: 1196467800
  423. I0114 17:30:19.267019 86948 layer_factory.hpp:77] Creating layer pool4
  424. I0114 17:30:19.267027 86948 net.cpp:106] Creating Layer pool4
  425. I0114 17:30:19.267041 86948 net.cpp:454] pool4 <- conv4
  426. I0114 17:30:19.267045 86948 net.cpp:411] pool4 -> pool4
  427. I0114 17:30:19.267223 86948 net.cpp:150] Setting up pool4
  428. I0114 17:30:19.267231 86948 net.cpp:157] Top shape: 150 256 6 6 (1382400)
  429. I0114 17:30:19.267246 86948 net.cpp:165] Memory required for data: 1201997400
  430. I0114 17:30:19.267248 86948 layer_factory.hpp:77] Creating layer conv5
  431. I0114 17:30:19.267258 86948 net.cpp:106] Creating Layer conv5
  432. I0114 17:30:19.267273 86948 net.cpp:454] conv5 <- pool4
  433. I0114 17:30:19.267280 86948 net.cpp:411] conv5 -> conv5
  434. I0114 17:30:19.272622 86948 net.cpp:150] Setting up conv5
  435. I0114 17:30:19.272635 86948 net.cpp:157] Top shape: 150 256 4 4 (614400)
  436. I0114 17:30:19.272639 86948 net.cpp:165] Memory required for data: 1204455000
  437. I0114 17:30:19.272647 86948 layer_factory.hpp:77] Creating layer relu5
  438. I0114 17:30:19.272652 86948 net.cpp:106] Creating Layer relu5
  439. I0114 17:30:19.272655 86948 net.cpp:454] relu5 <- conv5
  440. I0114 17:30:19.272660 86948 net.cpp:397] relu5 -> conv5 (in-place)
  441. I0114 17:30:19.272987 86948 net.cpp:150] Setting up relu5
  442. I0114 17:30:19.273010 86948 net.cpp:157] Top shape: 150 256 4 4 (614400)
  443. I0114 17:30:19.273011 86948 net.cpp:165] Memory required for data: 1206912600
  444. I0114 17:30:19.273026 86948 layer_factory.hpp:77] Creating layer pool5
  445. I0114 17:30:19.273056 86948 net.cpp:106] Creating Layer pool5
  446. I0114 17:30:19.273061 86948 net.cpp:454] pool5 <- conv5
  447. I0114 17:30:19.273064 86948 net.cpp:411] pool5 -> pool5
  448. I0114 17:30:19.273246 86948 net.cpp:150] Setting up pool5
  449. I0114 17:30:19.273253 86948 net.cpp:157] Top shape: 150 256 2 2 (153600)
  450. I0114 17:30:19.273267 86948 net.cpp:165] Memory required for data: 1207527000
  451. I0114 17:30:19.273270 86948 layer_factory.hpp:77] Creating layer fc6
  452. I0114 17:30:19.273298 86948 net.cpp:106] Creating Layer fc6
  453. I0114 17:30:19.273301 86948 net.cpp:454] fc6 <- pool5
  454. I0114 17:30:19.273306 86948 net.cpp:411] fc6 -> fc6
  455. I0114 17:30:19.289192 86948 net.cpp:150] Setting up fc6
  456. I0114 17:30:19.289223 86948 net.cpp:157] Top shape: 150 2048 (307200)
  457. I0114 17:30:19.289237 86948 net.cpp:165] Memory required for data: 1208755800
  458. I0114 17:30:19.289244 86948 layer_factory.hpp:77] Creating layer relu6
  459. I0114 17:30:19.289252 86948 net.cpp:106] Creating Layer relu6
  460. I0114 17:30:19.289257 86948 net.cpp:454] relu6 <- fc6
  461. I0114 17:30:19.289260 86948 net.cpp:397] relu6 -> fc6 (in-place)
  462. I0114 17:30:19.289674 86948 net.cpp:150] Setting up relu6
  463. I0114 17:30:19.289696 86948 net.cpp:157] Top shape: 150 2048 (307200)
  464. I0114 17:30:19.289700 86948 net.cpp:165] Memory required for data: 1209984600
  465. I0114 17:30:19.289702 86948 layer_factory.hpp:77] Creating layer fc7
  466. I0114 17:30:19.289722 86948 net.cpp:106] Creating Layer fc7
  467. I0114 17:30:19.289726 86948 net.cpp:454] fc7 <- fc6
  468. I0114 17:30:19.289732 86948 net.cpp:411] fc7 -> fc7
  469. I0114 17:30:19.322726 86948 net.cpp:150] Setting up fc7
  470. I0114 17:30:19.322765 86948 net.cpp:157] Top shape: 150 2048 (307200)
  471. I0114 17:30:19.322769 86948 net.cpp:165] Memory required for data: 1211213400
  472. I0114 17:30:19.322777 86948 layer_factory.hpp:77] Creating layer relu7
  473. I0114 17:30:19.322787 86948 net.cpp:106] Creating Layer relu7
  474. I0114 17:30:19.322790 86948 net.cpp:454] relu7 <- fc7
  475. I0114 17:30:19.322796 86948 net.cpp:397] relu7 -> fc7 (in-place)
  476. I0114 17:30:19.323101 86948 net.cpp:150] Setting up relu7
  477. I0114 17:30:19.323110 86948 net.cpp:157] Top shape: 150 2048 (307200)
  478. I0114 17:30:19.323124 86948 net.cpp:165] Memory required for data: 1212442200
  479. I0114 17:30:19.323127 86948 layer_factory.hpp:77] Creating layer fc8
  480. I0114 17:30:19.323142 86948 net.cpp:106] Creating Layer fc8
  481. I0114 17:30:19.323144 86948 net.cpp:454] fc8 <- fc7
  482. I0114 17:30:19.323149 86948 net.cpp:411] fc8 -> fc8
  483. I0114 17:30:19.325206 86948 net.cpp:150] Setting up fc8
  484. I0114 17:30:19.325217 86948 net.cpp:157] Top shape: 150 101 (15150)
  485. I0114 17:30:19.325220 86948 net.cpp:165] Memory required for data: 1212502800
  486. I0114 17:30:19.325227 86948 layer_factory.hpp:77] Creating layer loss
  487. I0114 17:30:19.325237 86948 net.cpp:106] Creating Layer loss
  488. I0114 17:30:19.325240 86948 net.cpp:454] loss <- fc8
  489. I0114 17:30:19.325244 86948 net.cpp:454] loss <- label
  490. I0114 17:30:19.325250 86948 net.cpp:411] loss -> loss
  491. I0114 17:30:19.325264 86948 layer_factory.hpp:77] Creating layer loss
  492. I0114 17:30:19.326247 86948 net.cpp:150] Setting up loss
  493. I0114 17:30:19.326261 86948 net.cpp:157] Top shape: (1)
  494. I0114 17:30:19.326263 86948 net.cpp:160] with loss weight 1
  495. I0114 17:30:19.326287 86948 net.cpp:165] Memory required for data: 1212502804
  496. I0114 17:30:19.326289 86948 net.cpp:226] loss needs backward computation.
  497. I0114 17:30:19.326292 86948 net.cpp:226] fc8 needs backward computation.
  498. I0114 17:30:19.326295 86948 net.cpp:226] relu7 needs backward computation.
  499. I0114 17:30:19.326297 86948 net.cpp:226] fc7 needs backward computation.
  500. I0114 17:30:19.326300 86948 net.cpp:226] relu6 needs backward computation.
  501. I0114 17:30:19.326303 86948 net.cpp:226] fc6 needs backward computation.
  502. I0114 17:30:19.326305 86948 net.cpp:226] pool5 needs backward computation.
  503. I0114 17:30:19.326309 86948 net.cpp:226] relu5 needs backward computation.
  504. I0114 17:30:19.326311 86948 net.cpp:226] conv5 needs backward computation.
  505. I0114 17:30:19.326314 86948 net.cpp:226] pool4 needs backward computation.
  506. I0114 17:30:19.326318 86948 net.cpp:226] relu4 needs backward computation.
  507. I0114 17:30:19.326320 86948 net.cpp:226] conv4 needs backward computation.
  508. I0114 17:30:19.326339 86948 net.cpp:226] pool3 needs backward computation.
  509. I0114 17:30:19.326342 86948 net.cpp:226] relu3 needs backward computation.
  510. I0114 17:30:19.326344 86948 net.cpp:226] conv3 needs backward computation.
  511. I0114 17:30:19.326347 86948 net.cpp:226] pool2 needs backward computation.
  512. I0114 17:30:19.326350 86948 net.cpp:226] relu2 needs backward computation.
  513. I0114 17:30:19.326352 86948 net.cpp:226] conv2 needs backward computation.
  514. I0114 17:30:19.326355 86948 net.cpp:226] pool1 needs backward computation.
  515. I0114 17:30:19.326359 86948 net.cpp:226] relu1 needs backward computation.
  516. I0114 17:30:19.326361 86948 net.cpp:226] conv1 needs backward computation.
  517. I0114 17:30:19.326364 86948 net.cpp:228] data does not need backward computation.
  518. I0114 17:30:19.326366 86948 net.cpp:270] This network produces output loss
  519. I0114 17:30:19.326382 86948 net.cpp:283] Network initialization done.
  520. I0114 17:30:19.327203 86948 solver.cpp:181] Creating test net (#0) specified by net file: models/mv16f/mv_train1.prototxt
  521. I0114 17:30:19.327262 86948 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer data
  522. I0114 17:30:19.327458 86948 net.cpp:49] Initializing net from parameters:
  523. name: "mv_16f1"
  524. state {
  525. phase: TEST
  526. }
  527. layer {
  528. name: "data"
  529. type: "HDF5Data"
  530. top: "data"
  531. top: "label"
  532. include {
  533. phase: TEST
  534. }
  535. hdf5_data_param {
  536. source: "/home/fe/anilil/caffe/models/mv16f/test.txt"
  537. batch_size: 50
  538. }
  539. }
  540. layer {
  541. name: "conv1"
  542. type: "Convolution"
  543. bottom: "data"
  544. top: "conv1"
  545. param {
  546. lr_mult: 1
  547. decay_mult: 1
  548. }
  549. param {
  550. lr_mult: 1
  551. decay_mult: 1
  552. }
  553. convolution_param {
  554. num_output: 64
  555. kernel_size: 3
  556. weight_filler {
  557. type: "xavier"
  558. }
  559. bias_filler {
  560. type: "xavier"
  561. }
  562. }
  563. }
  564. layer {
  565. name: "relu1"
  566. type: "ReLU"
  567. bottom: "conv1"
  568. top: "conv1"
  569. }
  570. layer {
  571. name: "pool1"
  572. type: "Pooling"
  573. bottom: "conv1"
  574. top: "pool1"
  575. pooling_param {
  576. pool: AVE
  577. kernel_size: 2
  578. stride: 1
  579. }
  580. }
  581. layer {
  582. name: "conv2"
  583. type: "Convolution"
  584. bottom: "pool1"
  585. top: "conv2"
  586. param {
  587. lr_mult: 1
  588. decay_mult: 1
  589. }
  590. param {
  591. lr_mult: 1
  592. decay_mult: 1
  593. }
  594. convolution_param {
  595. num_output: 128
  596. kernel_size: 3
  597. stride: 1
  598. weight_filler {
  599. type: "xavier"
  600. }
  601. bias_filler {
  602. type: "xavier"
  603. }
  604. }
  605. }
  606. layer {
  607. name: "relu2"
  608. type: "ReLU"
  609. bottom: "conv2"
  610. top: "conv2"
  611. }
  612. layer {
  613. name: "pool2"
  614. type: "Pooling"
  615. bottom: "conv2"
  616. top: "pool2"
  617. pooling_param {
  618. pool: AVE
  619. kernel_size: 2
  620. stride: 2
  621. }
  622. }
  623. layer {
  624. name: "conv3"
  625. type: "Convolution"
  626. bottom: "pool2"
  627. top: "conv3"
  628. param {
  629. lr_mult: 1
  630. decay_mult: 1
  631. }
  632. param {
  633. lr_mult: 1
  634. decay_mult: 1
  635. }
  636. convolution_param {
  637. num_output: 256
  638. kernel_size: 3
  639. stride: 1
  640. weight_filler {
  641. type: "xavier"
  642. }
  643. bias_filler {
  644. type: "xavier"
  645. }
  646. }
  647. }
  648. layer {
  649. name: "relu3"
  650. type: "ReLU"
  651. bottom: "conv3"
  652. top: "conv3"
  653. }
  654. layer {
  655. name: "pool3"
  656. type: "Pooling"
  657. bottom: "conv3"
  658. top: "pool3"
  659. pooling_param {
  660. pool: AVE
  661. kernel_size: 2
  662. stride: 2
  663. }
  664. }
  665. layer {
  666. name: "conv4"
  667. type: "Convolution"
  668. bottom: "pool3"
  669. top: "conv4"
  670. param {
  671. lr_mult: 1
  672. decay_mult: 1
  673. }
  674. param {
  675. lr_mult: 1
  676. decay_mult: 1
  677. }
  678. convolution_param {
  679. num_output: 256
  680. kernel_size: 3
  681. stride: 1
  682. weight_filler {
  683. type: "xavier"
  684. }
  685. bias_filler {
  686. type: "xavier"
  687. }
  688. }
  689. }
  690. layer {
  691. name: "relu4"
  692. type: "ReLU"
  693. bottom: "conv4"
  694. top: "conv4"
  695. }
  696. layer {
  697. name: "pool4"
  698. type: "Pooling"
  699. bottom: "conv4"
  700. top: "pool4"
  701. pooling_param {
  702. pool: AVE
  703. kernel_size: 2
  704. stride: 2
  705. }
  706. }
  707. layer {
  708. name: "conv5"
  709. type: "Convolution"
  710. bottom: "pool4"
  711. top: "conv5"
  712. param {
  713. lr_mult: 1
  714. decay_mult: 1
  715. }
  716. param {
  717. lr_mult: 1
  718. decay_mult: 1
  719. }
  720. convolution_param {
  721. num_output: 256
  722. kernel_size: 3
  723. stride: 1
  724. weight_filler {
  725. type: "xavier"
  726. }
  727. bias_filler {
  728. type: "xavier"
  729. }
  730. }
  731. }
  732. layer {
  733. name: "relu5"
  734. type: "ReLU"
  735. bottom: "conv5"
  736. top: "conv5"
  737. }
  738. layer {
  739. name: "pool5"
  740. type: "Pooling"
  741. bottom: "conv5"
  742. top: "pool5"
  743. pooling_param {
  744. pool: AVE
  745. kernel_size: 2
  746. stride: 2
  747. }
  748. }
  749. layer {
  750. name: "fc6"
  751. type: "InnerProduct"
  752. bottom: "pool5"
  753. top: "fc6"
  754. param {
  755. lr_mult: 1
  756. decay_mult: 1
  757. }
  758. param {
  759. lr_mult: 2
  760. decay_mult: 0
  761. }
  762. inner_product_param {
  763. num_output: 2048
  764. weight_filler {
  765. type: "xavier"
  766. }
  767. bias_filler {
  768. type: "xavier"
  769. }
  770. }
  771. }
  772. layer {
  773. name: "relu6"
  774. type: "ReLU"
  775. bottom: "fc6"
  776. top: "fc6"
  777. }
  778. layer {
  779. name: "fc7"
  780. type: "InnerProduct"
  781. bottom: "fc6"
  782. top: "fc7"
  783. param {
  784. lr_mult: 1
  785. decay_mult: 1
  786. }
  787. param {
  788. lr_mult: 2
  789. decay_mult: 0
  790. }
  791. inner_product_param {
  792. num_output: 2048
  793. weight_filler {
  794. type: "xavier"
  795. }
  796. bias_filler {
  797. type: "xavier"
  798. }
  799. }
  800. }
  801. layer {
  802. name: "relu7"
  803. type: "ReLU"
  804. bottom: "fc7"
  805. top: "fc7"
  806. }
  807. layer {
  808. name: "fc8"
  809. type: "InnerProduct"
  810. bottom: "fc7"
  811. top: "fc8"
  812. param {
  813. lr_mult: 1
  814. decay_mult: 1
  815. }
  816. param {
  817. lr_mult: 2
  818. decay_mult: 0
  819. }
  820. inner_product_param {
  821. num_output: 101
  822. weight_filler {
  823. type: "xavier"
  824. }
  825. bias_filler {
  826. type: "xavier"
  827. }
  828. }
  829. }
  830. layer {
  831. name: "accuracy"
  832. type: "Accuracy"
  833. bottom: "fc8"
  834. bottom: "label"
  835. top: "accuracy"
  836. include {
  837. phase: TEST
  838. }
  839. }
  840. layer {
  841. name: "loss"
  842. type: "SoftmaxWithLoss"
  843. bottom: "fc8"
  844. bottom: "label"
  845. top: "loss"
  846. }
  847. I0114 17:30:19.327607 86948 layer_factory.hpp:77] Creating layer data
  848. I0114 17:30:19.327620 86948 net.cpp:106] Creating Layer data
  849. I0114 17:30:19.327623 86948 net.cpp:411] data -> data
  850. I0114 17:30:19.327630 86948 net.cpp:411] data -> label
  851. I0114 17:30:19.327636 86948 hdf5_data_layer.cpp:79] Loading list of HDF5 filenames from: /home/fe/anilil/caffe/models/mv16f/test.txt
  852. I0114 17:30:19.327733 86948 hdf5_data_layer.cpp:93] Number of HDF5 files: 205
  853. I0114 17:30:20.281930 86948 net.cpp:150] Setting up data
  854. I0114 17:30:20.281970 86948 net.cpp:157] Top shape: 50 48 58 58 (8073600)
  855. I0114 17:30:20.281988 86948 net.cpp:157] Top shape: 50 1 (50)
  856. I0114 17:30:20.281991 86948 net.cpp:165] Memory required for data: 32294600
  857. I0114 17:30:20.281996 86948 layer_factory.hpp:77] Creating layer label_data_1_split
  858. I0114 17:30:20.282017 86948 net.cpp:106] Creating Layer label_data_1_split
  859. I0114 17:30:20.282021 86948 net.cpp:454] label_data_1_split <- label
  860. I0114 17:30:20.282027 86948 net.cpp:411] label_data_1_split -> label_data_1_split_0
  861. I0114 17:30:20.282037 86948 net.cpp:411] label_data_1_split -> label_data_1_split_1
  862. I0114 17:30:20.282089 86948 net.cpp:150] Setting up label_data_1_split
  863. I0114 17:30:20.282109 86948 net.cpp:157] Top shape: 50 1 (50)
  864. I0114 17:30:20.282112 86948 net.cpp:157] Top shape: 50 1 (50)
  865. I0114 17:30:20.282126 86948 net.cpp:165] Memory required for data: 32295000
  866. I0114 17:30:20.282130 86948 layer_factory.hpp:77] Creating layer conv1
  867. I0114 17:30:20.282142 86948 net.cpp:106] Creating Layer conv1
  868. I0114 17:30:20.282146 86948 net.cpp:454] conv1 <- data
  869. I0114 17:30:20.282151 86948 net.cpp:411] conv1 -> conv1
  870. I0114 17:30:20.283648 86948 net.cpp:150] Setting up conv1
  871. I0114 17:30:20.283661 86948 net.cpp:157] Top shape: 50 64 56 56 (10035200)
  872. I0114 17:30:20.283664 86948 net.cpp:165] Memory required for data: 72435800
  873. I0114 17:30:20.283674 86948 layer_factory.hpp:77] Creating layer relu1
  874. I0114 17:30:20.283679 86948 net.cpp:106] Creating Layer relu1
  875. I0114 17:30:20.283682 86948 net.cpp:454] relu1 <- conv1
  876. I0114 17:30:20.283686 86948 net.cpp:397] relu1 -> conv1 (in-place)
  877. I0114 17:30:20.283843 86948 net.cpp:150] Setting up relu1
  878. I0114 17:30:20.283851 86948 net.cpp:157] Top shape: 50 64 56 56 (10035200)
  879. I0114 17:30:20.283865 86948 net.cpp:165] Memory required for data: 112576600
  880. I0114 17:30:20.283869 86948 layer_factory.hpp:77] Creating layer pool1
  881. I0114 17:30:20.283875 86948 net.cpp:106] Creating Layer pool1
  882. I0114 17:30:20.283879 86948 net.cpp:454] pool1 <- conv1
  883. I0114 17:30:20.283882 86948 net.cpp:411] pool1 -> pool1
  884. I0114 17:30:20.284235 86948 net.cpp:150] Setting up pool1
  885. I0114 17:30:20.284245 86948 net.cpp:157] Top shape: 50 64 55 55 (9680000)
  886. I0114 17:30:20.284247 86948 net.cpp:165] Memory required for data: 151296600
  887. I0114 17:30:20.284250 86948 layer_factory.hpp:77] Creating layer conv2
  888. I0114 17:30:20.284260 86948 net.cpp:106] Creating Layer conv2
  889. I0114 17:30:20.284262 86948 net.cpp:454] conv2 <- pool1
  890. I0114 17:30:20.284266 86948 net.cpp:411] conv2 -> conv2
  891. I0114 17:30:20.285594 86948 net.cpp:150] Setting up conv2
  892. I0114 17:30:20.285606 86948 net.cpp:157] Top shape: 50 128 53 53 (17977600)
  893. I0114 17:30:20.285609 86948 net.cpp:165] Memory required for data: 223207000
  894. I0114 17:30:20.285617 86948 layer_factory.hpp:77] Creating layer relu2
  895. I0114 17:30:20.285621 86948 net.cpp:106] Creating Layer relu2
  896. I0114 17:30:20.285624 86948 net.cpp:454] relu2 <- conv2
  897. I0114 17:30:20.285629 86948 net.cpp:397] relu2 -> conv2 (in-place)
  898. I0114 17:30:20.285780 86948 net.cpp:150] Setting up relu2
  899. I0114 17:30:20.285787 86948 net.cpp:157] Top shape: 50 128 53 53 (17977600)
  900. I0114 17:30:20.285801 86948 net.cpp:165] Memory required for data: 295117400
  901. I0114 17:30:20.285804 86948 layer_factory.hpp:77] Creating layer pool2
  902. I0114 17:30:20.285809 86948 net.cpp:106] Creating Layer pool2
  903. I0114 17:30:20.285812 86948 net.cpp:454] pool2 <- conv2
  904. I0114 17:30:20.285816 86948 net.cpp:411] pool2 -> pool2
  905. I0114 17:30:20.286202 86948 net.cpp:150] Setting up pool2
  906. I0114 17:30:20.286224 86948 net.cpp:157] Top shape: 50 128 27 27 (4665600)
  907. I0114 17:30:20.286226 86948 net.cpp:165] Memory required for data: 313779800
  908. I0114 17:30:20.286231 86948 layer_factory.hpp:77] Creating layer conv3
  909. I0114 17:30:20.286250 86948 net.cpp:106] Creating Layer conv3
  910. I0114 17:30:20.286253 86948 net.cpp:454] conv3 <- pool2
  911. I0114 17:30:20.286259 86948 net.cpp:411] conv3 -> conv3
  912. I0114 17:30:20.289516 86948 net.cpp:150] Setting up conv3
  913. I0114 17:30:20.289530 86948 net.cpp:157] Top shape: 50 256 25 25 (8000000)
  914. I0114 17:30:20.289532 86948 net.cpp:165] Memory required for data: 345779800
  915. I0114 17:30:20.289541 86948 layer_factory.hpp:77] Creating layer relu3
  916. I0114 17:30:20.289547 86948 net.cpp:106] Creating Layer relu3
  917. I0114 17:30:20.289551 86948 net.cpp:454] relu3 <- conv3
  918. I0114 17:30:20.289554 86948 net.cpp:397] relu3 -> conv3 (in-place)
  919. I0114 17:30:20.289873 86948 net.cpp:150] Setting up relu3
  920. I0114 17:30:20.289896 86948 net.cpp:157] Top shape: 50 256 25 25 (8000000)
  921. I0114 17:30:20.289898 86948 net.cpp:165] Memory required for data: 377779800
  922. I0114 17:30:20.289901 86948 layer_factory.hpp:77] Creating layer pool3
  923. I0114 17:30:20.289919 86948 net.cpp:106] Creating Layer pool3
  924. I0114 17:30:20.289922 86948 net.cpp:454] pool3 <- conv3
  925. I0114 17:30:20.289927 86948 net.cpp:411] pool3 -> pool3
  926. I0114 17:30:20.290241 86948 net.cpp:150] Setting up pool3
  927. I0114 17:30:20.290263 86948 net.cpp:157] Top shape: 50 256 13 13 (2163200)
  928. I0114 17:30:20.290266 86948 net.cpp:165] Memory required for data: 386432600
  929. I0114 17:30:20.290269 86948 layer_factory.hpp:77] Creating layer conv4
  930. I0114 17:30:20.290292 86948 net.cpp:106] Creating Layer conv4
  931. I0114 17:30:20.290294 86948 net.cpp:454] conv4 <- pool3
  932. I0114 17:30:20.290299 86948 net.cpp:411] conv4 -> conv4
  933. I0114 17:30:20.296136 86948 net.cpp:150] Setting up conv4
  934. I0114 17:30:20.296149 86948 net.cpp:157] Top shape: 50 256 11 11 (1548800)
  935. I0114 17:30:20.296152 86948 net.cpp:165] Memory required for data: 392627800
  936. I0114 17:30:20.296159 86948 layer_factory.hpp:77] Creating layer relu4
  937. I0114 17:30:20.296165 86948 net.cpp:106] Creating Layer relu4
  938. I0114 17:30:20.296169 86948 net.cpp:454] relu4 <- conv4
  939. I0114 17:30:20.296172 86948 net.cpp:397] relu4 -> conv4 (in-place)
  940. I0114 17:30:20.296542 86948 net.cpp:150] Setting up relu4
  941. I0114 17:30:20.296553 86948 net.cpp:157] Top shape: 50 256 11 11 (1548800)
  942. I0114 17:30:20.296567 86948 net.cpp:165] Memory required for data: 398823000
  943. I0114 17:30:20.296571 86948 layer_factory.hpp:77] Creating layer pool4
  944. I0114 17:30:20.296589 86948 net.cpp:106] Creating Layer pool4
  945. I0114 17:30:20.296592 86948 net.cpp:454] pool4 <- conv4
  946. I0114 17:30:20.296598 86948 net.cpp:411] pool4 -> pool4
  947. I0114 17:30:20.296790 86948 net.cpp:150] Setting up pool4
  948. I0114 17:30:20.296799 86948 net.cpp:157] Top shape: 50 256 6 6 (460800)
  949. I0114 17:30:20.296813 86948 net.cpp:165] Memory required for data: 400666200
  950. I0114 17:30:20.296816 86948 layer_factory.hpp:77] Creating layer conv5
  951. I0114 17:30:20.296838 86948 net.cpp:106] Creating Layer conv5
  952. I0114 17:30:20.296841 86948 net.cpp:454] conv5 <- pool4
  953. I0114 17:30:20.296847 86948 net.cpp:411] conv5 -> conv5
  954. I0114 17:30:20.302322 86948 net.cpp:150] Setting up conv5
  955. I0114 17:30:20.302346 86948 net.cpp:157] Top shape: 50 256 4 4 (204800)
  956. I0114 17:30:20.302350 86948 net.cpp:165] Memory required for data: 401485400
  957. I0114 17:30:20.302369 86948 layer_factory.hpp:77] Creating layer relu5
  958. I0114 17:30:20.302376 86948 net.cpp:106] Creating Layer relu5
  959. I0114 17:30:20.302379 86948 net.cpp:454] relu5 <- conv5
  960. I0114 17:30:20.302383 86948 net.cpp:397] relu5 -> conv5 (in-place)
  961. I0114 17:30:20.302727 86948 net.cpp:150] Setting up relu5
  962. I0114 17:30:20.302738 86948 net.cpp:157] Top shape: 50 256 4 4 (204800)
  963. I0114 17:30:20.302741 86948 net.cpp:165] Memory required for data: 402304600
  964. I0114 17:30:20.302743 86948 layer_factory.hpp:77] Creating layer pool5
  965. I0114 17:30:20.302752 86948 net.cpp:106] Creating Layer pool5
  966. I0114 17:30:20.302755 86948 net.cpp:454] pool5 <- conv5
  967. I0114 17:30:20.302762 86948 net.cpp:411] pool5 -> pool5
  968. I0114 17:30:20.302942 86948 net.cpp:150] Setting up pool5
  969. I0114 17:30:20.302963 86948 net.cpp:157] Top shape: 50 256 2 2 (51200)
  970. I0114 17:30:20.302965 86948 net.cpp:165] Memory required for data: 402509400
  971. I0114 17:30:20.302968 86948 layer_factory.hpp:77] Creating layer fc6
  972. I0114 17:30:20.302974 86948 net.cpp:106] Creating Layer fc6
  973. I0114 17:30:20.302978 86948 net.cpp:454] fc6 <- pool5
  974. I0114 17:30:20.302995 86948 net.cpp:411] fc6 -> fc6
  975. I0114 17:30:20.331744 86948 net.cpp:150] Setting up fc6
  976. I0114 17:30:20.331787 86948 net.cpp:157] Top shape: 50 2048 (102400)
  977. I0114 17:30:20.331796 86948 net.cpp:165] Memory required for data: 402919000
  978. I0114 17:30:20.331814 86948 layer_factory.hpp:77] Creating layer relu6
  979. I0114 17:30:20.331830 86948 net.cpp:106] Creating Layer relu6
  980. I0114 17:30:20.331838 86948 net.cpp:454] relu6 <- fc6
  981. I0114 17:30:20.331852 86948 net.cpp:397] relu6 -> fc6 (in-place)
  982. I0114 17:30:20.332582 86948 net.cpp:150] Setting up relu6
  983. I0114 17:30:20.332607 86948 net.cpp:157] Top shape: 50 2048 (102400)
  984. I0114 17:30:20.332613 86948 net.cpp:165] Memory required for data: 403328600
  985. I0114 17:30:20.332620 86948 layer_factory.hpp:77] Creating layer fc7
  986. I0114 17:30:20.332638 86948 net.cpp:106] Creating Layer fc7
  987. I0114 17:30:20.332645 86948 net.cpp:454] fc7 <- fc6
  988. I0114 17:30:20.332656 86948 net.cpp:411] fc7 -> fc7
  989. I0114 17:30:20.391887 86948 net.cpp:150] Setting up fc7
  990. I0114 17:30:20.391923 86948 net.cpp:157] Top shape: 50 2048 (102400)
  991. I0114 17:30:20.391929 86948 net.cpp:165] Memory required for data: 403738200
  992. I0114 17:30:20.391943 86948 layer_factory.hpp:77] Creating layer relu7
  993. I0114 17:30:20.391959 86948 net.cpp:106] Creating Layer relu7
  994. I0114 17:30:20.391966 86948 net.cpp:454] relu7 <- fc7
  995. I0114 17:30:20.391979 86948 net.cpp:397] relu7 -> fc7 (in-place)
  996. I0114 17:30:20.392313 86948 net.cpp:150] Setting up relu7
  997. I0114 17:30:20.392329 86948 net.cpp:157] Top shape: 50 2048 (102400)
  998. I0114 17:30:20.392334 86948 net.cpp:165] Memory required for data: 404147800
  999. I0114 17:30:20.392339 86948 layer_factory.hpp:77] Creating layer fc8
  1000. I0114 17:30:20.392351 86948 net.cpp:106] Creating Layer fc8
  1001. I0114 17:30:20.392356 86948 net.cpp:454] fc8 <- fc7
  1002. I0114 17:30:20.392366 86948 net.cpp:411] fc8 -> fc8
  1003. I0114 17:30:20.395522 86948 net.cpp:150] Setting up fc8
  1004. I0114 17:30:20.395540 86948 net.cpp:157] Top shape: 50 101 (5050)
  1005. I0114 17:30:20.395545 86948 net.cpp:165] Memory required for data: 404168000
  1006. I0114 17:30:20.395555 86948 layer_factory.hpp:77] Creating layer fc8_fc8_0_split
  1007. I0114 17:30:20.395565 86948 net.cpp:106] Creating Layer fc8_fc8_0_split
  1008. I0114 17:30:20.395570 86948 net.cpp:454] fc8_fc8_0_split <- fc8
  1009. I0114 17:30:20.395576 86948 net.cpp:411] fc8_fc8_0_split -> fc8_fc8_0_split_0
  1010. I0114 17:30:20.395611 86948 net.cpp:411] fc8_fc8_0_split -> fc8_fc8_0_split_1
  1011. I0114 17:30:20.395666 86948 net.cpp:150] Setting up fc8_fc8_0_split
  1012. I0114 17:30:20.395676 86948 net.cpp:157] Top shape: 50 101 (5050)
  1013. I0114 17:30:20.395681 86948 net.cpp:157] Top shape: 50 101 (5050)
  1014. I0114 17:30:20.395685 86948 net.cpp:165] Memory required for data: 404208400
  1015. I0114 17:30:20.395689 86948 layer_factory.hpp:77] Creating layer accuracy
  1016. I0114 17:30:20.395707 86948 net.cpp:106] Creating Layer accuracy
  1017. I0114 17:30:20.395712 86948 net.cpp:454] accuracy <- fc8_fc8_0_split_0
  1018. I0114 17:30:20.395719 86948 net.cpp:454] accuracy <- label_data_1_split_0
  1019. I0114 17:30:20.395727 86948 net.cpp:411] accuracy -> accuracy
  1020. I0114 17:30:20.395741 86948 net.cpp:150] Setting up accuracy
  1021. I0114 17:30:20.395746 86948 net.cpp:157] Top shape: (1)
  1022. I0114 17:30:20.395750 86948 net.cpp:165] Memory required for data: 404208404
  1023. I0114 17:30:20.395756 86948 layer_factory.hpp:77] Creating layer loss
  1024. I0114 17:30:20.395762 86948 net.cpp:106] Creating Layer loss
  1025. I0114 17:30:20.395766 86948 net.cpp:454] loss <- fc8_fc8_0_split_1
  1026. I0114 17:30:20.395772 86948 net.cpp:454] loss <- label_data_1_split_1
  1027. I0114 17:30:20.395781 86948 net.cpp:411] loss -> loss
  1028. I0114 17:30:20.395792 86948 layer_factory.hpp:77] Creating layer loss
  1029. I0114 17:30:20.396368 86948 net.cpp:150] Setting up loss
  1030. I0114 17:30:20.396384 86948 net.cpp:157] Top shape: (1)
  1031. I0114 17:30:20.396389 86948 net.cpp:160] with loss weight 1
  1032. I0114 17:30:20.396401 86948 net.cpp:165] Memory required for data: 404208408
  1033. I0114 17:30:20.396406 86948 net.cpp:226] loss needs backward computation.
  1034. I0114 17:30:20.396412 86948 net.cpp:228] accuracy does not need backward computation.
  1035. I0114 17:30:20.396417 86948 net.cpp:226] fc8_fc8_0_split needs backward computation.
  1036. I0114 17:30:20.396421 86948 net.cpp:226] fc8 needs backward computation.
  1037. I0114 17:30:20.396425 86948 net.cpp:226] relu7 needs backward computation.
  1038. I0114 17:30:20.396430 86948 net.cpp:226] fc7 needs backward computation.
  1039. I0114 17:30:20.396433 86948 net.cpp:226] relu6 needs backward computation.
  1040. I0114 17:30:20.396437 86948 net.cpp:226] fc6 needs backward computation.
  1041. I0114 17:30:20.396442 86948 net.cpp:226] pool5 needs backward computation.
  1042. I0114 17:30:20.396446 86948 net.cpp:226] relu5 needs backward computation.
  1043. I0114 17:30:20.396451 86948 net.cpp:226] conv5 needs backward computation.
  1044. I0114 17:30:20.396456 86948 net.cpp:226] pool4 needs backward computation.
  1045. I0114 17:30:20.396461 86948 net.cpp:226] relu4 needs backward computation.
  1046. I0114 17:30:20.396466 86948 net.cpp:226] conv4 needs backward computation.
  1047. I0114 17:30:20.396469 86948 net.cpp:226] pool3 needs backward computation.
  1048. I0114 17:30:20.396474 86948 net.cpp:226] relu3 needs backward computation.
  1049. I0114 17:30:20.396478 86948 net.cpp:226] conv3 needs backward computation.
  1050. I0114 17:30:20.396482 86948 net.cpp:226] pool2 needs backward computation.
  1051. I0114 17:30:20.396487 86948 net.cpp:226] relu2 needs backward computation.
  1052. I0114 17:30:20.396492 86948 net.cpp:226] conv2 needs backward computation.
  1053. I0114 17:30:20.396495 86948 net.cpp:226] pool1 needs backward computation.
  1054. I0114 17:30:20.396500 86948 net.cpp:226] relu1 needs backward computation.
  1055. I0114 17:30:20.396504 86948 net.cpp:226] conv1 needs backward computation.
  1056. I0114 17:30:20.396510 86948 net.cpp:228] label_data_1_split does not need backward computation.
  1057. I0114 17:30:20.396517 86948 net.cpp:228] data does not need backward computation.
  1058. I0114 17:30:20.396520 86948 net.cpp:270] This network produces output accuracy
  1059. I0114 17:30:20.396525 86948 net.cpp:270] This network produces output loss
  1060. I0114 17:30:20.396551 86948 net.cpp:283] Network initialization done.
  1061. I0114 17:30:20.396718 86948 solver.cpp:60] Solver scaffolding done.
  1062. I0114 17:30:20.397513 86948 caffe.cpp:128] Finetuning from models/mv16f/mv16f1__iter_5000.caffemodel
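This finetuning message, together with "Using GPUs 0, 1" at the top and the "GPUs pairs 0:1" line below, suggests the run was launched with the stock caffe binary along the lines of: caffe train -solver <solver prototxt> -gpu 0,1 -weights models/mv16f/mv16f1__iter_5000.caffemodel. The solver file name itself is never printed in the log, so it is left as a placeholder here.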
  1063. I0114 17:30:31.195835 86948 parallel.cpp:391] GPUs pairs 0:1
  1064. I0114 17:30:31.513195 86948 net.cpp:99] Sharing layer data from root net
  1065. I0114 17:30:31.514991 86948 net.cpp:143] Created top blob 0 (shape: 150 48 58 58 (24220800)) for shared layer data
  1066. I0114 17:30:31.515151 86948 net.cpp:143] Created top blob 1 (shape: 150 1 (150)) for shared layer data
  1067. I0114 17:30:31.810956 86948 parallel.cpp:419] Starting Optimization
  1068. I0114 17:30:31.811024 86948 solver.cpp:288] Solving mv_16f1
  1069. I0114 17:30:31.811029 86948 solver.cpp:289] Learning Rate Policy: fixed
  1070. I0114 17:30:31.811134 86948 solver.cpp:341] Iteration 0, Testing net (#0)
  1071. I0114 17:30:32.401675 86948 solver.cpp:409] Test net output #0: accuracy = 0.03
  1072. I0114 17:30:32.401713 86948 solver.cpp:409] Test net output #1: loss = 6.22612 (* 1 = 6.22612 loss)
  1073. I0114 17:30:32.615201 86948 solver.cpp:237] Iteration 0, loss = 6.00407
  1074. I0114 17:30:32.615250 86948 solver.cpp:253] Train net output #0: loss = 6.00407 (* 1 = 6.00407 loss)
  1075. I0114 17:30:32.930352 86948 sgd_solver.cpp:106] Iteration 0, lr = 0.001
  1076. I0114 17:53:19.297849 86948 solver.cpp:341] Iteration 500, Testing net (#0)
  1077. I0114 17:53:20.085278 86948 solver.cpp:409] Test net output #0: accuracy = 0.08
  1078. I0114 17:53:20.085350 86948 solver.cpp:409] Test net output #1: loss = 4.21846 (* 1 = 4.21846 loss)
  1079. I0114 17:53:33.010169 86948 solver.cpp:237] Iteration 500, loss = 3.1388
  1080. I0114 17:53:33.010236 86948 solver.cpp:253] Train net output #0: loss = 3.1388 (* 1 = 3.1388 loss)
  1081. I0114 17:53:33.311209 86948 sgd_solver.cpp:106] Iteration 500, lr = 0.001
  1082. I0114 18:15:45.039989 86948 solver.cpp:341] Iteration 1000, Testing net (#0)
  1083. I0114 18:15:45.830772 86948 solver.cpp:409] Test net output #0: accuracy = 0.106
  1084. I0114 18:15:45.830831 86948 solver.cpp:409] Test net output #1: loss = 4.29256 (* 1 = 4.29256 loss)
  1085. I0114 18:16:07.472872 86948 solver.cpp:237] Iteration 1000, loss = 2.89553
  1086. I0114 18:16:07.472937 86948 solver.cpp:253] Train net output #0: loss = 2.89553 (* 1 = 2.89553 loss)
  1087. I0114 18:16:07.806648 86948 sgd_solver.cpp:106] Iteration 1000, lr = 0.001
  1088. I0114 18:38:54.700490 86948 solver.cpp:341] Iteration 1500, Testing net (#0)
  1089. I0114 18:38:55.191038 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  1090. I0114 18:38:55.191095 86948 solver.cpp:409] Test net output #1: loss = 4.38897 (* 1 = 4.38897 loss)
  1091. I0114 18:39:08.405194 86948 solver.cpp:237] Iteration 1500, loss = 2.40281
  1092. I0114 18:39:08.405258 86948 solver.cpp:253] Train net output #0: loss = 2.40281 (* 1 = 2.40281 loss)
  1093. I0114 18:39:08.707820 86948 sgd_solver.cpp:106] Iteration 1500, lr = 0.001
  1094. I0114 19:01:43.483299 86948 solver.cpp:341] Iteration 2000, Testing net (#0)
  1095. I0114 19:01:57.033280 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1096. I0114 19:01:57.033339 86948 solver.cpp:409] Test net output #1: loss = 4.58599 (* 1 = 4.58599 loss)
  1097. I0114 19:02:15.622921 86948 solver.cpp:237] Iteration 2000, loss = 2.65678
  1098. I0114 19:02:15.623080 86948 solver.cpp:253] Train net output #0: loss = 2.65678 (* 1 = 2.65678 loss)
  1099. I0114 19:02:16.000957 86948 sgd_solver.cpp:106] Iteration 2000, lr = 0.001
  1100. I0114 19:25:49.882880 86948 solver.cpp:341] Iteration 2500, Testing net (#0)
  1101. I0114 19:25:50.668056 86948 solver.cpp:409] Test net output #0: accuracy = 0.132
  1102. I0114 19:25:50.668131 86948 solver.cpp:409] Test net output #1: loss = 4.66895 (* 1 = 4.66895 loss)
  1103. I0114 19:26:04.158541 86948 solver.cpp:237] Iteration 2500, loss = 2.29065
  1104. I0114 19:26:04.158601 86948 solver.cpp:253] Train net output #0: loss = 2.29065 (* 1 = 2.29065 loss)
  1105. I0114 19:26:04.158622 86948 sgd_solver.cpp:106] Iteration 2500, lr = 0.001
  1106. I0114 19:48:19.053063 86948 solver.cpp:341] Iteration 3000, Testing net (#0)
  1107. I0114 19:48:19.546254 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1108. I0114 19:48:19.546349 86948 solver.cpp:409] Test net output #1: loss = 5.19176 (* 1 = 5.19176 loss)
  1109. I0114 19:48:32.406949 86948 solver.cpp:237] Iteration 3000, loss = 1.92223
  1110. I0114 19:48:32.407004 86948 solver.cpp:253] Train net output #0: loss = 1.92223 (* 1 = 1.92223 loss)
  1111. I0114 19:48:32.708853 86948 sgd_solver.cpp:106] Iteration 3000, lr = 0.001
  1112. I0114 20:11:15.198561 86948 solver.cpp:341] Iteration 3500, Testing net (#0)
  1113. I0114 20:11:15.988100 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1114. I0114 20:11:15.988169 86948 solver.cpp:409] Test net output #1: loss = 4.85761 (* 1 = 4.85761 loss)
  1115. I0114 20:11:29.490351 86948 solver.cpp:237] Iteration 3500, loss = 1.88295
  1116. I0114 20:11:29.490416 86948 solver.cpp:253] Train net output #0: loss = 1.88295 (* 1 = 1.88295 loss)
  1117. I0114 20:11:29.864094 86948 sgd_solver.cpp:106] Iteration 3500, lr = 0.001
  1118. I0114 20:33:36.204324 86948 solver.cpp:341] Iteration 4000, Testing net (#0)
  1119. I0114 20:33:49.203364 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  1120. I0114 20:33:49.203444 86948 solver.cpp:409] Test net output #1: loss = 5.02526 (* 1 = 5.02526 loss)
  1121. I0114 20:34:01.464068 86948 solver.cpp:237] Iteration 4000, loss = 1.94949
  1122. I0114 20:34:01.464118 86948 solver.cpp:253] Train net output #0: loss = 1.94949 (* 1 = 1.94949 loss)
  1123. I0114 20:34:01.813761 86948 sgd_solver.cpp:106] Iteration 4000, lr = 0.001
  1124. I0114 20:56:38.518110 86948 solver.cpp:341] Iteration 4500, Testing net (#0)
  1125. I0114 20:56:39.302815 86948 solver.cpp:409] Test net output #0: accuracy = 0.146
  1126. I0114 20:56:39.302886 86948 solver.cpp:409] Test net output #1: loss = 5.19903 (* 1 = 5.19903 loss)
  1127. I0114 20:56:56.997396 86948 solver.cpp:237] Iteration 4500, loss = 1.50638
  1128. I0114 20:56:56.997449 86948 solver.cpp:253] Train net output #0: loss = 1.50638 (* 1 = 1.50638 loss)
  1129. I0114 20:56:57.367223 86948 sgd_solver.cpp:106] Iteration 4500, lr = 0.001
  1130. I0114 21:19:51.173880 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_5000.caffemodel
  1131. I0114 21:19:54.054301 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_5000.solverstate
  1132. I0114 21:19:54.105180 86948 solver.cpp:341] Iteration 5000, Testing net (#0)
  1133. I0114 21:19:54.591950 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  1134. I0114 21:19:54.592030 86948 solver.cpp:409] Test net output #1: loss = 5.67052 (* 1 = 5.67052 loss)
  1135. I0114 21:20:07.774837 86948 solver.cpp:237] Iteration 5000, loss = 1.57842
  1136. I0114 21:20:07.774885 86948 solver.cpp:253] Train net output #0: loss = 1.57842 (* 1 = 1.57842 loss)
  1137. I0114 21:20:08.038023 86948 sgd_solver.cpp:106] Iteration 5000, lr = 0.001
  1138. I0114 21:43:29.756516 86948 solver.cpp:341] Iteration 5500, Testing net (#0)
  1139. I0114 21:43:30.251178 86948 solver.cpp:409] Test net output #0: accuracy = 0.134
  1140. I0114 21:43:30.251236 86948 solver.cpp:409] Test net output #1: loss = 6.00113 (* 1 = 6.00113 loss)
  1141. I0114 21:43:42.861708 86948 solver.cpp:237] Iteration 5500, loss = 1.49909
  1142. I0114 21:43:42.861762 86948 solver.cpp:253] Train net output #0: loss = 1.49909 (* 1 = 1.49909 loss)
  1143. I0114 21:43:43.164937 86948 sgd_solver.cpp:106] Iteration 5500, lr = 0.001
  1144. I0114 22:05:53.412967 86948 solver.cpp:341] Iteration 6000, Testing net (#0)
  1145. I0114 22:06:06.204192 86948 solver.cpp:409] Test net output #0: accuracy = 0.134
  1146. I0114 22:06:06.204249 86948 solver.cpp:409] Test net output #1: loss = 6.14714 (* 1 = 6.14714 loss)
  1147. I0114 22:06:19.697239 86948 solver.cpp:237] Iteration 6000, loss = 1.08655
  1148. I0114 22:06:19.697300 86948 solver.cpp:253] Train net output #0: loss = 1.08655 (* 1 = 1.08655 loss)
  1149. I0114 22:06:19.999053 86948 sgd_solver.cpp:106] Iteration 6000, lr = 0.001
  1150. I0114 22:28:44.623858 86948 solver.cpp:341] Iteration 6500, Testing net (#0)
  1151. I0114 22:28:45.117502 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1152. I0114 22:28:45.117586 86948 solver.cpp:409] Test net output #1: loss = 6.22176 (* 1 = 6.22176 loss)
  1153. I0114 22:29:02.504175 86948 solver.cpp:237] Iteration 6500, loss = 1.09802
  1154. I0114 22:29:02.504230 86948 solver.cpp:253] Train net output #0: loss = 1.09802 (* 1 = 1.09802 loss)
  1155. I0114 22:29:02.504263 86948 sgd_solver.cpp:106] Iteration 6500, lr = 0.001
  1156. I0114 22:51:08.072340 86948 solver.cpp:341] Iteration 7000, Testing net (#0)
  1157. I0114 22:51:08.570003 86948 solver.cpp:409] Test net output #0: accuracy = 0.132
  1158. I0114 22:51:08.570077 86948 solver.cpp:409] Test net output #1: loss = 7.06356 (* 1 = 7.06356 loss)
  1159. I0114 22:51:21.792124 86948 solver.cpp:237] Iteration 7000, loss = 1.38146
  1160. I0114 22:51:21.792181 86948 solver.cpp:253] Train net output #0: loss = 1.38146 (* 1 = 1.38146 loss)
  1161. I0114 22:51:22.154963 86948 sgd_solver.cpp:106] Iteration 7000, lr = 0.001
  1162. I0114 23:13:57.562847 86948 solver.cpp:341] Iteration 7500, Testing net (#0)
  1163. I0114 23:13:58.056743 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  1164. I0114 23:13:58.056813 86948 solver.cpp:409] Test net output #1: loss = 6.39689 (* 1 = 6.39689 loss)
  1165. I0114 23:14:19.756793 86948 solver.cpp:237] Iteration 7500, loss = 1.23898
  1166. I0114 23:14:19.756858 86948 solver.cpp:253] Train net output #0: loss = 1.23898 (* 1 = 1.23898 loss)
  1167. I0114 23:14:20.112123 86948 sgd_solver.cpp:106] Iteration 7500, lr = 0.001
  1168. I0114 23:37:58.058513 86948 solver.cpp:341] Iteration 8000, Testing net (#0)
  1169. I0114 23:38:11.531455 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1170. I0114 23:38:11.531529 86948 solver.cpp:409] Test net output #1: loss = 6.81792 (* 1 = 6.81792 loss)
  1171. I0114 23:38:25.274824 86948 solver.cpp:237] Iteration 8000, loss = 1.02914
  1172. I0114 23:38:25.274878 86948 solver.cpp:253] Train net output #0: loss = 1.02914 (* 1 = 1.02914 loss)
  1173. I0114 23:38:25.577234 86948 sgd_solver.cpp:106] Iteration 8000, lr = 0.001
  1174. I0115 00:05:17.607841 86948 solver.cpp:341] Iteration 8500, Testing net (#0)
  1175. I0115 00:05:18.392987 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1176. I0115 00:05:18.393034 86948 solver.cpp:409] Test net output #1: loss = 6.75926 (* 1 = 6.75926 loss)
  1177. I0115 00:05:45.961122 86948 solver.cpp:237] Iteration 8500, loss = 1.06676
  1178. I0115 00:05:45.961179 86948 solver.cpp:253] Train net output #0: loss = 1.06676 (* 1 = 1.06676 loss)
  1179. I0115 00:05:46.262384 86948 sgd_solver.cpp:106] Iteration 8500, lr = 0.001
  1180. I0115 00:32:18.132803 86948 solver.cpp:341] Iteration 9000, Testing net (#0)
  1181. I0115 00:32:18.625879 86948 solver.cpp:409] Test net output #0: accuracy = 0.134
  1182. I0115 00:32:18.625929 86948 solver.cpp:409] Test net output #1: loss = 7.14524 (* 1 = 7.14524 loss)
  1183. I0115 00:32:30.816737 86948 solver.cpp:237] Iteration 9000, loss = 0.860762
  1184. I0115 00:32:30.816799 86948 solver.cpp:253] Train net output #0: loss = 0.860762 (* 1 = 0.860762 loss)
  1185. I0115 00:32:31.143739 86948 sgd_solver.cpp:106] Iteration 9000, lr = 0.001
  1186. I0115 01:00:46.037446 86948 solver.cpp:341] Iteration 9500, Testing net (#0)
  1187. I0115 01:00:46.825520 86948 solver.cpp:409] Test net output #0: accuracy = 0.148
  1188. I0115 01:00:46.825567 86948 solver.cpp:409] Test net output #1: loss = 7.651 (* 1 = 7.651 loss)
  1189. I0115 01:01:12.079197 86948 solver.cpp:237] Iteration 9500, loss = 0.76086
  1190. I0115 01:01:12.079242 86948 solver.cpp:253] Train net output #0: loss = 0.76086 (* 1 = 0.76086 loss)
  1191. I0115 01:01:12.424202 86948 sgd_solver.cpp:106] Iteration 9500, lr = 0.001
  1192. I0115 01:23:29.878836 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_10000.caffemodel
  1193. I0115 01:23:31.686995 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_10000.solverstate
  1194. I0115 01:23:31.741595 86948 solver.cpp:341] Iteration 10000, Testing net (#0)
  1195. I0115 01:23:53.724689 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1196. I0115 01:23:53.724733 86948 solver.cpp:409] Test net output #1: loss = 7.54584 (* 1 = 7.54584 loss)
  1197. I0115 01:24:14.223534 86948 solver.cpp:237] Iteration 10000, loss = 0.774985
  1198. I0115 01:24:14.223733 86948 solver.cpp:253] Train net output #0: loss = 0.774985 (* 1 = 0.774985 loss)
  1199. I0115 01:24:14.602334 86948 sgd_solver.cpp:106] Iteration 10000, lr = 0.001
  1200. I0115 01:47:00.340564 86948 solver.cpp:341] Iteration 10500, Testing net (#0)
  1201. I0115 01:47:01.126672 86948 solver.cpp:409] Test net output #0: accuracy = 0.124
  1202. I0115 01:47:01.126751 86948 solver.cpp:409] Test net output #1: loss = 7.71585 (* 1 = 7.71585 loss)
  1203. I0115 01:47:13.787405 86948 solver.cpp:237] Iteration 10500, loss = 0.845633
  1204. I0115 01:47:13.787449 86948 solver.cpp:253] Train net output #0: loss = 0.845633 (* 1 = 0.845633 loss)
  1205. I0115 01:47:13.787469 86948 sgd_solver.cpp:106] Iteration 10500, lr = 0.001
  1206. I0115 02:15:35.751904 86948 solver.cpp:341] Iteration 11000, Testing net (#0)
  1207. I0115 02:15:36.241739 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1208. I0115 02:15:36.241788 86948 solver.cpp:409] Test net output #1: loss = 9.06797 (* 1 = 9.06797 loss)
  1209. I0115 02:15:48.996397 86948 solver.cpp:237] Iteration 11000, loss = 0.546424
  1210. I0115 02:15:48.996453 86948 solver.cpp:253] Train net output #0: loss = 0.546424 (* 1 = 0.546424 loss)
  1211. I0115 02:15:48.996476 86948 sgd_solver.cpp:106] Iteration 11000, lr = 0.001
  1212. I0115 02:43:31.419049 86948 solver.cpp:341] Iteration 11500, Testing net (#0)
  1213. I0115 02:43:32.210687 86948 solver.cpp:409] Test net output #0: accuracy = 0.104
  1214. I0115 02:43:32.210752 86948 solver.cpp:409] Test net output #1: loss = 8.80403 (* 1 = 8.80403 loss)
  1215. I0115 02:43:44.821365 86948 solver.cpp:237] Iteration 11500, loss = 0.706103
  1216. I0115 02:43:44.821415 86948 solver.cpp:253] Train net output #0: loss = 0.706103 (* 1 = 0.706103 loss)
  1217. I0115 02:43:45.123746 86948 sgd_solver.cpp:106] Iteration 11500, lr = 0.001
  1218. I0115 03:14:23.524358 86948 solver.cpp:341] Iteration 12000, Testing net (#0)
  1219. I0115 03:14:46.023252 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1220. I0115 03:14:46.023300 86948 solver.cpp:409] Test net output #1: loss = 8.83962 (* 1 = 8.83962 loss)
  1221. I0115 03:14:59.884909 86948 solver.cpp:237] Iteration 12000, loss = 0.786133
  1222. I0115 03:14:59.885141 86948 solver.cpp:253] Train net output #0: loss = 0.786133 (* 1 = 0.786133 loss)
  1223. I0115 03:15:00.243067 86948 sgd_solver.cpp:106] Iteration 12000, lr = 0.001
  1224. I0115 03:41:47.587093 86948 solver.cpp:341] Iteration 12500, Testing net (#0)
  1225. I0115 03:41:48.073796 86948 solver.cpp:409] Test net output #0: accuracy = 0.098
  1226. I0115 03:41:48.073854 86948 solver.cpp:409] Test net output #1: loss = 9.32967 (* 1 = 9.32967 loss)
  1227. I0115 03:42:09.332177 86948 solver.cpp:237] Iteration 12500, loss = 0.707963
  1228. I0115 03:42:09.332263 86948 solver.cpp:253] Train net output #0: loss = 0.707963 (* 1 = 0.707963 loss)
  1229. I0115 03:42:09.676595 86948 sgd_solver.cpp:106] Iteration 12500, lr = 0.001
  1230. I0115 04:06:02.889426 86948 solver.cpp:341] Iteration 13000, Testing net (#0)
  1231. I0115 04:06:03.378060 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1232. I0115 04:06:03.378123 86948 solver.cpp:409] Test net output #1: loss = 9.17314 (* 1 = 9.17314 loss)
  1233. I0115 04:06:22.668329 86948 solver.cpp:237] Iteration 13000, loss = 0.509333
  1234. I0115 04:06:22.668419 86948 solver.cpp:253] Train net output #0: loss = 0.509333 (* 1 = 0.509333 loss)
  1235. I0115 04:06:22.970981 86948 sgd_solver.cpp:106] Iteration 13000, lr = 0.001
  1236. I0115 04:32:01.955811 86948 solver.cpp:341] Iteration 13500, Testing net (#0)
  1237. I0115 04:32:02.444028 86948 solver.cpp:409] Test net output #0: accuracy = 0.11
  1238. I0115 04:32:02.444092 86948 solver.cpp:409] Test net output #1: loss = 9.12479 (* 1 = 9.12479 loss)
  1239. I0115 04:32:20.073575 86948 solver.cpp:237] Iteration 13500, loss = 0.500275
  1240. I0115 04:32:20.073634 86948 solver.cpp:253] Train net output #0: loss = 0.500275 (* 1 = 0.500275 loss)
  1241. I0115 04:32:20.435401 86948 sgd_solver.cpp:106] Iteration 13500, lr = 0.001
  1242. I0115 04:57:03.939975 86948 solver.cpp:341] Iteration 14000, Testing net (#0)
  1243. I0115 04:57:24.463392 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  1244. I0115 04:57:24.463464 86948 solver.cpp:409] Test net output #1: loss = 9.76042 (* 1 = 9.76042 loss)
  1245. I0115 04:57:48.684335 86948 solver.cpp:237] Iteration 14000, loss = 0.390494
  1246. I0115 04:57:48.684521 86948 solver.cpp:253] Train net output #0: loss = 0.390494 (* 1 = 0.390494 loss)
  1247. I0115 04:57:49.035643 86948 sgd_solver.cpp:106] Iteration 14000, lr = 0.001
  1248. I0115 05:26:57.706825 86948 solver.cpp:341] Iteration 14500, Testing net (#0)
  1249. I0115 05:26:58.193374 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1250. I0115 05:26:58.193425 86948 solver.cpp:409] Test net output #1: loss = 10.1548 (* 1 = 10.1548 loss)
  1251. I0115 05:27:13.510231 86948 solver.cpp:237] Iteration 14500, loss = 0.692382
  1252. I0115 05:27:13.510294 86948 solver.cpp:253] Train net output #0: loss = 0.692382 (* 1 = 0.692382 loss)
  1253. I0115 05:27:13.812265 86948 sgd_solver.cpp:106] Iteration 14500, lr = 0.001
  1254. I0115 05:55:32.336146 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_15000.caffemodel
  1255. I0115 05:55:34.037616 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_15000.solverstate
  1256. I0115 05:55:34.086087 86948 solver.cpp:341] Iteration 15000, Testing net (#0)
  1257. I0115 05:55:34.573166 86948 solver.cpp:409] Test net output #0: accuracy = 0.106
  1258. I0115 05:55:34.573215 86948 solver.cpp:409] Test net output #1: loss = 9.8564 (* 1 = 9.8564 loss)
  1259. I0115 05:55:55.581955 86948 solver.cpp:237] Iteration 15000, loss = 0.507724
  1260. I0115 05:55:55.582023 86948 solver.cpp:253] Train net output #0: loss = 0.507724 (* 1 = 0.507724 loss)
  1261. I0115 05:55:55.884019 86948 sgd_solver.cpp:106] Iteration 15000, lr = 0.001
  1262. I0115 06:23:44.102008 86948 solver.cpp:341] Iteration 15500, Testing net (#0)
  1263. I0115 06:23:44.889958 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  1264. I0115 06:23:44.890012 86948 solver.cpp:409] Test net output #1: loss = 10.4728 (* 1 = 10.4728 loss)
  1265. I0115 06:24:01.230536 86948 solver.cpp:237] Iteration 15500, loss = 0.417666
  1266. I0115 06:24:01.230588 86948 solver.cpp:253] Train net output #0: loss = 0.417666 (* 1 = 0.417666 loss)
  1267. I0115 06:24:01.606312 86948 sgd_solver.cpp:106] Iteration 15500, lr = 0.001
  1268. I0115 06:48:03.508607 86948 solver.cpp:341] Iteration 16000, Testing net (#0)
  1269. I0115 06:48:26.399644 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1270. I0115 06:48:26.399693 86948 solver.cpp:409] Test net output #1: loss = 10.979 (* 1 = 10.979 loss)
  1271. I0115 06:48:27.674831 86948 solver.cpp:237] Iteration 16000, loss = 0.40489
  1272. I0115 06:48:27.674890 86948 solver.cpp:253] Train net output #0: loss = 0.40489 (* 1 = 0.40489 loss)
  1273. I0115 06:48:28.055383 86948 sgd_solver.cpp:106] Iteration 16000, lr = 0.001
  1274. I0115 07:15:44.285596 86948 solver.cpp:341] Iteration 16500, Testing net (#0)
  1275. I0115 07:15:45.069802 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1276. I0115 07:15:45.069869 86948 solver.cpp:409] Test net output #1: loss = 10.3104 (* 1 = 10.3104 loss)
  1277. I0115 07:16:09.445392 86948 solver.cpp:237] Iteration 16500, loss = 0.29393
  1278. I0115 07:16:09.445464 86948 solver.cpp:253] Train net output #0: loss = 0.29393 (* 1 = 0.29393 loss)
  1279. I0115 07:16:09.445511 86948 sgd_solver.cpp:106] Iteration 16500, lr = 0.001
  1280. I0115 07:38:32.618374 86948 solver.cpp:341] Iteration 17000, Testing net (#0)
  1281. I0115 07:38:33.405944 86948 solver.cpp:409] Test net output #0: accuracy = 0.124
  1282. I0115 07:38:33.405997 86948 solver.cpp:409] Test net output #1: loss = 11.4087 (* 1 = 11.4087 loss)
  1283. I0115 07:39:02.668887 86948 solver.cpp:237] Iteration 17000, loss = 0.475379
  1284. I0115 07:39:02.669137 86948 solver.cpp:253] Train net output #0: loss = 0.475379 (* 1 = 0.475379 loss)
  1285. I0115 07:39:03.016024 86948 sgd_solver.cpp:106] Iteration 17000, lr = 0.001
  1286. I0115 08:07:22.610738 86948 solver.cpp:341] Iteration 17500, Testing net (#0)
  1287. I0115 08:07:23.399281 86948 solver.cpp:409] Test net output #0: accuracy = 0.098
  1288. I0115 08:07:23.399346 86948 solver.cpp:409] Test net output #1: loss = 11.3343 (* 1 = 11.3343 loss)
  1289. I0115 08:07:35.556828 86948 solver.cpp:237] Iteration 17500, loss = 0.412808
  1290. I0115 08:07:35.556901 86948 solver.cpp:253] Train net output #0: loss = 0.412808 (* 1 = 0.412808 loss)
  1291. I0115 08:07:35.929672 86948 sgd_solver.cpp:106] Iteration 17500, lr = 0.001
  1292. I0115 08:35:21.397052 86948 solver.cpp:341] Iteration 18000, Testing net (#0)
  1293. I0115 08:35:37.317168 86948 solver.cpp:409] Test net output #0: accuracy = 0.124
  1294. I0115 08:35:37.317226 86948 solver.cpp:409] Test net output #1: loss = 11.6325 (* 1 = 11.6325 loss)
  1295. I0115 08:35:54.273187 86948 solver.cpp:237] Iteration 18000, loss = 0.330421
  1296. I0115 08:35:54.273433 86948 solver.cpp:253] Train net output #0: loss = 0.330421 (* 1 = 0.330421 loss)
  1297. I0115 08:35:54.644815 86948 sgd_solver.cpp:106] Iteration 18000, lr = 0.001
  1298. I0115 09:04:32.714617 86948 solver.cpp:341] Iteration 18500, Testing net (#0)
  1299. I0115 09:04:33.499600 86948 solver.cpp:409] Test net output #0: accuracy = 0.124
  1300. I0115 09:04:33.499655 86948 solver.cpp:409] Test net output #1: loss = 11.2051 (* 1 = 11.2051 loss)
  1301. I0115 09:04:45.729574 86948 solver.cpp:237] Iteration 18500, loss = 0.248244
  1302. I0115 09:04:45.729639 86948 solver.cpp:253] Train net output #0: loss = 0.248244 (* 1 = 0.248244 loss)
  1303. I0115 09:04:46.056254 86948 sgd_solver.cpp:106] Iteration 18500, lr = 0.001
  1304. I0115 09:33:10.057044 86948 solver.cpp:341] Iteration 19000, Testing net (#0)
  1305. I0115 09:33:10.552685 86948 solver.cpp:409] Test net output #0: accuracy = 0.088
  1306. I0115 09:33:10.552734 86948 solver.cpp:409] Test net output #1: loss = 12.5566 (* 1 = 12.5566 loss)
  1307. I0115 09:33:27.995590 86948 solver.cpp:237] Iteration 19000, loss = 0.353737
  1308. I0115 09:33:27.995659 86948 solver.cpp:253] Train net output #0: loss = 0.353737 (* 1 = 0.353737 loss)
  1309. I0115 09:33:28.297780 86948 sgd_solver.cpp:106] Iteration 19000, lr = 0.001
  1310. I0115 09:59:55.573832 86948 solver.cpp:341] Iteration 19500, Testing net (#0)
  1311. I0115 09:59:56.362241 86948 solver.cpp:409] Test net output #0: accuracy = 0.1
  1312. I0115 09:59:56.362289 86948 solver.cpp:409] Test net output #1: loss = 12.3916 (* 1 = 12.3916 loss)
  1313. I0115 10:00:18.836007 86948 solver.cpp:237] Iteration 19500, loss = 0.343227
  1314. I0115 10:00:18.836064 86948 solver.cpp:253] Train net output #0: loss = 0.343227 (* 1 = 0.343227 loss)
  1315. I0115 10:00:19.138316 86948 sgd_solver.cpp:106] Iteration 19500, lr = 0.001
  1316. I0115 10:28:15.445359 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_20000.caffemodel
  1317. I0115 10:28:17.336303 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_20000.solverstate
  1318. I0115 10:28:17.381590 86948 solver.cpp:341] Iteration 20000, Testing net (#0)
  1319. I0115 10:28:39.768867 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1320. I0115 10:28:39.768931 86948 solver.cpp:409] Test net output #1: loss = 12.533 (* 1 = 12.533 loss)
  1321. I0115 10:28:54.931838 86948 solver.cpp:237] Iteration 20000, loss = 0.288482
  1322. I0115 10:28:54.932060 86948 solver.cpp:253] Train net output #0: loss = 0.288482 (* 1 = 0.288482 loss)
  1323. I0115 10:28:55.283051 86948 sgd_solver.cpp:106] Iteration 20000, lr = 0.001
  1324. I0115 10:57:32.207489 86948 solver.cpp:341] Iteration 20500, Testing net (#0)
  1325. I0115 10:57:32.996423 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1326. I0115 10:57:32.996495 86948 solver.cpp:409] Test net output #1: loss = 12.6864 (* 1 = 12.6864 loss)
  1327. I0115 10:57:47.032009 86948 solver.cpp:237] Iteration 20500, loss = 0.232661
  1328. I0115 10:57:47.032083 86948 solver.cpp:253] Train net output #0: loss = 0.232661 (* 1 = 0.232661 loss)
  1329. I0115 10:57:47.383950 86948 sgd_solver.cpp:106] Iteration 20500, lr = 0.001
  1330. I0115 11:25:23.241185 86948 solver.cpp:341] Iteration 21000, Testing net (#0)
  1331. I0115 11:25:24.027587 86948 solver.cpp:409] Test net output #0: accuracy = 0.088
  1332. I0115 11:25:24.027698 86948 solver.cpp:409] Test net output #1: loss = 13.6841 (* 1 = 13.6841 loss)
  1333. I0115 11:25:39.803107 86948 solver.cpp:237] Iteration 21000, loss = 0.163174
  1334. I0115 11:25:39.803169 86948 solver.cpp:253] Train net output #0: loss = 0.163174 (* 1 = 0.163174 loss)
  1335. I0115 11:25:40.157781 86948 sgd_solver.cpp:106] Iteration 21000, lr = 0.001
  1336. I0115 11:54:44.030809 86948 solver.cpp:341] Iteration 21500, Testing net (#0)
  1337. I0115 11:54:44.521996 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1338. I0115 11:54:44.522053 86948 solver.cpp:409] Test net output #1: loss = 12.5438 (* 1 = 12.5438 loss)
  1339. I0115 11:54:57.836741 86948 solver.cpp:237] Iteration 21500, loss = 0.339656
  1340. I0115 11:54:57.836797 86948 solver.cpp:253] Train net output #0: loss = 0.339656 (* 1 = 0.339656 loss)
  1341. I0115 11:54:58.139484 86948 sgd_solver.cpp:106] Iteration 21500, lr = 0.001
  1342. I0115 12:22:13.480896 86948 solver.cpp:341] Iteration 22000, Testing net (#0)
  1343. I0115 12:22:28.147697 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1344. I0115 12:22:28.147763 86948 solver.cpp:409] Test net output #1: loss = 12.3577 (* 1 = 12.3577 loss)
  1345. I0115 12:22:49.409201 86948 solver.cpp:237] Iteration 22000, loss = 0.13327
  1346. I0115 12:22:49.409493 86948 solver.cpp:253] Train net output #0: loss = 0.133271 (* 1 = 0.133271 loss)
  1347. I0115 12:22:49.774583 86948 sgd_solver.cpp:106] Iteration 22000, lr = 0.001
  1348. I0115 12:47:31.931730 86948 solver.cpp:341] Iteration 22500, Testing net (#0)
  1349. I0115 12:47:32.418171 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1350. I0115 12:47:32.418236 86948 solver.cpp:409] Test net output #1: loss = 13.1006 (* 1 = 13.1006 loss)
  1351. I0115 12:47:54.692035 86948 solver.cpp:237] Iteration 22500, loss = 0.171656
  1352. I0115 12:47:54.692101 86948 solver.cpp:253] Train net output #0: loss = 0.171656 (* 1 = 0.171656 loss)
  1353. I0115 12:47:55.034996 86948 sgd_solver.cpp:106] Iteration 22500, lr = 0.001
  1354. I0115 13:15:58.244537 86948 solver.cpp:341] Iteration 23000, Testing net (#0)
  1355. I0115 13:15:59.031538 86948 solver.cpp:409] Test net output #0: accuracy = 0.106
  1356. I0115 13:15:59.031599 86948 solver.cpp:409] Test net output #1: loss = 13.0743 (* 1 = 13.0743 loss)
  1357. I0115 13:16:18.517042 86948 solver.cpp:237] Iteration 23000, loss = 0.137712
  1358. I0115 13:16:18.517102 86948 solver.cpp:253] Train net output #0: loss = 0.137712 (* 1 = 0.137712 loss)
  1359. I0115 13:16:18.517133 86948 sgd_solver.cpp:106] Iteration 23000, lr = 0.001
  1360. I0115 13:45:23.165734 86948 solver.cpp:341] Iteration 23500, Testing net (#0)
  1361. I0115 13:45:23.656873 86948 solver.cpp:409] Test net output #0: accuracy = 0.092
  1362. I0115 13:45:23.656924 86948 solver.cpp:409] Test net output #1: loss = 13.9083 (* 1 = 13.9083 loss)
  1363. I0115 13:45:48.137435 86948 solver.cpp:237] Iteration 23500, loss = 0.265021
  1364. I0115 13:45:48.137493 86948 solver.cpp:253] Train net output #0: loss = 0.265021 (* 1 = 0.265021 loss)
  1365. I0115 13:45:48.466833 86948 sgd_solver.cpp:106] Iteration 23500, lr = 0.001
  1366. I0115 14:15:13.209718 86948 solver.cpp:341] Iteration 24000, Testing net (#0)
  1367. I0115 14:15:27.127425 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  1368. I0115 14:15:27.127475 86948 solver.cpp:409] Test net output #1: loss = 13.4618 (* 1 = 13.4618 loss)
  1369. I0115 14:15:52.682905 86948 solver.cpp:237] Iteration 24000, loss = 0.0797336
  1370. I0115 14:15:52.683089 86948 solver.cpp:253] Train net output #0: loss = 0.0797339 (* 1 = 0.0797339 loss)
  1371. I0115 14:15:53.035676 86948 sgd_solver.cpp:106] Iteration 24000, lr = 0.001
  1372. I0115 14:44:07.245959 86948 solver.cpp:341] Iteration 24500, Testing net (#0)
  1373. I0115 14:44:08.031157 86948 solver.cpp:409] Test net output #0: accuracy = 0.13
  1374. I0115 14:44:08.031208 86948 solver.cpp:409] Test net output #1: loss = 13.2707 (* 1 = 13.2707 loss)
  1375. I0115 14:44:20.815029 86948 solver.cpp:237] Iteration 24500, loss = 0.194503
  1376. I0115 14:44:20.815084 86948 solver.cpp:253] Train net output #0: loss = 0.194503 (* 1 = 0.194503 loss)
  1377. I0115 14:44:21.117302 86948 sgd_solver.cpp:106] Iteration 24500, lr = 0.001
  1378. I0115 15:13:06.487923 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_25000.caffemodel
  1379. I0115 15:13:08.724500 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_25000.solverstate
  1380. I0115 15:13:08.772209 86948 solver.cpp:341] Iteration 25000, Testing net (#0)
  1381. I0115 15:13:09.255282 86948 solver.cpp:409] Test net output #0: accuracy = 0.14
  1382. I0115 15:13:09.255313 86948 solver.cpp:409] Test net output #1: loss = 13.9014 (* 1 = 13.9014 loss)
  1383. I0115 15:13:28.586627 86948 solver.cpp:237] Iteration 25000, loss = 0.198213
  1384. I0115 15:13:28.586673 86948 solver.cpp:253] Train net output #0: loss = 0.198213 (* 1 = 0.198213 loss)
  1385. I0115 15:13:28.586694 86948 sgd_solver.cpp:106] Iteration 25000, lr = 0.001
  1386. I0115 15:39:11.728093 86948 solver.cpp:341] Iteration 25500, Testing net (#0)
  1387. I0115 15:39:12.220155 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1388. I0115 15:39:12.220222 86948 solver.cpp:409] Test net output #1: loss = 14.6033 (* 1 = 14.6033 loss)
  1389. I0115 15:39:13.502130 86948 solver.cpp:237] Iteration 25500, loss = 0.250862
  1390. I0115 15:39:13.502204 86948 solver.cpp:253] Train net output #0: loss = 0.250862 (* 1 = 0.250862 loss)
  1391. I0115 15:39:13.805196 86948 sgd_solver.cpp:106] Iteration 25500, lr = 0.001
  1392. I0115 16:04:51.572655 86948 solver.cpp:341] Iteration 26000, Testing net (#0)
  1393. I0115 16:05:10.281999 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1394. I0115 16:05:10.282049 86948 solver.cpp:409] Test net output #1: loss = 15.0046 (* 1 = 15.0046 loss)
  1395. I0115 16:05:32.037425 86948 solver.cpp:237] Iteration 26000, loss = 0.0719195
  1396. I0115 16:05:32.037680 86948 solver.cpp:253] Train net output #0: loss = 0.0719199 (* 1 = 0.0719199 loss)
  1397. I0115 16:05:32.395720 86948 sgd_solver.cpp:106] Iteration 26000, lr = 0.001
  1398. I0115 16:34:12.324597 86948 solver.cpp:341] Iteration 26500, Testing net (#0)
  1399. I0115 16:34:12.813323 86948 solver.cpp:409] Test net output #0: accuracy = 0.11
  1400. I0115 16:34:12.813388 86948 solver.cpp:409] Test net output #1: loss = 14.6345 (* 1 = 14.6345 loss)
  1401. I0115 16:34:29.782642 86948 solver.cpp:237] Iteration 26500, loss = 0.0966772
  1402. I0115 16:34:29.782699 86948 solver.cpp:253] Train net output #0: loss = 0.0966777 (* 1 = 0.0966777 loss)
  1403. I0115 16:34:30.084247 86948 sgd_solver.cpp:106] Iteration 26500, lr = 0.001
  1404. I0115 17:03:03.270098 86948 solver.cpp:341] Iteration 27000, Testing net (#0)
  1405. I0115 17:03:03.758162 86948 solver.cpp:409] Test net output #0: accuracy = 0.132
  1406. I0115 17:03:03.758208 86948 solver.cpp:409] Test net output #1: loss = 14.6089 (* 1 = 14.6089 loss)
  1407. I0115 17:03:24.800110 86948 solver.cpp:237] Iteration 27000, loss = 0.11815
  1408. I0115 17:03:24.800163 86948 solver.cpp:253] Train net output #0: loss = 0.118151 (* 1 = 0.118151 loss)
  1409. I0115 17:03:25.180635 86948 sgd_solver.cpp:106] Iteration 27000, lr = 0.001
  1410. I0115 17:30:54.107142 86948 solver.cpp:341] Iteration 27500, Testing net (#0)
  1411. I0115 17:30:54.893143 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1412. I0115 17:30:54.893173 86948 solver.cpp:409] Test net output #1: loss = 15.1023 (* 1 = 15.1023 loss)
  1413. I0115 17:31:15.332089 86948 solver.cpp:237] Iteration 27500, loss = 0.166894
  1414. I0115 17:31:15.332123 86948 solver.cpp:253] Train net output #0: loss = 0.166895 (* 1 = 0.166895 loss)
  1415. I0115 17:31:15.332142 86948 sgd_solver.cpp:106] Iteration 27500, lr = 0.001
  1416. I0115 17:59:33.270324 86948 solver.cpp:341] Iteration 28000, Testing net (#0)
  1417. I0115 17:59:47.750169 86948 solver.cpp:409] Test net output #0: accuracy = 0.098
  1418. I0115 17:59:47.750216 86948 solver.cpp:409] Test net output #1: loss = 15.66 (* 1 = 15.66 loss)
  1419. I0115 18:00:00.982518 86948 solver.cpp:237] Iteration 28000, loss = 0.0917245
  1420. I0115 18:00:00.982571 86948 solver.cpp:253] Train net output #0: loss = 0.0917249 (* 1 = 0.0917249 loss)
  1421. I0115 18:00:00.982594 86948 sgd_solver.cpp:106] Iteration 28000, lr = 0.001
  1422. I0115 18:27:58.396294 86948 solver.cpp:341] Iteration 28500, Testing net (#0)
  1423. I0115 18:27:58.884178 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1424. I0115 18:27:58.884222 86948 solver.cpp:409] Test net output #1: loss = 15.1945 (* 1 = 15.1945 loss)
  1425. I0115 18:28:10.991932 86948 solver.cpp:237] Iteration 28500, loss = 0.233415
  1426. I0115 18:28:10.991986 86948 solver.cpp:253] Train net output #0: loss = 0.233416 (* 1 = 0.233416 loss)
  1427. I0115 18:28:11.294566 86948 sgd_solver.cpp:106] Iteration 28500, lr = 0.001
  1428. I0115 18:53:29.752965 86948 solver.cpp:341] Iteration 29000, Testing net (#0)
  1429. I0115 18:53:30.241336 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1430. I0115 18:53:30.241379 86948 solver.cpp:409] Test net output #1: loss = 15.9175 (* 1 = 15.9175 loss)
  1431. I0115 18:53:45.822639 86948 solver.cpp:237] Iteration 29000, loss = 0.180933
  1432. I0115 18:53:45.822686 86948 solver.cpp:253] Train net output #0: loss = 0.180933 (* 1 = 0.180933 loss)
  1433. I0115 18:53:46.178261 86948 sgd_solver.cpp:106] Iteration 29000, lr = 0.001
  1434. I0115 19:20:57.301084 86948 solver.cpp:341] Iteration 29500, Testing net (#0)
  1435. I0115 19:20:58.086940 86948 solver.cpp:409] Test net output #0: accuracy = 0.108
  1436. I0115 19:20:58.086983 86948 solver.cpp:409] Test net output #1: loss = 15.0374 (* 1 = 15.0374 loss)
  1437. I0115 19:21:15.846012 86948 solver.cpp:237] Iteration 29500, loss = 0.126706
  1438. I0115 19:21:15.846066 86948 solver.cpp:253] Train net output #0: loss = 0.126706 (* 1 = 0.126706 loss)
  1439. I0115 19:21:16.226754 86948 sgd_solver.cpp:106] Iteration 29500, lr = 0.001
  1440. I0115 19:50:20.279408 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_30000.caffemodel
  1441. I0115 19:50:23.981377 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_30000.solverstate
  1442. I0115 19:50:24.027272 86948 solver.cpp:341] Iteration 30000, Testing net (#0)
  1443. I0115 19:50:41.670526 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1444. I0115 19:50:41.670569 86948 solver.cpp:409] Test net output #1: loss = 14.7405 (* 1 = 14.7405 loss)
  1445. I0115 19:51:10.223618 86948 solver.cpp:237] Iteration 30000, loss = 0.115344
  1446. I0115 19:51:10.223824 86948 solver.cpp:253] Train net output #0: loss = 0.115344 (* 1 = 0.115344 loss)
  1447. I0115 19:51:10.554566 86948 sgd_solver.cpp:106] Iteration 30000, lr = 0.001
  1448. I0115 20:19:14.663985 86948 solver.cpp:341] Iteration 30500, Testing net (#0)
  1449. I0115 20:19:15.150887 86948 solver.cpp:409] Test net output #0: accuracy = 0.108
  1450. I0115 20:19:15.150933 86948 solver.cpp:409] Test net output #1: loss = 14.5146 (* 1 = 14.5146 loss)
  1451. I0115 20:19:27.349344 86948 solver.cpp:237] Iteration 30500, loss = 0.176101
  1452. I0115 20:19:27.349388 86948 solver.cpp:253] Train net output #0: loss = 0.176101 (* 1 = 0.176101 loss)
  1453. I0115 20:19:27.715607 86948 sgd_solver.cpp:106] Iteration 30500, lr = 0.001
  1454. I0115 20:48:49.768007 86948 solver.cpp:341] Iteration 31000, Testing net (#0)
  1455. I0115 20:48:50.554081 86948 solver.cpp:409] Test net output #0: accuracy = 0.146
  1456. I0115 20:48:50.554113 86948 solver.cpp:409] Test net output #1: loss = 15.8795 (* 1 = 15.8795 loss)
  1457. I0115 20:49:03.957561 86948 solver.cpp:237] Iteration 31000, loss = 0.152509
  1458. I0115 20:49:03.957617 86948 solver.cpp:253] Train net output #0: loss = 0.15251 (* 1 = 0.15251 loss)
  1459. I0115 20:49:04.340080 86948 sgd_solver.cpp:106] Iteration 31000, lr = 0.001
  1460. I0115 21:17:05.424391 86948 solver.cpp:341] Iteration 31500, Testing net (#0)
  1461. I0115 21:17:06.212334 86948 solver.cpp:409] Test net output #0: accuracy = 0.14
  1462. I0115 21:17:06.212385 86948 solver.cpp:409] Test net output #1: loss = 15.1155 (* 1 = 15.1155 loss)
  1463. I0115 21:17:24.824414 86948 solver.cpp:237] Iteration 31500, loss = 0.106935
  1464. I0115 21:17:24.824450 86948 solver.cpp:253] Train net output #0: loss = 0.106935 (* 1 = 0.106935 loss)
  1465. I0115 21:17:24.824468 86948 sgd_solver.cpp:106] Iteration 31500, lr = 0.001
  1466. I0115 21:42:54.570103 86948 solver.cpp:341] Iteration 32000, Testing net (#0)
  1467. I0115 21:43:11.151145 86948 solver.cpp:409] Test net output #0: accuracy = 0.132
  1468. I0115 21:43:11.151206 86948 solver.cpp:409] Test net output #1: loss = 15.0276 (* 1 = 15.0276 loss)
  1469. I0115 21:43:37.811069 86948 solver.cpp:237] Iteration 32000, loss = 0.105481
  1470. I0115 21:43:37.811275 86948 solver.cpp:253] Train net output #0: loss = 0.105482 (* 1 = 0.105482 loss)
  1471. I0115 21:43:38.146634 86948 sgd_solver.cpp:106] Iteration 32000, lr = 0.001
  1472. I0115 22:10:46.791887 86948 solver.cpp:341] Iteration 32500, Testing net (#0)
  1473. I0115 22:10:47.576876 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1474. I0115 22:10:47.576911 86948 solver.cpp:409] Test net output #1: loss = 16.8682 (* 1 = 16.8682 loss)
  1475. I0115 22:11:00.348250 86948 solver.cpp:237] Iteration 32500, loss = 0.136433
  1476. I0115 22:11:00.348315 86948 solver.cpp:253] Train net output #0: loss = 0.136434 (* 1 = 0.136434 loss)
  1477. I0115 22:11:00.703428 86948 sgd_solver.cpp:106] Iteration 32500, lr = 0.001
  1478. I0115 22:40:09.482702 86948 solver.cpp:341] Iteration 33000, Testing net (#0)
  1479. I0115 22:40:09.970733 86948 solver.cpp:409] Test net output #0: accuracy = 0.106
  1480. I0115 22:40:09.970774 86948 solver.cpp:409] Test net output #1: loss = 15.7237 (* 1 = 15.7237 loss)
  1481. I0115 22:40:33.196830 86948 solver.cpp:237] Iteration 33000, loss = 0.0419159
  1482. I0115 22:40:33.196878 86948 solver.cpp:253] Train net output #0: loss = 0.0419162 (* 1 = 0.0419162 loss)
  1483. I0115 22:40:33.520575 86948 sgd_solver.cpp:106] Iteration 33000, lr = 0.001
  1484. I0115 23:09:33.160050 86948 solver.cpp:341] Iteration 33500, Testing net (#0)
  1485. I0115 23:09:33.949401 86948 solver.cpp:409] Test net output #0: accuracy = 0.128
  1486. I0115 23:09:33.949445 86948 solver.cpp:409] Test net output #1: loss = 16.4421 (* 1 = 16.4421 loss)
  1487. I0115 23:09:55.919976 86948 solver.cpp:237] Iteration 33500, loss = 0.0335577
  1488. I0115 23:09:55.920012 86948 solver.cpp:253] Train net output #0: loss = 0.0335579 (* 1 = 0.0335579 loss)
  1489. I0115 23:09:55.920037 86948 sgd_solver.cpp:106] Iteration 33500, lr = 0.001
  1490. I0115 23:38:55.816653 86948 solver.cpp:341] Iteration 34000, Testing net (#0)
  1491. I0115 23:39:11.819865 86948 solver.cpp:409] Test net output #0: accuracy = 0.128
  1492. I0115 23:39:11.819916 86948 solver.cpp:409] Test net output #1: loss = 15.4734 (* 1 = 15.4734 loss)
  1493. I0115 23:39:39.288936 86948 solver.cpp:237] Iteration 34000, loss = 0.0957365
  1494. I0115 23:39:39.289160 86948 solver.cpp:253] Train net output #0: loss = 0.0957366 (* 1 = 0.0957366 loss)
  1495. I0115 23:39:39.647867 86948 sgd_solver.cpp:106] Iteration 34000, lr = 0.001
  1496. I0116 00:07:39.981012 86948 solver.cpp:341] Iteration 34500, Testing net (#0)
  1497. I0116 00:07:40.466253 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1498. I0116 00:07:40.466292 86948 solver.cpp:409] Test net output #1: loss = 16.4261 (* 1 = 16.4261 loss)
  1499. I0116 00:07:53.536689 86948 solver.cpp:237] Iteration 34500, loss = 0.0408948
  1500. I0116 00:07:53.536746 86948 solver.cpp:253] Train net output #0: loss = 0.040895 (* 1 = 0.040895 loss)
  1501. I0116 00:07:53.899355 86948 sgd_solver.cpp:106] Iteration 34500, lr = 0.001
  1502. I0116 00:36:33.319710 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_35000.caffemodel
  1503. I0116 00:36:35.887605 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_35000.solverstate
  1504. I0116 00:36:35.931835 86948 solver.cpp:341] Iteration 35000, Testing net (#0)
  1505. I0116 00:36:36.414018 86948 solver.cpp:409] Test net output #0: accuracy = 0.13
  1506. I0116 00:36:36.414049 86948 solver.cpp:409] Test net output #1: loss = 15.4727 (* 1 = 15.4727 loss)
  1507. I0116 00:36:56.310194 86948 solver.cpp:237] Iteration 35000, loss = 0.043666
  1508. I0116 00:36:56.310250 86948 solver.cpp:253] Train net output #0: loss = 0.0436661 (* 1 = 0.0436661 loss)
  1509. I0116 00:36:56.666497 86948 sgd_solver.cpp:106] Iteration 35000, lr = 0.001
  1510. I0116 01:03:28.900647 86948 solver.cpp:341] Iteration 35500, Testing net (#0)
  1511. I0116 01:03:29.686399 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  1512. I0116 01:03:29.686441 86948 solver.cpp:409] Test net output #1: loss = 15.9959 (* 1 = 15.9959 loss)
  1513. I0116 01:03:59.950546 86948 solver.cpp:237] Iteration 35500, loss = 0.0498535
  1514. I0116 01:03:59.950760 86948 solver.cpp:253] Train net output #0: loss = 0.0498535 (* 1 = 0.0498535 loss)
  1515. I0116 01:04:00.292171 86948 sgd_solver.cpp:106] Iteration 35500, lr = 0.001
  1516. I0116 01:30:57.973039 86948 solver.cpp:341] Iteration 36000, Testing net (#0)
  1517. I0116 01:31:12.973151 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1518. I0116 01:31:12.973201 86948 solver.cpp:409] Test net output #1: loss = 16.1145 (* 1 = 16.1145 loss)
  1519. I0116 01:31:27.247730 86948 solver.cpp:237] Iteration 36000, loss = 0.0438099
  1520. I0116 01:31:27.247781 86948 solver.cpp:253] Train net output #0: loss = 0.04381 (* 1 = 0.04381 loss)
  1521. I0116 01:31:27.622767 86948 sgd_solver.cpp:106] Iteration 36000, lr = 0.001
  1522. I0116 02:00:12.971065 86948 solver.cpp:341] Iteration 36500, Testing net (#0)
  1523. I0116 02:00:13.755668 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1524. I0116 02:00:13.755719 86948 solver.cpp:409] Test net output #1: loss = 15.5805 (* 1 = 15.5805 loss)
  1525. I0116 02:00:36.261240 86948 solver.cpp:237] Iteration 36500, loss = 0.132418
  1526. I0116 02:00:36.261309 86948 solver.cpp:253] Train net output #0: loss = 0.132418 (* 1 = 0.132418 loss)
  1527. I0116 02:00:36.563027 86948 sgd_solver.cpp:106] Iteration 36500, lr = 0.001
  1528. I0116 02:29:10.770051 86948 solver.cpp:341] Iteration 37000, Testing net (#0)
  1529. I0116 02:29:11.554711 86948 solver.cpp:409] Test net output #0: accuracy = 0.1
  1530. I0116 02:29:11.554755 86948 solver.cpp:409] Test net output #1: loss = 17.2159 (* 1 = 17.2159 loss)
  1531. I0116 02:29:33.752339 86948 solver.cpp:237] Iteration 37000, loss = 0.049209
  1532. I0116 02:29:33.752393 86948 solver.cpp:253] Train net output #0: loss = 0.049209 (* 1 = 0.049209 loss)
  1533. I0116 02:29:34.055008 86948 sgd_solver.cpp:106] Iteration 37000, lr = 0.001
  1534. I0116 02:59:24.341718 86948 solver.cpp:341] Iteration 37500, Testing net (#0)
  1535. I0116 02:59:24.835041 86948 solver.cpp:409] Test net output #0: accuracy = 0.108
  1536. I0116 02:59:24.835085 86948 solver.cpp:409] Test net output #1: loss = 16.7629 (* 1 = 16.7629 loss)
  1537. I0116 02:59:43.998987 86948 solver.cpp:237] Iteration 37500, loss = 0.0429082
  1538. I0116 02:59:43.999044 86948 solver.cpp:253] Train net output #0: loss = 0.0429082 (* 1 = 0.0429082 loss)
  1539. I0116 02:59:44.358476 86948 sgd_solver.cpp:106] Iteration 37500, lr = 0.001
  1540. I0116 03:25:54.589052 86948 solver.cpp:341] Iteration 38000, Testing net (#0)
  1541. I0116 03:26:07.712508 86948 solver.cpp:409] Test net output #0: accuracy = 0.11
  1542. I0116 03:26:07.712555 86948 solver.cpp:409] Test net output #1: loss = 16.8652 (* 1 = 16.8652 loss)
  1543. I0116 03:26:25.959569 86948 solver.cpp:237] Iteration 38000, loss = 0.0664696
  1544. I0116 03:26:25.959764 86948 solver.cpp:253] Train net output #0: loss = 0.0664696 (* 1 = 0.0664696 loss)
  1545. I0116 03:26:25.959852 86948 sgd_solver.cpp:106] Iteration 38000, lr = 0.001
  1546. I0116 03:52:32.271384 86948 solver.cpp:341] Iteration 38500, Testing net (#0)
  1547. I0116 03:52:32.757511 86948 solver.cpp:409] Test net output #0: accuracy = 0.102
  1548. I0116 03:52:32.757550 86948 solver.cpp:409] Test net output #1: loss = 16.6012 (* 1 = 16.6012 loss)
  1549. I0116 03:52:45.400449 86948 solver.cpp:237] Iteration 38500, loss = 0.0561813
  1550. I0116 03:52:45.400507 86948 solver.cpp:253] Train net output #0: loss = 0.0561813 (* 1 = 0.0561813 loss)
  1551. I0116 03:52:45.703629 86948 sgd_solver.cpp:106] Iteration 38500, lr = 0.001
  1552. I0116 04:19:38.358794 86948 solver.cpp:341] Iteration 39000, Testing net (#0)
  1553. I0116 04:19:39.143568 86948 solver.cpp:409] Test net output #0: accuracy = 0.11
  1554. I0116 04:19:39.143609 86948 solver.cpp:409] Test net output #1: loss = 17.4246 (* 1 = 17.4246 loss)
  1555. I0116 04:19:51.897547 86948 solver.cpp:237] Iteration 39000, loss = 0.0750932
  1556. I0116 04:19:51.897608 86948 solver.cpp:253] Train net output #0: loss = 0.0750932 (* 1 = 0.0750932 loss)
  1557. I0116 04:19:52.250996 86948 sgd_solver.cpp:106] Iteration 39000, lr = 0.001
  1558. I0116 04:48:40.934551 86948 solver.cpp:341] Iteration 39500, Testing net (#0)
  1559. I0116 04:48:41.424099 86948 solver.cpp:409] Test net output #0: accuracy = 0.124
  1560. I0116 04:48:41.424131 86948 solver.cpp:409] Test net output #1: loss = 16.8196 (* 1 = 16.8196 loss)
  1561. I0116 04:49:06.289463 86948 solver.cpp:237] Iteration 39500, loss = 0.0240466
  1562. I0116 04:49:06.289499 86948 solver.cpp:253] Train net output #0: loss = 0.0240466 (* 1 = 0.0240466 loss)
  1563. I0116 04:49:06.289518 86948 sgd_solver.cpp:106] Iteration 39500, lr = 0.001
  1564. I0116 05:18:16.326318 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_40000.caffemodel
  1565. I0116 05:18:19.084363 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_40000.solverstate
  1566. I0116 05:18:19.128511 86948 solver.cpp:341] Iteration 40000, Testing net (#0)
  1567. I0116 05:18:41.247922 86948 solver.cpp:409] Test net output #0: accuracy = 0.124
  1568. I0116 05:18:41.247972 86948 solver.cpp:409] Test net output #1: loss = 16.2713 (* 1 = 16.2713 loss)
  1569. I0116 05:18:53.945335 86948 solver.cpp:237] Iteration 40000, loss = 0.0314111
  1570. I0116 05:18:53.945523 86948 solver.cpp:253] Train net output #0: loss = 0.0314112 (* 1 = 0.0314112 loss)
  1571. I0116 05:18:54.285737 86948 sgd_solver.cpp:106] Iteration 40000, lr = 0.001
  1572. I0116 05:48:00.650080 86948 solver.cpp:341] Iteration 40500, Testing net (#0)
  1573. I0116 05:48:01.433145 86948 solver.cpp:409] Test net output #0: accuracy = 0.13
  1574. I0116 05:48:01.433185 86948 solver.cpp:409] Test net output #1: loss = 16.1484 (* 1 = 16.1484 loss)
  1575. I0116 05:48:16.489727 86948 solver.cpp:237] Iteration 40500, loss = 0.0346045
  1576. I0116 05:48:16.489781 86948 solver.cpp:253] Train net output #0: loss = 0.0346046 (* 1 = 0.0346046 loss)
  1577. I0116 05:48:16.792301 86948 sgd_solver.cpp:106] Iteration 40500, lr = 0.001
  1578. I0116 06:16:05.988806 86948 solver.cpp:341] Iteration 41000, Testing net (#0)
  1579. I0116 06:16:06.773151 86948 solver.cpp:409] Test net output #0: accuracy = 0.146
  1580. I0116 06:16:06.773193 86948 solver.cpp:409] Test net output #1: loss = 16.7342 (* 1 = 16.7342 loss)
  1581. I0116 06:16:34.219686 86948 solver.cpp:237] Iteration 41000, loss = 0.0419025
  1582. I0116 06:16:34.219740 86948 solver.cpp:253] Train net output #0: loss = 0.0419026 (* 1 = 0.0419026 loss)
  1583. I0116 06:16:34.522406 86948 sgd_solver.cpp:106] Iteration 41000, lr = 0.001
  1584. I0116 06:42:35.883358 86948 solver.cpp:341] Iteration 41500, Testing net (#0)
  1585. I0116 06:42:36.669574 86948 solver.cpp:409] Test net output #0: accuracy = 0.102
  1586. I0116 06:42:36.669616 86948 solver.cpp:409] Test net output #1: loss = 17.7167 (* 1 = 17.7167 loss)
  1587. I0116 06:42:49.773461 86948 solver.cpp:237] Iteration 41500, loss = 0.185499
  1588. I0116 06:42:49.773519 86948 solver.cpp:253] Train net output #0: loss = 0.185499 (* 1 = 0.185499 loss)
  1589. I0116 06:42:50.104356 86948 sgd_solver.cpp:106] Iteration 41500, lr = 0.001
  1590. I0116 07:11:21.097192 86948 solver.cpp:341] Iteration 42000, Testing net (#0)
  1591. I0116 07:11:46.179417 86948 solver.cpp:409] Test net output #0: accuracy = 0.092
  1592. I0116 07:11:46.179472 86948 solver.cpp:409] Test net output #1: loss = 17.2835 (* 1 = 17.2835 loss)
  1593. I0116 07:11:58.591433 86948 solver.cpp:237] Iteration 42000, loss = 0.0348851
  1594. I0116 07:11:58.591639 86948 solver.cpp:253] Train net output #0: loss = 0.0348851 (* 1 = 0.0348851 loss)
  1595. I0116 07:11:58.952178 86948 sgd_solver.cpp:106] Iteration 42000, lr = 0.001
  1596. I0116 07:40:17.186990 86948 solver.cpp:341] Iteration 42500, Testing net (#0)
  1597. I0116 07:40:17.672905 86948 solver.cpp:409] Test net output #0: accuracy = 0.106
  1598. I0116 07:40:17.672950 86948 solver.cpp:409] Test net output #1: loss = 15.8583 (* 1 = 15.8583 loss)
  1599. I0116 07:40:41.425642 86948 solver.cpp:237] Iteration 42500, loss = 0.199413
  1600. I0116 07:40:41.425703 86948 solver.cpp:253] Train net output #0: loss = 0.199413 (* 1 = 0.199413 loss)
  1601. I0116 07:40:41.761833 86948 sgd_solver.cpp:106] Iteration 42500, lr = 0.001
  1602. I0116 08:09:26.900053 86948 solver.cpp:341] Iteration 43000, Testing net (#0)
  1603. I0116 08:09:27.686225 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1604. I0116 08:09:27.686270 86948 solver.cpp:409] Test net output #1: loss = 15.5427 (* 1 = 15.5427 loss)
  1605. I0116 08:09:41.389711 86948 solver.cpp:237] Iteration 43000, loss = 0.0642446
  1606. I0116 08:09:41.389744 86948 solver.cpp:253] Train net output #0: loss = 0.0642446 (* 1 = 0.0642446 loss)
  1607. I0116 08:09:41.389763 86948 sgd_solver.cpp:106] Iteration 43000, lr = 0.001
  1608. I0116 08:38:30.601132 86948 solver.cpp:341] Iteration 43500, Testing net (#0)
  1609. I0116 08:38:31.386473 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  1610. I0116 08:38:31.386512 86948 solver.cpp:409] Test net output #1: loss = 17.499 (* 1 = 17.499 loss)
  1611. I0116 08:38:50.902880 86948 solver.cpp:237] Iteration 43500, loss = 0.0141587
  1612. I0116 08:38:50.902937 86948 solver.cpp:253] Train net output #0: loss = 0.0141588 (* 1 = 0.0141588 loss)
  1613. I0116 08:38:51.279814 86948 sgd_solver.cpp:106] Iteration 43500, lr = 0.001
  1614. I0116 09:07:12.407667 86948 solver.cpp:341] Iteration 44000, Testing net (#0)
  1615. I0116 09:07:34.280910 86948 solver.cpp:409] Test net output #0: accuracy = 0.102
  1616. I0116 09:07:34.280952 86948 solver.cpp:409] Test net output #1: loss = 16.8864 (* 1 = 16.8864 loss)
  1617. I0116 09:07:47.130345 86948 solver.cpp:237] Iteration 44000, loss = 0.0879925
  1618. I0116 09:07:47.130524 86948 solver.cpp:253] Train net output #0: loss = 0.0879925 (* 1 = 0.0879925 loss)
  1619. I0116 09:07:47.479935 86948 sgd_solver.cpp:106] Iteration 44000, lr = 0.001
  1620. I0116 09:36:28.176921 86948 solver.cpp:341] Iteration 44500, Testing net (#0)
  1621. I0116 09:36:28.962368 86948 solver.cpp:409] Test net output #0: accuracy = 0.096
  1622. I0116 09:36:28.962402 86948 solver.cpp:409] Test net output #1: loss = 17.2819 (* 1 = 17.2819 loss)
  1623. I0116 09:36:58.008808 86948 solver.cpp:237] Iteration 44500, loss = 0.0444473
  1624. I0116 09:36:58.008863 86948 solver.cpp:253] Train net output #0: loss = 0.0444473 (* 1 = 0.0444473 loss)
  1625. I0116 09:36:58.356963 86948 sgd_solver.cpp:106] Iteration 44500, lr = 0.001
  1626. I0116 10:01:30.690524 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_45000.caffemodel
  1627. I0116 10:01:32.237660 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_45000.solverstate
  1628. I0116 10:01:32.281229 86948 solver.cpp:341] Iteration 45000, Testing net (#0)
  1629. I0116 10:01:32.763458 86948 solver.cpp:409] Test net output #0: accuracy = 0.132
  1630. I0116 10:01:32.763500 86948 solver.cpp:409] Test net output #1: loss = 17.0017 (* 1 = 17.0017 loss)
  1631. I0116 10:01:57.153758 86948 solver.cpp:237] Iteration 45000, loss = 0.0694832
  1632. I0116 10:01:57.153815 86948 solver.cpp:253] Train net output #0: loss = 0.0694833 (* 1 = 0.0694833 loss)
  1633. I0116 10:01:57.495278 86948 sgd_solver.cpp:106] Iteration 45000, lr = 0.001
  1634. I0116 10:29:44.696324 86948 solver.cpp:341] Iteration 45500, Testing net (#0)
  1635. I0116 10:29:45.185446 86948 solver.cpp:409] Test net output #0: accuracy = 0.096
  1636. I0116 10:29:45.185494 86948 solver.cpp:409] Test net output #1: loss = 17.6054 (* 1 = 17.6054 loss)
  1637. I0116 10:30:04.526257 86948 solver.cpp:237] Iteration 45500, loss = 0.0797003
  1638. I0116 10:30:04.526335 86948 solver.cpp:253] Train net output #0: loss = 0.0797004 (* 1 = 0.0797004 loss)
  1639. I0116 10:30:04.827440 86948 sgd_solver.cpp:106] Iteration 45500, lr = 0.001
  1640. I0116 10:59:38.546458 86948 solver.cpp:341] Iteration 46000, Testing net (#0)
  1641. I0116 10:59:51.304450 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1642. I0116 10:59:51.304498 86948 solver.cpp:409] Test net output #1: loss = 16.352 (* 1 = 16.352 loss)
  1643. I0116 11:00:03.451951 86948 solver.cpp:237] Iteration 46000, loss = 0.032807
  1644. I0116 11:00:03.451997 86948 solver.cpp:253] Train net output #0: loss = 0.0328071 (* 1 = 0.0328071 loss)
  1645. I0116 11:00:03.806876 86948 sgd_solver.cpp:106] Iteration 46000, lr = 0.001
  1646. I0116 11:28:33.824901 86948 solver.cpp:341] Iteration 46500, Testing net (#0)
  1647. I0116 11:28:34.310847 86948 solver.cpp:409] Test net output #0: accuracy = 0.102
  1648. I0116 11:28:34.310897 86948 solver.cpp:409] Test net output #1: loss = 18.0733 (* 1 = 18.0733 loss)
  1649. I0116 11:29:01.760802 86948 solver.cpp:237] Iteration 46500, loss = 0.0403756
  1650. I0116 11:29:01.760859 86948 solver.cpp:253] Train net output #0: loss = 0.0403757 (* 1 = 0.0403757 loss)
  1651. I0116 11:29:02.063665 86948 sgd_solver.cpp:106] Iteration 46500, lr = 0.001
  1652. I0116 11:58:13.606930 86948 solver.cpp:341] Iteration 47000, Testing net (#0)
  1653. I0116 11:58:14.391067 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1654. I0116 11:58:14.391113 86948 solver.cpp:409] Test net output #1: loss = 17.0273 (* 1 = 17.0273 loss)
  1655. I0116 11:58:26.920436 86948 solver.cpp:237] Iteration 47000, loss = 0.010671
  1656. I0116 11:58:26.920490 86948 solver.cpp:253] Train net output #0: loss = 0.0106711 (* 1 = 0.0106711 loss)
  1657. I0116 11:58:27.222714 86948 sgd_solver.cpp:106] Iteration 47000, lr = 0.001
  1658. I0116 12:25:42.220270 86948 solver.cpp:341] Iteration 47500, Testing net (#0)
  1659. I0116 12:25:42.707748 86948 solver.cpp:409] Test net output #0: accuracy = 0.136
  1660. I0116 12:25:42.707782 86948 solver.cpp:409] Test net output #1: loss = 17.2746 (* 1 = 17.2746 loss)
  1661. I0116 12:26:04.379840 86948 solver.cpp:237] Iteration 47500, loss = 0.00723714
  1662. I0116 12:26:04.379889 86948 solver.cpp:253] Train net output #0: loss = 0.00723719 (* 1 = 0.00723719 loss)
  1663. I0116 12:26:04.719759 86948 sgd_solver.cpp:106] Iteration 47500, lr = 0.001
  1664. I0116 12:50:40.256570 86948 solver.cpp:341] Iteration 48000, Testing net (#0)
  1665. I0116 12:51:04.559180 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1666. I0116 12:51:04.559231 86948 solver.cpp:409] Test net output #1: loss = 17.5079 (* 1 = 17.5079 loss)
  1667. I0116 12:51:30.824625 86948 solver.cpp:237] Iteration 48000, loss = 0.011206
  1668. I0116 12:51:30.824817 86948 solver.cpp:253] Train net output #0: loss = 0.011206 (* 1 = 0.011206 loss)
  1669. I0116 12:51:31.175508 86948 sgd_solver.cpp:106] Iteration 48000, lr = 0.001
  1670. I0116 13:18:06.765019 86948 solver.cpp:341] Iteration 48500, Testing net (#0)
  1671. I0116 13:18:07.413496 86948 solver.cpp:409] Test net output #0: accuracy = 0.136
  1672. I0116 13:18:07.413537 86948 solver.cpp:409] Test net output #1: loss = 16.5992 (* 1 = 16.5992 loss)
  1673. I0116 13:18:22.153182 86948 solver.cpp:237] Iteration 48500, loss = 0.0693408
  1674. I0116 13:18:22.153237 86948 solver.cpp:253] Train net output #0: loss = 0.0693408 (* 1 = 0.0693408 loss)
  1675. I0116 13:18:22.454764 86948 sgd_solver.cpp:106] Iteration 48500, lr = 0.001
  1676. I0116 13:46:26.325832 86948 solver.cpp:341] Iteration 49000, Testing net (#0)
  1677. I0116 13:46:26.812815 86948 solver.cpp:409] Test net output #0: accuracy = 0.082
  1678. I0116 13:46:26.812847 86948 solver.cpp:409] Test net output #1: loss = 17.991 (* 1 = 17.991 loss)
  1679. I0116 13:46:47.049461 86948 solver.cpp:237] Iteration 49000, loss = 0.0393182
  1680. I0116 13:46:47.049496 86948 solver.cpp:253] Train net output #0: loss = 0.0393183 (* 1 = 0.0393183 loss)
  1681. I0116 13:46:47.049516 86948 sgd_solver.cpp:106] Iteration 49000, lr = 0.001
  1682. I0116 14:15:12.316920 86948 solver.cpp:341] Iteration 49500, Testing net (#0)
  1683. I0116 14:15:12.803696 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1684. I0116 14:15:12.803738 86948 solver.cpp:409] Test net output #1: loss = 18.0324 (* 1 = 18.0324 loss)
  1685. I0116 14:15:32.959098 86948 solver.cpp:237] Iteration 49500, loss = 0.00838045
  1686. I0116 14:15:32.959175 86948 solver.cpp:253] Train net output #0: loss = 0.0083806 (* 1 = 0.0083806 loss)
  1687. I0116 14:15:32.959216 86948 sgd_solver.cpp:106] Iteration 49500, lr = 0.001
  1688. I0116 14:45:00.736219 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_50000.caffemodel
  1689. I0116 14:45:04.874878 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_50000.solverstate
  1690. I0116 14:45:04.918927 86948 solver.cpp:341] Iteration 50000, Testing net (#0)
  1691. I0116 14:45:24.349766 86948 solver.cpp:409] Test net output #0: accuracy = 0.156
  1692. I0116 14:45:24.349812 86948 solver.cpp:409] Test net output #1: loss = 17.3452 (* 1 = 17.3452 loss)
  1693. I0116 14:45:46.836374 86948 solver.cpp:237] Iteration 50000, loss = 0.0652684
  1694. I0116 14:45:46.836606 86948 solver.cpp:253] Train net output #0: loss = 0.0652686 (* 1 = 0.0652686 loss)
  1695. I0116 14:45:47.176255 86948 sgd_solver.cpp:106] Iteration 50000, lr = 0.001
  1696. I0116 15:14:51.746951 86948 solver.cpp:341] Iteration 50500, Testing net (#0)
  1697. I0116 15:14:52.531561 86948 solver.cpp:409] Test net output #0: accuracy = 0.11
  1698. I0116 15:14:52.531596 86948 solver.cpp:409] Test net output #1: loss = 18.264 (* 1 = 18.264 loss)
  1699. I0116 15:15:14.273367 86948 solver.cpp:237] Iteration 50500, loss = 0.00904272
  1700. I0116 15:15:14.273427 86948 solver.cpp:253] Train net output #0: loss = 0.00904288 (* 1 = 0.00904288 loss)
  1701. I0116 15:15:14.617744 86948 sgd_solver.cpp:106] Iteration 50500, lr = 0.001
  1702. I0116 15:40:51.166714 86948 solver.cpp:341] Iteration 51000, Testing net (#0)
  1703. I0116 15:40:51.652114 86948 solver.cpp:409] Test net output #0: accuracy = 0.11
  1704. I0116 15:40:51.652145 86948 solver.cpp:409] Test net output #1: loss = 18.104 (* 1 = 18.104 loss)
  1705. I0116 15:41:06.815246 86948 solver.cpp:237] Iteration 51000, loss = 0.0139255
  1706. I0116 15:41:06.815307 86948 solver.cpp:253] Train net output #0: loss = 0.0139257 (* 1 = 0.0139257 loss)
  1707. I0116 15:41:07.148931 86948 sgd_solver.cpp:106] Iteration 51000, lr = 0.001
  1708. I0116 16:10:14.715281 86948 solver.cpp:341] Iteration 51500, Testing net (#0)
  1709. I0116 16:10:15.497902 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1710. I0116 16:10:15.497944 86948 solver.cpp:409] Test net output #1: loss = 16.8469 (* 1 = 16.8469 loss)
  1711. I0116 16:10:30.471583 86948 solver.cpp:237] Iteration 51500, loss = 0.00585824
  1712. I0116 16:10:30.471635 86948 solver.cpp:253] Train net output #0: loss = 0.0058584 (* 1 = 0.0058584 loss)
  1713. I0116 16:10:30.773869 86948 sgd_solver.cpp:106] Iteration 51500, lr = 0.001
  1714. I0116 16:40:01.208590 86948 solver.cpp:341] Iteration 52000, Testing net (#0)
  1715. I0116 16:40:14.902446 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1716. I0116 16:40:14.902493 86948 solver.cpp:409] Test net output #1: loss = 17.7235 (* 1 = 17.7235 loss)
  1717. I0116 16:40:35.560999 86948 solver.cpp:237] Iteration 52000, loss = 0.00612289
  1718. I0116 16:40:35.561203 86948 solver.cpp:253] Train net output #0: loss = 0.00612306 (* 1 = 0.00612306 loss)
  1719. I0116 16:40:35.912436 86948 sgd_solver.cpp:106] Iteration 52000, lr = 0.001
  1720. I0116 17:08:55.274165 86948 solver.cpp:341] Iteration 52500, Testing net (#0)
  1721. I0116 17:08:55.761176 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1722. I0116 17:08:55.761209 86948 solver.cpp:409] Test net output #1: loss = 17.5617 (* 1 = 17.5617 loss)
  1723. I0116 17:09:21.139521 86948 solver.cpp:237] Iteration 52500, loss = 0.014499
  1724. I0116 17:09:21.139580 86948 solver.cpp:253] Train net output #0: loss = 0.0144992 (* 1 = 0.0144992 loss)
  1725. I0116 17:09:21.471717 86948 sgd_solver.cpp:106] Iteration 52500, lr = 0.001
  1726. I0116 17:39:18.728796 86948 solver.cpp:341] Iteration 53000, Testing net (#0)
  1727. I0116 17:39:19.215486 86948 solver.cpp:409] Test net output #0: accuracy = 0.14
  1728. I0116 17:39:19.215517 86948 solver.cpp:409] Test net output #1: loss = 17.877 (* 1 = 17.877 loss)
  1729. I0116 17:39:31.311669 86948 solver.cpp:237] Iteration 53000, loss = 0.00112286
  1730. I0116 17:39:31.311717 86948 solver.cpp:253] Train net output #0: loss = 0.00112301 (* 1 = 0.00112301 loss)
  1731. I0116 17:39:31.657688 86948 sgd_solver.cpp:106] Iteration 53000, lr = 0.001
  1732. I0116 18:08:52.516023 86948 solver.cpp:341] Iteration 53500, Testing net (#0)
  1733. I0116 18:08:53.300496 86948 solver.cpp:409] Test net output #0: accuracy = 0.146
  1734. I0116 18:08:53.300536 86948 solver.cpp:409] Test net output #1: loss = 17.146 (* 1 = 17.146 loss)
  1735. I0116 18:09:07.123847 86948 solver.cpp:237] Iteration 53500, loss = 0.0122715
  1736. I0116 18:09:07.123903 86948 solver.cpp:253] Train net output #0: loss = 0.0122717 (* 1 = 0.0122717 loss)
  1737. I0116 18:09:07.487051 86948 sgd_solver.cpp:106] Iteration 53500, lr = 0.001
  1738. I0116 18:38:23.766234 86948 solver.cpp:341] Iteration 54000, Testing net (#0)
  1739. I0116 18:38:42.114236 86948 solver.cpp:409] Test net output #0: accuracy = 0.128
  1740. I0116 18:38:42.114279 86948 solver.cpp:409] Test net output #1: loss = 17.6793 (* 1 = 17.6793 loss)
  1741. I0116 18:38:55.071338 86948 solver.cpp:237] Iteration 54000, loss = 0.00152475
  1742. I0116 18:38:55.071554 86948 solver.cpp:253] Train net output #0: loss = 0.00152491 (* 1 = 0.00152491 loss)
  1743. I0116 18:38:55.444756 86948 sgd_solver.cpp:106] Iteration 54000, lr = 0.001
  1744. I0116 19:04:11.690522 86948 solver.cpp:341] Iteration 54500, Testing net (#0)
  1745. I0116 19:04:12.474172 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1746. I0116 19:04:12.474216 86948 solver.cpp:409] Test net output #1: loss = 18.4609 (* 1 = 18.4609 loss)
  1747. I0116 19:04:26.059206 86948 solver.cpp:237] Iteration 54500, loss = 0.00278085
  1748. I0116 19:04:26.059262 86948 solver.cpp:253] Train net output #0: loss = 0.00278101 (* 1 = 0.00278101 loss)
  1749. I0116 19:04:26.362669 86948 sgd_solver.cpp:106] Iteration 54500, lr = 0.001
  1750. I0116 19:32:57.336834 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_55000.caffemodel
  1751. I0116 19:33:02.048396 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_55000.solverstate
  1752. I0116 19:33:02.093430 86948 solver.cpp:341] Iteration 55000, Testing net (#0)
  1753. I0116 19:33:02.575043 86948 solver.cpp:409] Test net output #0: accuracy = 0.1
  1754. I0116 19:33:02.575074 86948 solver.cpp:409] Test net output #1: loss = 17.5898 (* 1 = 17.5898 loss)
  1755. I0116 19:33:14.721052 86948 solver.cpp:237] Iteration 55000, loss = 0.00244798
  1756. I0116 19:33:14.721104 86948 solver.cpp:253] Train net output #0: loss = 0.00244815 (* 1 = 0.00244815 loss)
  1757. I0116 19:33:15.023622 86948 sgd_solver.cpp:106] Iteration 55000, lr = 0.001
  1758. I0116 20:00:52.280625 86948 solver.cpp:341] Iteration 55500, Testing net (#0)
  1759. I0116 20:00:53.064491 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1760. I0116 20:00:53.064527 86948 solver.cpp:409] Test net output #1: loss = 18.1016 (* 1 = 18.1016 loss)
  1761. I0116 20:01:10.699457 86948 solver.cpp:237] Iteration 55500, loss = 0.00185457
  1762. I0116 20:01:10.699515 86948 solver.cpp:253] Train net output #0: loss = 0.00185475 (* 1 = 0.00185475 loss)
  1763. I0116 20:01:11.001893 86948 sgd_solver.cpp:106] Iteration 55500, lr = 0.001
  1764. I0116 20:30:32.961246 86948 solver.cpp:341] Iteration 56000, Testing net (#0)
  1765. I0116 20:30:46.017097 86948 solver.cpp:409] Test net output #0: accuracy = 0.138
  1766. I0116 20:30:46.017145 86948 solver.cpp:409] Test net output #1: loss = 17.466 (* 1 = 17.466 loss)
  1767. I0116 20:31:08.532201 86948 solver.cpp:237] Iteration 56000, loss = 0.00167043
  1768. I0116 20:31:08.532402 86948 solver.cpp:253] Train net output #0: loss = 0.00167062 (* 1 = 0.00167062 loss)
  1769. I0116 20:31:08.892717 86948 sgd_solver.cpp:106] Iteration 56000, lr = 0.001
  1770. I0116 21:00:52.884006 86948 solver.cpp:341] Iteration 56500, Testing net (#0)
  1771. I0116 21:00:53.370746 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1772. I0116 21:00:53.370777 86948 solver.cpp:409] Test net output #1: loss = 18.5152 (* 1 = 18.5152 loss)
  1773. I0116 21:01:29.835618 86948 solver.cpp:237] Iteration 56500, loss = 0.00127041
  1774. I0116 21:01:29.835860 86948 solver.cpp:253] Train net output #0: loss = 0.0012706 (* 1 = 0.0012706 loss)
  1775. I0116 21:01:30.164361 86948 sgd_solver.cpp:106] Iteration 56500, lr = 0.001
  1776. I0116 21:29:50.693728 86948 solver.cpp:341] Iteration 57000, Testing net (#0)
  1777. I0116 21:29:51.477064 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1778. I0116 21:29:51.477094 86948 solver.cpp:409] Test net output #1: loss = 17.2161 (* 1 = 17.2161 loss)
  1779. I0116 21:30:16.758760 86948 solver.cpp:237] Iteration 57000, loss = 0.00125112
  1780. I0116 21:30:16.758816 86948 solver.cpp:253] Train net output #0: loss = 0.00125128 (* 1 = 0.00125128 loss)
  1781. I0116 21:30:17.094545 86948 sgd_solver.cpp:106] Iteration 57000, lr = 0.001
  1782. I0116 21:56:25.575891 86948 solver.cpp:341] Iteration 57500, Testing net (#0)
  1783. I0116 21:56:26.359499 86948 solver.cpp:409] Test net output #0: accuracy = 0.102
  1784. I0116 21:56:26.359541 86948 solver.cpp:409] Test net output #1: loss = 17.1281 (* 1 = 17.1281 loss)
  1785. I0116 21:56:45.616204 86948 solver.cpp:237] Iteration 57500, loss = 0.000995827
  1786. I0116 21:56:45.616246 86948 solver.cpp:253] Train net output #0: loss = 0.000995983 (* 1 = 0.000995983 loss)
  1787. I0116 21:56:45.616267 86948 sgd_solver.cpp:106] Iteration 57500, lr = 0.001
  1788. I0116 22:24:15.822245 86948 solver.cpp:341] Iteration 58000, Testing net (#0)
  1789. I0116 22:24:28.277140 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  1790. I0116 22:24:28.277187 86948 solver.cpp:409] Test net output #1: loss = 17.7945 (* 1 = 17.7945 loss)
  1791. I0116 22:24:46.763841 86948 solver.cpp:237] Iteration 58000, loss = 0.00149116
  1792. I0116 22:24:46.764065 86948 solver.cpp:253] Train net output #0: loss = 0.00149132 (* 1 = 0.00149132 loss)
  1793. I0116 22:24:47.134806 86948 sgd_solver.cpp:106] Iteration 58000, lr = 0.001
  1794. I0116 22:53:01.435055 86948 solver.cpp:341] Iteration 58500, Testing net (#0)
  1795. I0116 22:53:02.219271 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  1796. I0116 22:53:02.219316 86948 solver.cpp:409] Test net output #1: loss = 17.5235 (* 1 = 17.5235 loss)
  1797. I0116 22:53:21.272271 86948 solver.cpp:237] Iteration 58500, loss = 0.000840927
  1798. I0116 22:53:21.272325 86948 solver.cpp:253] Train net output #0: loss = 0.000841088 (* 1 = 0.000841088 loss)
  1799. I0116 22:53:21.631062 86948 sgd_solver.cpp:106] Iteration 58500, lr = 0.001
  1800. I0116 23:22:36.109424 86948 solver.cpp:341] Iteration 59000, Testing net (#0)
  1801. I0116 23:22:36.595427 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  1802. I0116 23:22:36.595466 86948 solver.cpp:409] Test net output #1: loss = 17.8167 (* 1 = 17.8167 loss)
  1803. I0116 23:22:58.007833 86948 solver.cpp:237] Iteration 59000, loss = 0.000619527
  1804. I0116 23:22:58.007884 86948 solver.cpp:253] Train net output #0: loss = 0.000619694 (* 1 = 0.000619694 loss)
  1805. I0116 23:22:58.388481 86948 sgd_solver.cpp:106] Iteration 59000, lr = 0.001
  1806. I0116 23:51:47.412817 86948 solver.cpp:341] Iteration 59500, Testing net (#0)
  1807. I0116 23:51:47.905879 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  1808. I0116 23:51:47.905912 86948 solver.cpp:409] Test net output #1: loss = 17.7125 (* 1 = 17.7125 loss)
  1809. I0116 23:52:21.172386 86948 solver.cpp:237] Iteration 59500, loss = 0.000833786
  1810. I0116 23:52:21.172557 86948 solver.cpp:253] Train net output #0: loss = 0.000833953 (* 1 = 0.000833953 loss)
  1811. I0116 23:52:21.172621 86948 sgd_solver.cpp:106] Iteration 59500, lr = 0.001
  1812. I0117 00:19:51.501459 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_60000.caffemodel
  1813. I0117 00:19:53.112973 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_60000.solverstate
  1814. I0117 00:19:53.159576 86948 solver.cpp:341] Iteration 60000, Testing net (#0)
  1815. I0117 00:20:13.622352 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1816. I0117 00:20:13.622414 86948 solver.cpp:409] Test net output #1: loss = 16.4636 (* 1 = 16.4636 loss)
  1817. I0117 00:20:38.179111 86948 solver.cpp:237] Iteration 60000, loss = 0.000698102
  1818. I0117 00:20:38.179344 86948 solver.cpp:253] Train net output #0: loss = 0.000698267 (* 1 = 0.000698267 loss)
  1819. I0117 00:20:38.518753 86948 sgd_solver.cpp:106] Iteration 60000, lr = 0.001
  1820. I0117 00:48:01.094512 86948 solver.cpp:341] Iteration 60500, Testing net (#0)
  1821. I0117 00:48:01.583814 86948 solver.cpp:409] Test net output #0: accuracy = 0.128
  1822. I0117 00:48:01.583847 86948 solver.cpp:409] Test net output #1: loss = 16.9208 (* 1 = 16.9208 loss)
  1823. I0117 00:48:02.826798 86948 solver.cpp:237] Iteration 60500, loss = 0.00134723
  1824. I0117 00:48:02.826843 86948 solver.cpp:253] Train net output #0: loss = 0.00134739 (* 1 = 0.00134739 loss)
  1825. I0117 00:48:03.129700 86948 sgd_solver.cpp:106] Iteration 60500, lr = 0.001
  1826. I0117 01:15:35.234369 86948 solver.cpp:341] Iteration 61000, Testing net (#0)
  1827. I0117 01:15:36.017329 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1828. I0117 01:15:36.017371 86948 solver.cpp:409] Test net output #1: loss = 16.7038 (* 1 = 16.7038 loss)
  1829. I0117 01:15:48.144557 86948 solver.cpp:237] Iteration 61000, loss = 0.00100287
  1830. I0117 01:15:48.144616 86948 solver.cpp:253] Train net output #0: loss = 0.00100303 (* 1 = 0.00100303 loss)
  1831. I0117 01:15:48.447521 86948 sgd_solver.cpp:106] Iteration 61000, lr = 0.001
  1832. I0117 01:43:27.736707 86948 solver.cpp:341] Iteration 61500, Testing net (#0)
  1833. I0117 01:43:28.520886 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1834. I0117 01:43:28.520928 86948 solver.cpp:409] Test net output #1: loss = 16.7259 (* 1 = 16.7259 loss)
  1835. I0117 01:43:43.177824 86948 solver.cpp:237] Iteration 61500, loss = 0.00111827
  1836. I0117 01:43:43.177875 86948 solver.cpp:253] Train net output #0: loss = 0.00111844 (* 1 = 0.00111844 loss)
  1837. I0117 01:43:43.177896 86948 sgd_solver.cpp:106] Iteration 61500, lr = 0.001
  1838. I0117 02:12:18.621260 86948 solver.cpp:341] Iteration 62000, Testing net (#0)
  1839. I0117 02:12:39.971473 86948 solver.cpp:409] Test net output #0: accuracy = 0.142
  1840. I0117 02:12:39.971523 86948 solver.cpp:409] Test net output #1: loss = 15.8327 (* 1 = 15.8327 loss)
  1841. I0117 02:12:57.808661 86948 solver.cpp:237] Iteration 62000, loss = 0.000379236
  1842. I0117 02:12:57.808817 86948 solver.cpp:253] Train net output #0: loss = 0.000379402 (* 1 = 0.000379402 loss)
  1843. I0117 02:12:58.166721 86948 sgd_solver.cpp:106] Iteration 62000, lr = 0.001
  1844. I0117 02:41:14.882915 86948 solver.cpp:341] Iteration 62500, Testing net (#0)
  1845. I0117 02:41:15.368656 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1846. I0117 02:41:15.368685 86948 solver.cpp:409] Test net output #1: loss = 16.6882 (* 1 = 16.6882 loss)
  1847. I0117 02:41:34.613448 86948 solver.cpp:237] Iteration 62500, loss = 0.00201296
  1848. I0117 02:41:34.613498 86948 solver.cpp:253] Train net output #0: loss = 0.00201312 (* 1 = 0.00201312 loss)
  1849. I0117 02:41:34.967422 86948 sgd_solver.cpp:106] Iteration 62500, lr = 0.001
  1850. I0117 03:09:53.952474 86948 solver.cpp:341] Iteration 63000, Testing net (#0)
  1851. I0117 03:09:54.738646 86948 solver.cpp:409] Test net output #0: accuracy = 0.118
  1852. I0117 03:09:54.738690 86948 solver.cpp:409] Test net output #1: loss = 15.5435 (* 1 = 15.5435 loss)
  1853. I0117 03:10:19.191061 86948 solver.cpp:237] Iteration 63000, loss = 0.00052981
  1854. I0117 03:10:19.191112 86948 solver.cpp:253] Train net output #0: loss = 0.000529975 (* 1 = 0.000529975 loss)
  1855. I0117 03:10:19.493536 86948 sgd_solver.cpp:106] Iteration 63000, lr = 0.001
  1856. I0117 03:38:18.031046 86948 solver.cpp:341] Iteration 63500, Testing net (#0)
  1857. I0117 03:38:18.815105 86948 solver.cpp:409] Test net output #0: accuracy = 0.154
  1858. I0117 03:38:18.815142 86948 solver.cpp:409] Test net output #1: loss = 16.497 (* 1 = 16.497 loss)
  1859. I0117 03:38:34.141821 86948 solver.cpp:237] Iteration 63500, loss = 0.000767982
  1860. I0117 03:38:34.141876 86948 solver.cpp:253] Train net output #0: loss = 0.000768148 (* 1 = 0.000768148 loss)
  1861. I0117 03:38:34.443259 86948 sgd_solver.cpp:106] Iteration 63500, lr = 0.001
  1862. I0117 04:03:25.591889 86948 solver.cpp:341] Iteration 64000, Testing net (#0)
  1863. I0117 04:03:48.695626 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1864. I0117 04:03:48.695669 86948 solver.cpp:409] Test net output #1: loss = 16.1517 (* 1 = 16.1517 loss)
  1865. I0117 04:04:01.971127 86948 solver.cpp:237] Iteration 64000, loss = 0.000629346
  1866. I0117 04:04:01.971350 86948 solver.cpp:253] Train net output #0: loss = 0.000629513 (* 1 = 0.000629513 loss)
  1867. I0117 04:04:02.324965 86948 sgd_solver.cpp:106] Iteration 64000, lr = 0.001
  1868. I0117 04:32:27.745319 86948 solver.cpp:341] Iteration 64500, Testing net (#0)
  1869. I0117 04:32:28.231277 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1870. I0117 04:32:28.231320 86948 solver.cpp:409] Test net output #1: loss = 16.2351 (* 1 = 16.2351 loss)
  1871. I0117 04:32:54.835930 86948 solver.cpp:237] Iteration 64500, loss = 0.00131145
  1872. I0117 04:32:54.835990 86948 solver.cpp:253] Train net output #0: loss = 0.00131162 (* 1 = 0.00131162 loss)
  1873. I0117 04:32:55.187325 86948 sgd_solver.cpp:106] Iteration 64500, lr = 0.001
  1874. I0117 05:00:55.586280 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_65000.caffemodel
  1875. I0117 05:01:00.939674 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_65000.solverstate
  1876. I0117 05:01:02.193336 86948 solver.cpp:341] Iteration 65000, Testing net (#0)
  1877. I0117 05:01:02.676085 86948 solver.cpp:409] Test net output #0: accuracy = 0.134
  1878. I0117 05:01:02.676128 86948 solver.cpp:409] Test net output #1: loss = 15.8553 (* 1 = 15.8553 loss)
  1879. I0117 05:01:26.228950 86948 solver.cpp:237] Iteration 65000, loss = 0.000773424
  1880. I0117 05:01:26.229154 86948 solver.cpp:253] Train net output #0: loss = 0.000773591 (* 1 = 0.000773591 loss)
  1881. I0117 05:01:26.229217 86948 sgd_solver.cpp:106] Iteration 65000, lr = 0.001
  1882. I0117 05:29:58.098109 86948 solver.cpp:341] Iteration 65500, Testing net (#0)
  1883. I0117 05:29:58.882112 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  1884. I0117 05:29:58.882163 86948 solver.cpp:409] Test net output #1: loss = 16.5149 (* 1 = 16.5149 loss)
  1885. I0117 05:30:31.572700 86948 solver.cpp:237] Iteration 65500, loss = 0.000899338
  1886. I0117 05:30:31.572927 86948 solver.cpp:253] Train net output #0: loss = 0.000899504 (* 1 = 0.000899504 loss)
  1887. I0117 05:30:31.903906 86948 sgd_solver.cpp:106] Iteration 65500, lr = 0.001
  1888. I0117 06:00:43.981590 86948 solver.cpp:341] Iteration 66000, Testing net (#0)
  1889. I0117 06:01:08.399309 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  1890. I0117 06:01:08.399371 86948 solver.cpp:409] Test net output #1: loss = 15.3885 (* 1 = 15.3885 loss)
  1891. I0117 06:01:44.210752 86948 solver.cpp:237] Iteration 66000, loss = 0.00092309
  1892. I0117 06:01:44.211006 86948 solver.cpp:253] Train net output #0: loss = 0.000923256 (* 1 = 0.000923256 loss)
  1893. I0117 06:01:44.571835 86948 sgd_solver.cpp:106] Iteration 66000, lr = 0.001
  1894. I0117 06:30:41.497572 86948 solver.cpp:341] Iteration 66500, Testing net (#0)
  1895. I0117 06:30:42.280725 86948 solver.cpp:409] Test net output #0: accuracy = 0.128
  1896. I0117 06:30:42.280772 86948 solver.cpp:409] Test net output #1: loss = 14.6409 (* 1 = 14.6409 loss)
  1897. I0117 06:30:58.502143 86948 solver.cpp:237] Iteration 66500, loss = 0.000562749
  1898. I0117 06:30:58.502197 86948 solver.cpp:253] Train net output #0: loss = 0.000562914 (* 1 = 0.000562914 loss)
  1899. I0117 06:30:58.870307 86948 sgd_solver.cpp:106] Iteration 66500, lr = 0.001
  1900. I0117 06:56:45.541666 86948 solver.cpp:341] Iteration 67000, Testing net (#0)
  1901. I0117 06:56:46.032340 86948 solver.cpp:409] Test net output #0: accuracy = 0.11
  1902. I0117 06:56:46.032380 86948 solver.cpp:409] Test net output #1: loss = 16.1808 (* 1 = 16.1808 loss)
  1903. I0117 06:57:01.526554 86948 solver.cpp:237] Iteration 67000, loss = 0.00102099
  1904. I0117 06:57:01.526605 86948 solver.cpp:253] Train net output #0: loss = 0.00102115 (* 1 = 0.00102115 loss)
  1905. I0117 06:57:01.865414 86948 sgd_solver.cpp:106] Iteration 67000, lr = 0.001
  1906. I0117 07:24:53.334848 86948 solver.cpp:341] Iteration 67500, Testing net (#0)
  1907. I0117 07:24:54.120802 86948 solver.cpp:409] Test net output #0: accuracy = 0.112
  1908. I0117 07:24:54.120841 86948 solver.cpp:409] Test net output #1: loss = 16.0582 (* 1 = 16.0582 loss)
  1909. I0117 07:25:07.953392 86948 solver.cpp:237] Iteration 67500, loss = 0.00112115
  1910. I0117 07:25:07.953441 86948 solver.cpp:253] Train net output #0: loss = 0.00112132 (* 1 = 0.00112132 loss)
  1911. I0117 07:25:08.307104 86948 sgd_solver.cpp:106] Iteration 67500, lr = 0.001
  1912. I0117 07:53:18.890822 86948 solver.cpp:341] Iteration 68000, Testing net (#0)
  1913. I0117 07:53:32.259802 86948 solver.cpp:409] Test net output #0: accuracy = 0.148
  1914. I0117 07:53:32.259845 86948 solver.cpp:409] Test net output #1: loss = 15.5333 (* 1 = 15.5333 loss)
  1915. I0117 07:53:47.024539 86948 solver.cpp:237] Iteration 68000, loss = 0.000853823
  1916. I0117 07:53:47.024593 86948 solver.cpp:253] Train net output #0: loss = 0.000853988 (* 1 = 0.000853988 loss)
  1917. I0117 07:53:47.327491 86948 sgd_solver.cpp:106] Iteration 68000, lr = 0.001
  1918. I0117 08:22:41.051843 86948 solver.cpp:341] Iteration 68500, Testing net (#0)
  1919. I0117 08:22:41.542336 86948 solver.cpp:409] Test net output #0: accuracy = 0.126
  1920. I0117 08:22:41.542385 86948 solver.cpp:409] Test net output #1: loss = 14.4018 (* 1 = 14.4018 loss)
  1921. I0117 08:22:57.310125 86948 solver.cpp:237] Iteration 68500, loss = 0.0009701
  1922. I0117 08:22:57.310174 86948 solver.cpp:253] Train net output #0: loss = 0.000970266 (* 1 = 0.000970266 loss)
  1923. I0117 08:22:57.310201 86948 sgd_solver.cpp:106] Iteration 68500, lr = 0.001
  1924. I0117 08:51:31.132508 86948 solver.cpp:341] Iteration 69000, Testing net (#0)
  1925. I0117 08:51:31.918012 86948 solver.cpp:409] Test net output #0: accuracy = 0.132
  1926. I0117 08:51:31.918045 86948 solver.cpp:409] Test net output #1: loss = 15.011 (* 1 = 15.011 loss)
  1927. I0117 08:51:55.438107 86948 solver.cpp:237] Iteration 69000, loss = 0.00144422
  1928. I0117 08:51:55.438156 86948 solver.cpp:253] Train net output #0: loss = 0.00144438 (* 1 = 0.00144438 loss)
  1929. I0117 08:51:55.438179 86948 sgd_solver.cpp:106] Iteration 69000, lr = 0.001
  1930. I0117 09:20:06.035789 86948 solver.cpp:341] Iteration 69500, Testing net (#0)
  1931. I0117 09:20:06.530084 86948 solver.cpp:409] Test net output #0: accuracy = 0.144
  1932. I0117 09:20:06.530124 86948 solver.cpp:409] Test net output #1: loss = 15.2529 (* 1 = 15.2529 loss)
  1933. I0117 09:20:24.312736 86948 solver.cpp:237] Iteration 69500, loss = 0.00116137
  1934. I0117 09:20:24.312789 86948 solver.cpp:253] Train net output #0: loss = 0.00116154 (* 1 = 0.00116154 loss)
  1935. I0117 09:20:24.615401 86948 sgd_solver.cpp:106] Iteration 69500, lr = 0.001
  1936. I0117 09:48:31.437814 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_70000.caffemodel
  1937. I0117 09:48:33.344377 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_70000.solverstate
  1938. I0117 09:48:33.388301 86948 solver.cpp:341] Iteration 70000, Testing net (#0)
  1939. I0117 09:48:46.943537 86948 solver.cpp:409] Test net output #0: accuracy = 0.132
  1940. I0117 09:48:46.943588 86948 solver.cpp:409] Test net output #1: loss = 14.5244 (* 1 = 14.5244 loss)
  1941. I0117 09:49:03.112295 86948 solver.cpp:237] Iteration 70000, loss = 0.00122999
  1942. I0117 09:49:03.112499 86948 solver.cpp:253] Train net output #0: loss = 0.00123015 (* 1 = 0.00123015 loss)
  1943. I0117 09:49:03.471725 86948 sgd_solver.cpp:106] Iteration 70000, lr = 0.001
  1944. I0117 10:16:26.051054 86948 solver.cpp:341] Iteration 70500, Testing net (#0)
  1945. I0117 10:16:26.835522 86948 solver.cpp:409] Test net output #0: accuracy = 0.13
  1946. I0117 10:16:26.835569 86948 solver.cpp:409] Test net output #1: loss = 15.0903 (* 1 = 15.0903 loss)
  1947. I0117 10:16:43.701015 86948 solver.cpp:237] Iteration 70500, loss = 0.00145832
  1948. I0117 10:16:43.701076 86948 solver.cpp:253] Train net output #0: loss = 0.00145848 (* 1 = 0.00145848 loss)
  1949. I0117 10:16:43.701113 86948 sgd_solver.cpp:106] Iteration 70500, lr = 0.001
  1950. I0117 10:44:12.330981 86948 solver.cpp:341] Iteration 71000, Testing net (#0)
  1951. I0117 10:44:13.115999 86948 solver.cpp:409] Test net output #0: accuracy = 0.148
  1952. I0117 10:44:13.116049 86948 solver.cpp:409] Test net output #1: loss = 14.8792 (* 1 = 14.8792 loss)
  1953. I0117 10:44:41.000583 86948 solver.cpp:237] Iteration 71000, loss = 0.00174855
  1954. I0117 10:44:41.000628 86948 solver.cpp:253] Train net output #0: loss = 0.00174872 (* 1 = 0.00174872 loss)
  1955. I0117 10:44:41.000649 86948 sgd_solver.cpp:106] Iteration 71000, lr = 0.001
  1956. I0117 11:14:19.942044 86948 solver.cpp:341] Iteration 71500, Testing net (#0)
  1957. I0117 11:14:20.427345 86948 solver.cpp:409] Test net output #0: accuracy = 0.124
  1958. I0117 11:14:20.427377 86948 solver.cpp:409] Test net output #1: loss = 14.2639 (* 1 = 14.2639 loss)
  1959. I0117 11:14:37.532204 86948 solver.cpp:237] Iteration 71500, loss = 0.000676939
  1960. I0117 11:14:37.532251 86948 solver.cpp:253] Train net output #0: loss = 0.000677105 (* 1 = 0.000677105 loss)
  1961. I0117 11:14:37.879487 86948 sgd_solver.cpp:106] Iteration 71500, lr = 0.001
  1962. I0117 11:43:06.430202 86948 solver.cpp:341] Iteration 72000, Testing net (#0)
  1963. I0117 11:43:22.537875 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  1964. I0117 11:43:22.537925 86948 solver.cpp:409] Test net output #1: loss = 15.7033 (* 1 = 15.7033 loss)
  1965. I0117 11:43:34.772270 86948 solver.cpp:237] Iteration 72000, loss = 0.00116305
  1966. I0117 11:43:34.772325 86948 solver.cpp:253] Train net output #0: loss = 0.00116322 (* 1 = 0.00116322 loss)
  1967. I0117 11:43:35.127001 86948 sgd_solver.cpp:106] Iteration 72000, lr = 0.001
  1968. I0117 12:13:05.521544 86948 solver.cpp:341] Iteration 72500, Testing net (#0)
  1969. I0117 12:13:06.007657 86948 solver.cpp:409] Test net output #0: accuracy = 0.114
  1970. I0117 12:13:06.007705 86948 solver.cpp:409] Test net output #1: loss = 14.6482 (* 1 = 14.6482 loss)
  1971. I0117 12:13:19.264045 86948 solver.cpp:237] Iteration 72500, loss = 0.00117693
  1972. I0117 12:13:19.264101 86948 solver.cpp:253] Train net output #0: loss = 0.0011771 (* 1 = 0.0011771 loss)
  1973. I0117 12:13:19.566165 86948 sgd_solver.cpp:106] Iteration 72500, lr = 0.001
  1974. I0117 12:40:20.751565 86948 solver.cpp:341] Iteration 73000, Testing net (#0)
  1975. I0117 12:40:21.238452 86948 solver.cpp:409] Test net output #0: accuracy = 0.14
  1976. I0117 12:40:21.238520 86948 solver.cpp:409] Test net output #1: loss = 13.8224 (* 1 = 13.8224 loss)
  1977. I0117 12:40:44.118749 86948 solver.cpp:237] Iteration 73000, loss = 0.00160221
  1978. I0117 12:40:44.118808 86948 solver.cpp:253] Train net output #0: loss = 0.00160238 (* 1 = 0.00160238 loss)
  1979. I0117 12:40:44.492357 86948 sgd_solver.cpp:106] Iteration 73000, lr = 0.001
  1980. I0117 13:05:49.983078 86948 solver.cpp:341] Iteration 73500, Testing net (#0)
  1981. I0117 13:05:50.769421 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  1982. I0117 13:05:50.769461 86948 solver.cpp:409] Test net output #1: loss = 14.2741 (* 1 = 14.2741 loss)
  1983. I0117 13:06:13.503284 86948 solver.cpp:237] Iteration 73500, loss = 0.00136268
  1984. I0117 13:06:13.503340 86948 solver.cpp:253] Train net output #0: loss = 0.00136284 (* 1 = 0.00136284 loss)
  1985. I0117 13:06:13.805341 86948 sgd_solver.cpp:106] Iteration 73500, lr = 0.001
  1986. I0117 13:34:08.023531 86948 solver.cpp:341] Iteration 74000, Testing net (#0)
  1987. I0117 13:34:27.022164 86948 solver.cpp:409] Test net output #0: accuracy = 0.104
  1988. I0117 13:34:27.022241 86948 solver.cpp:409] Test net output #1: loss = 14.4191 (* 1 = 14.4191 loss)
  1989. I0117 13:34:39.493510 86948 solver.cpp:237] Iteration 74000, loss = 0.00152815
  1990. I0117 13:34:39.493753 86948 solver.cpp:253] Train net output #0: loss = 0.00152831 (* 1 = 0.00152831 loss)
  1991. I0117 13:34:39.854380 86948 sgd_solver.cpp:106] Iteration 74000, lr = 0.001
  1992. I0117 14:02:55.896353 86948 solver.cpp:341] Iteration 74500, Testing net (#0)
  1993. I0117 14:02:56.681279 86948 solver.cpp:409] Test net output #0: accuracy = 0.124
  1994. I0117 14:02:56.681329 86948 solver.cpp:409] Test net output #1: loss = 15.249 (* 1 = 15.249 loss)
  1995. I0117 14:03:17.415796 86948 solver.cpp:237] Iteration 74500, loss = 0.00171971
  1996. I0117 14:03:17.415844 86948 solver.cpp:253] Train net output #0: loss = 0.00171988 (* 1 = 0.00171988 loss)
  1997. I0117 14:03:17.761025 86948 sgd_solver.cpp:106] Iteration 74500, lr = 0.001
  1998. I0117 14:32:28.479389 86948 solver.cpp:459] Snapshotting to binary proto file models/mv16f/mv16f1__iter_75000.caffemodel
  1999. I0117 14:32:32.218113 86948 sgd_solver.cpp:273] Snapshotting solver state to binary proto file models/mv16f/mv16f1__iter_75000.solverstate
  2000. I0117 14:32:32.261855 86948 solver.cpp:341] Iteration 75000, Testing net (#0)
  2001. I0117 14:32:32.778828 86948 solver.cpp:409] Test net output #0: accuracy = 0.13
  2002. I0117 14:32:32.778861 86948 solver.cpp:409] Test net output #1: loss = 14.905 (* 1 = 14.905 loss)
  2003. I0117 14:32:50.592114 86948 solver.cpp:237] Iteration 75000, loss = 0.000816114
  2004. I0117 14:32:50.592182 86948 solver.cpp:253] Train net output #0: loss = 0.000816279 (* 1 = 0.000816279 loss)
  2005. I0117 14:32:50.894603 86948 sgd_solver.cpp:106] Iteration 75000, lr = 0.001
  2006. I0117 15:02:30.998363 86948 solver.cpp:341] Iteration 75500, Testing net (#0)
  2007. I0117 15:02:31.485143 86948 solver.cpp:409] Test net output #0: accuracy = 0.12
  2008. I0117 15:02:31.485188 86948 solver.cpp:409] Test net output #1: loss = 14.9655 (* 1 = 14.9655 loss)
  2009. I0117 15:02:52.363260 86948 solver.cpp:237] Iteration 75500, loss = 0.00145175
  2010. I0117 15:02:52.363328 86948 solver.cpp:253] Train net output #0: loss = 0.00145191 (* 1 = 0.00145191 loss)
  2011. I0117 15:02:52.684723 86948 sgd_solver.cpp:106] Iteration 75500, lr = 0.001
  2012. I0117 15:32:25.288990 86948 solver.cpp:341] Iteration 76000, Testing net (#0)
  2013. I0117 15:32:38.957864 86948 solver.cpp:409] Test net output #0: accuracy = 0.116
  2014. I0117 15:32:38.957921 86948 solver.cpp:409] Test net output #1: loss = 14.0129 (* 1 = 14.0129 loss)
  2015. I0117 15:32:51.730339 86948 solver.cpp:237] Iteration 76000, loss = 0.00122516
  2016. I0117 15:32:51.730393 86948 solver.cpp:253] Train net output #0: loss = 0.00122533 (* 1 = 0.00122533 loss)
  2017. I0117 15:32:52.032361 86948 sgd_solver.cpp:106] Iteration 76000, lr = 0.001
  2018. I0117 15:58:04.766777 86948 solver.cpp:341] Iteration 76500, Testing net (#0)
  2019. I0117 15:58:05.253283 86948 solver.cpp:409] Test net output #0: accuracy = 0.128
  2020. I0117 15:58:05.253428 86948 solver.cpp:409] Test net output #1: loss = 14.5003 (* 1 = 14.5003 loss)
  2021. I0117 15:58:36.863298 86948 solver.cpp:237] Iteration 76500, loss = 0.00182002
  2022. I0117 15:58:36.863498 86948 solver.cpp:253] Train net output #0: loss = 0.00182018 (* 1 = 0.00182018 loss)
  2023. I0117 15:58:37.219053 86948 sgd_solver.cpp:106] Iteration 76500, lr = 0.001
  2024. I0117 16:26:28.663875 86948 solver.cpp:341] Iteration 77000, Testing net (#0)
  2025. I0117 16:26:29.149608 86948 solver.cpp:409] Test net output #0: accuracy = 0.158
  2026. I0117 16:26:29.149651 86948 solver.cpp:409] Test net output #1: loss = 12.9062 (* 1 = 12.9062 loss)
  2027. I0117 16:26:49.170310 86948 solver.cpp:237] Iteration 77000, loss = 0.00109042
  2028. I0117 16:26:49.170364 86948 solver.cpp:253] Train net output #0: loss = 0.00109059 (* 1 = 0.00109059 loss)
  2029. I0117 16:26:49.472210 86948 sgd_solver.cpp:106] Iteration 77000, lr = 0.001
  2030. I0117 16:53:34.338349 86948 solver.cpp:341] Iteration 77500, Testing net (#0)
  2031. I0117 16:53:34.824774 86948 solver.cpp:409] Test net output #0: accuracy = 0.122
  2032. I0117 16:53:34.824815 86948 solver.cpp:409] Test net output #1: loss = 14.0104 (* 1 = 14.0104 loss)
  2033. I0117 16:53:49.258014 86948 solver.cpp:237] Iteration 77500, loss = 0.00227474
  2034. I0117 16:53:49.258074 86948 solver.cpp:253] Train net output #0: loss = 0.00227491 (* 1 = 0.00227491 loss)
  2035. I0117 16:53:49.604596 86948 sgd_solver.cpp:106] Iteration 77500, lr = 0.001
  2036. I0117 17:23:28.599503 86948 solver.cpp:341] Iteration 78000, Testing net (#0)
  2037. I0117 17:23:42.401674 86948 solver.cpp:409] Test net output #0: accuracy = 0.134
  2038. I0117 17:23:42.401721 86948 solver.cpp:409] Test net output #1: loss = 13.5035 (* 1 = 13.5035 loss)
  2039. I0117 17:23:59.519179 86948 solver.cpp:237] Iteration 78000, loss = 0.00130983
  2040. I0117 17:23:59.519390 86948 solver.cpp:253] Train net output #0: loss = 0.00131 (* 1 = 0.00131 loss)
  2041. I0117 17:23:59.820915 86948 sgd_solver.cpp:106] Iteration 78000, lr = 0.001
  2042. I0117 17:52:58.458806 86948 solver.cpp:341] Iteration 78500, Testing net (#0)
  2043. I0117 17:52:58.944988 86948 solver.cpp:409] Test net output #0: accuracy = 0.132
  2044. I0117 17:52:58.945024 86948 solver.cpp:409] Test net output #1: loss = 14.1663 (* 1 = 14.1663 loss)
  2045. I0117 17:53:19.896749 86948 solver.cpp:237] Iteration 78500, loss = 0.00147733
  2046. I0117 17:53:19.896806 86948 solver.cpp:253] Train net output #0: loss = 0.0014775 (* 1 = 0.0014775 loss)
  2047. I0117 17:53:19.896831 86948 sgd_solver.cpp:106] Iteration 78500, lr = 0.001
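The log above repeats the same pattern from iteration 56500 through 78500: the training loss stays around 1e-3 while the test accuracy hovers near 0.10-0.15 and the test loss near 13-18, so the test metrics never follow the training curve. To look at that gap it helps to pull the numbers out of the log programmatically. The following is a minimal Python sketch for doing so, assuming the log has been saved to a file; the file name "train.log" and the regular expressions are assumptions about this log's exact line format, not part of the original output.

# Minimal sketch (not part of the original log): parse Caffe solver output into
# (iteration, value) series so train loss, test loss and test accuracy can be compared.
# "train.log" and the regexes below are assumptions about this particular log format.
import re

TEST_ITER = re.compile(r"Iteration (\d+), Testing net")
TEST_ACC  = re.compile(r"Test net output #0: accuracy = ([\d.eE+-]+)")
TEST_LOSS = re.compile(r"Test net output #1: loss = ([\d.eE+-]+)")
TRAIN     = re.compile(r"Iteration (\d+), loss = ([\d.eE+-]+)")

def parse_log(path="train.log"):
    """Return (train, test): train is [(iter, loss)], test is [(iter, accuracy, loss)]."""
    train, test = [], []
    cur_iter, cur_acc = None, None   # test pass currently being reported
    with open(path) as f:
        for line in f:
            m = TEST_ITER.search(line)
            if m:
                cur_iter = int(m.group(1))
                continue
            m = TEST_ACC.search(line)
            if m and cur_iter is not None:
                cur_acc = float(m.group(1))
                continue
            m = TEST_LOSS.search(line)
            if m and cur_iter is not None:
                test.append((cur_iter, cur_acc, float(m.group(1))))
                cur_iter, cur_acc = None, None
                continue
            m = TRAIN.search(line)
            if m:
                train.append((int(m.group(1)), float(m.group(2))))
    return train, test

if __name__ == "__main__":
    train, test = parse_log()
    for it, acc, loss in test:
        print("iter %d: test accuracy %.3f, test loss %.2f" % (it, acc, loss))

The two lists can then be plotted (e.g. with matplotlib) to see the train/test divergence over the whole run at a glance instead of scanning the log by eye.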