Guest User

Untitled

a guest
Nov 19th, 2017
68
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 90.59 KB | None | 0 0
  1. name: "Cifar-Resnet"
  2. layer { # TRAIN data layer
  3. name: "dataLayer"
  4. type: "Data"
  5. top: "data_top"
  6. top: "label"
  7. include {
  8. phase: TRAIN
  9. }
  10. transform_param {
  11. mirror: true
  12. crop_size: 32
  13. mean_file: "examples/cifar10/mean.binaryproto"
  14. }
  15. data_param {
  16. source: "examples/cifar10/cifar10_train_lmdb"
  17. batch_size: 125
  18. backend: LMDB
  19. }
  20. image_data_param {
  21. shuffle: true
  22. }
  23. }
  24. layer { # TEST data layer
  25. name: "dataLayer"
  26. type: "Data"
  27. top: "data_top"
  28. top: "label"
  29. include {
  30. phase: TEST
  31. }
  32. transform_param {
  33. crop_size: 32
  34. mean_file: "examples/cifar10/mean.binaryproto"
  35. }
  36. data_param {
  37. source: "examples/cifar10/cifar10_test_lmdb"
  38. batch_size: 125
  39. backend: LMDB
  40. }
  41. }
  42. layer { # pre_conv
  43. name: "pre_conv"
  44. type: "Convolution"
  45. bottom: "data_top"
  46. top: "pre_conv_top"
  47. param {
  48. lr_mult: 1
  49. decay_mult: 1
  50. }
  51. param {
  52. lr_mult: 2
  53. decay_mult: 0
  54. }
  55. convolution_param {
  56. num_output: 16
  57. pad: 1
  58. kernel_size: 3
  59. stride: 1
  60. weight_filler {
  61. type: "msra"
  62. }
  63. bias_filler {
  64. type: "constant"
  65. }
  66. }
  67. }
  68. layer { # pre_bn
  69. name: "pre_bn"
  70. type: "BatchNorm"
  71. bottom: "pre_conv_top"
  72. top: "pre_bn_top"
  73. param {
  74. lr_mult: 0
  75. decay_mult: 0
  76. }
  77. param {
  78. lr_mult: 0
  79. decay_mult: 0
  80. }
  81. param {
  82. lr_mult: 0
  83. decay_mult: 0
  84. }
  85. include {
  86. phase: TRAIN
  87. }
  88. batch_norm_param {
  89. use_global_stats: false
  90. moving_average_fraction: 0.999
  91. }
  92. }
  93. layer { # pre_bn
  94. name: "pre_bn"
  95. type: "BatchNorm"
  96. bottom: "pre_conv_top"
  97. top: "pre_bn_top"
  98. param {
  99. lr_mult: 0
  100. decay_mult: 0
  101. }
  102. param {
  103. lr_mult: 0
  104. decay_mult: 0
  105. }
  106. param {
  107. lr_mult: 0
  108. decay_mult: 0
  109. }
  110. include {
  111. phase: TEST
  112. }
  113. batch_norm_param {
  114. use_global_stats: true
  115. moving_average_fraction: 0.999
  116. }
  117. }
  118. layer { # pre_scale
  119. name: "pre_scale"
  120. type: "Scale"
  121. bottom: "pre_bn_top"
  122. top: "pre_bn_top"
  123. scale_param {
  124. bias_term: true
  125. }
  126. }
  127. layer { # pre_relu
  128. name: "pre_relu"
  129. type: "ReLU"
  130. bottom: "pre_bn_top"
  131. top: "pre_bn_top"
  132. }
  133. #{ L1 start
  134. #{ L1_b1 start
  135. #{ L1_b1_cbr1 start
  136. layer { # L1_b1_cbr1_conv
  137. name: "L1_b1_cbr1_conv"
  138. type: "Convolution"
  139. bottom: "pre_bn_top"
  140. top: "L1_b1_cbr1_conv_top"
  141. param {
  142. lr_mult: 1
  143. decay_mult: 1
  144. }
  145. param {
  146. lr_mult: 2
  147. decay_mult: 0
  148. }
  149. convolution_param {
  150. num_output: 16
  151. pad: 1
  152. kernel_size: 3
  153. stride: 1
  154. weight_filler {
  155. type: "msra"
  156. }
  157. bias_filler {
  158. type: "constant"
  159. }
  160. }
  161. }
  162. layer { # L1_b1_cbr1_bn
  163. name: "L1_b1_cbr1_bn"
  164. type: "BatchNorm"
  165. bottom: "L1_b1_cbr1_conv_top"
  166. top: "L1_b1_cbr1_bn_top"
  167. param {
  168. lr_mult: 0
  169. decay_mult: 0
  170. }
  171. param {
  172. lr_mult: 0
  173. decay_mult: 0
  174. }
  175. param {
  176. lr_mult: 0
  177. decay_mult: 0
  178. }
  179. include {
  180. phase: TRAIN
  181. }
  182. batch_norm_param {
  183. use_global_stats: false
  184. moving_average_fraction: 0.999
  185. }
  186. }
  187. layer { # L1_b1_cbr1_bn
  188. name: "L1_b1_cbr1_bn"
  189. type: "BatchNorm"
  190. bottom: "L1_b1_cbr1_conv_top"
  191. top: "L1_b1_cbr1_bn_top"
  192. param {
  193. lr_mult: 0
  194. decay_mult: 0
  195. }
  196. param {
  197. lr_mult: 0
  198. decay_mult: 0
  199. }
  200. param {
  201. lr_mult: 0
  202. decay_mult: 0
  203. }
  204. include {
  205. phase: TEST
  206. }
  207. batch_norm_param {
  208. use_global_stats: true
  209. moving_average_fraction: 0.999
  210. }
  211. }
  212. layer { # L1_b1_cbr1_scale
  213. name: "L1_b1_cbr1_scale"
  214. type: "Scale"
  215. bottom: "L1_b1_cbr1_bn_top"
  216. top: "L1_b1_cbr1_bn_top"
  217. scale_param {
  218. bias_term: true
  219. }
  220. }
  221. layer { # L1_b1_cbr1_relu
  222. name: "L1_b1_cbr1_relu"
  223. type: "ReLU"
  224. bottom: "L1_b1_cbr1_bn_top"
  225. top: "L1_b1_cbr1_bn_top"
  226. }
  227. #} L1_b1_cbr1 end
  228. #{ L1_b1_cbr2 start
  229. layer { # L1_b1_cbr2_conv
  230. name: "L1_b1_cbr2_conv"
  231. type: "Convolution"
  232. bottom: "L1_b1_cbr1_bn_top"
  233. top: "L1_b1_cbr2_conv_top"
  234. param {
  235. lr_mult: 1
  236. decay_mult: 1
  237. }
  238. param {
  239. lr_mult: 2
  240. decay_mult: 0
  241. }
  242. convolution_param {
  243. num_output: 16
  244. pad: 1
  245. kernel_size: 3
  246. stride: 1
  247. weight_filler {
  248. type: "msra"
  249. }
  250. bias_filler {
  251. type: "constant"
  252. }
  253. }
  254. }
  255. layer { # L1_b1_cbr2_bn
  256. name: "L1_b1_cbr2_bn"
  257. type: "BatchNorm"
  258. bottom: "L1_b1_cbr2_conv_top"
  259. top: "L1_b1_cbr2_bn_top"
  260. param {
  261. lr_mult: 0
  262. decay_mult: 0
  263. }
  264. param {
  265. lr_mult: 0
  266. decay_mult: 0
  267. }
  268. param {
  269. lr_mult: 0
  270. decay_mult: 0
  271. }
  272. include {
  273. phase: TRAIN
  274. }
  275. batch_norm_param {
  276. use_global_stats: false
  277. moving_average_fraction: 0.999
  278. }
  279. }
  280. layer { # L1_b1_cbr2_bn
  281. name: "L1_b1_cbr2_bn"
  282. type: "BatchNorm"
  283. bottom: "L1_b1_cbr2_conv_top"
  284. top: "L1_b1_cbr2_bn_top"
  285. param {
  286. lr_mult: 0
  287. decay_mult: 0
  288. }
  289. param {
  290. lr_mult: 0
  291. decay_mult: 0
  292. }
  293. param {
  294. lr_mult: 0
  295. decay_mult: 0
  296. }
  297. include {
  298. phase: TEST
  299. }
  300. batch_norm_param {
  301. use_global_stats: true
  302. moving_average_fraction: 0.999
  303. }
  304. }
  305. layer { # L1_b1_cbr2_scale
  306. name: "L1_b1_cbr2_scale"
  307. type: "Scale"
  308. bottom: "L1_b1_cbr2_bn_top"
  309. top: "L1_b1_cbr2_bn_top"
  310. scale_param {
  311. bias_term: true
  312. }
  313. }
  314. #} L1_b1_cbr2 end
  315. layer { # L1_b1_sum_eltwise
  316. name: "L1_b1_sum_eltwise"
  317. type: "Eltwise"
  318. bottom: "L1_b1_cbr2_bn_top"
  319. bottom: "pre_bn_top"
  320. top: "L1_b1_sum_eltwise_top"
  321. eltwise_param {
  322. operation: SUM
  323. }
  324. }
  325. layer { # L1_b1_relu
  326. name: "L1_b1_relu"
  327. type: "ReLU"
  328. bottom: "L1_b1_sum_eltwise_top"
  329. top: "L1_b1_sum_eltwise_top"
  330. }
  331. #} L1_b1 end
  332. #{ L1_b2 start
  333. #{ L1_b2_cbr1 start
  334. layer { # L1_b2_cbr1_conv
  335. name: "L1_b2_cbr1_conv"
  336. type: "Convolution"
  337. bottom: "L1_b1_sum_eltwise_top"
  338. top: "L1_b2_cbr1_conv_top"
  339. param {
  340. lr_mult: 1
  341. decay_mult: 1
  342. }
  343. param {
  344. lr_mult: 2
  345. decay_mult: 0
  346. }
  347. convolution_param {
  348. num_output: 16
  349. pad: 1
  350. kernel_size: 3
  351. stride: 1
  352. weight_filler {
  353. type: "msra"
  354. }
  355. bias_filler {
  356. type: "constant"
  357. }
  358. }
  359. }
  360. layer { # L1_b2_cbr1_bn
  361. name: "L1_b2_cbr1_bn"
  362. type: "BatchNorm"
  363. bottom: "L1_b2_cbr1_conv_top"
  364. top: "L1_b2_cbr1_bn_top"
  365. param {
  366. lr_mult: 0
  367. decay_mult: 0
  368. }
  369. param {
  370. lr_mult: 0
  371. decay_mult: 0
  372. }
  373. param {
  374. lr_mult: 0
  375. decay_mult: 0
  376. }
  377. include {
  378. phase: TRAIN
  379. }
  380. batch_norm_param {
  381. use_global_stats: false
  382. moving_average_fraction: 0.999
  383. }
  384. }
  385. layer { # L1_b2_cbr1_bn
  386. name: "L1_b2_cbr1_bn"
  387. type: "BatchNorm"
  388. bottom: "L1_b2_cbr1_conv_top"
  389. top: "L1_b2_cbr1_bn_top"
  390. param {
  391. lr_mult: 0
  392. decay_mult: 0
  393. }
  394. param {
  395. lr_mult: 0
  396. decay_mult: 0
  397. }
  398. param {
  399. lr_mult: 0
  400. decay_mult: 0
  401. }
  402. include {
  403. phase: TEST
  404. }
  405. batch_norm_param {
  406. use_global_stats: true
  407. moving_average_fraction: 0.999
  408. }
  409. }
  410. layer { # L1_b2_cbr1_scale
  411. name: "L1_b2_cbr1_scale"
  412. type: "Scale"
  413. bottom: "L1_b2_cbr1_bn_top"
  414. top: "L1_b2_cbr1_bn_top"
  415. scale_param {
  416. bias_term: true
  417. }
  418. }
  419. layer { # L1_b2_cbr1_relu
  420. name: "L1_b2_cbr1_relu"
  421. type: "ReLU"
  422. bottom: "L1_b2_cbr1_bn_top"
  423. top: "L1_b2_cbr1_bn_top"
  424. }
  425. #} L1_b2_cbr1 end
  426. #{ L1_b2_cbr2 start
  427. layer { # L1_b2_cbr2_conv
  428. name: "L1_b2_cbr2_conv"
  429. type: "Convolution"
  430. bottom: "L1_b2_cbr1_bn_top"
  431. top: "L1_b2_cbr2_conv_top"
  432. param {
  433. lr_mult: 1
  434. decay_mult: 1
  435. }
  436. param {
  437. lr_mult: 2
  438. decay_mult: 0
  439. }
  440. convolution_param {
  441. num_output: 16
  442. pad: 1
  443. kernel_size: 3
  444. stride: 1
  445. weight_filler {
  446. type: "msra"
  447. }
  448. bias_filler {
  449. type: "constant"
  450. }
  451. }
  452. }
  453. layer { # L1_b2_cbr2_bn
  454. name: "L1_b2_cbr2_bn"
  455. type: "BatchNorm"
  456. bottom: "L1_b2_cbr2_conv_top"
  457. top: "L1_b2_cbr2_bn_top"
  458. param {
  459. lr_mult: 0
  460. decay_mult: 0
  461. }
  462. param {
  463. lr_mult: 0
  464. decay_mult: 0
  465. }
  466. param {
  467. lr_mult: 0
  468. decay_mult: 0
  469. }
  470. include {
  471. phase: TRAIN
  472. }
  473. batch_norm_param {
  474. use_global_stats: false
  475. moving_average_fraction: 0.999
  476. }
  477. }
  478. layer { # L1_b2_cbr2_bn
  479. name: "L1_b2_cbr2_bn"
  480. type: "BatchNorm"
  481. bottom: "L1_b2_cbr2_conv_top"
  482. top: "L1_b2_cbr2_bn_top"
  483. param {
  484. lr_mult: 0
  485. decay_mult: 0
  486. }
  487. param {
  488. lr_mult: 0
  489. decay_mult: 0
  490. }
  491. param {
  492. lr_mult: 0
  493. decay_mult: 0
  494. }
  495. include {
  496. phase: TEST
  497. }
  498. batch_norm_param {
  499. use_global_stats: true
  500. moving_average_fraction: 0.999
  501. }
  502. }
  503. layer { # L1_b2_cbr2_scale
  504. name: "L1_b2_cbr2_scale"
  505. type: "Scale"
  506. bottom: "L1_b2_cbr2_bn_top"
  507. top: "L1_b2_cbr2_bn_top"
  508. scale_param {
  509. bias_term: true
  510. }
  511. }
  512. #} L1_b2_cbr2 end
  513. layer { # L1_b2_sum_eltwise
  514. name: "L1_b2_sum_eltwise"
  515. type: "Eltwise"
  516. bottom: "L1_b2_cbr2_bn_top"
  517. bottom: "L1_b1_sum_eltwise_top"
  518. top: "L1_b2_sum_eltwise_top"
  519. eltwise_param {
  520. operation: SUM
  521. }
  522. }
  523. layer { # L1_b2_relu
  524. name: "L1_b2_relu"
  525. type: "ReLU"
  526. bottom: "L1_b2_sum_eltwise_top"
  527. top: "L1_b2_sum_eltwise_top"
  528. }
  529. #} L1_b2 end
  530. #{ L1_b3 start
  531. #{ L1_b3_cbr1 start
  532. layer { # L1_b3_cbr1_conv
  533. name: "L1_b3_cbr1_conv"
  534. type: "Convolution"
  535. bottom: "L1_b2_sum_eltwise_top"
  536. top: "L1_b3_cbr1_conv_top"
  537. param {
  538. lr_mult: 1
  539. decay_mult: 1
  540. }
  541. param {
  542. lr_mult: 2
  543. decay_mult: 0
  544. }
  545. convolution_param {
  546. num_output: 16
  547. pad: 1
  548. kernel_size: 3
  549. stride: 1
  550. weight_filler {
  551. type: "msra"
  552. }
  553. bias_filler {
  554. type: "constant"
  555. }
  556. }
  557. }
  558. layer { # L1_b3_cbr1_bn
  559. name: "L1_b3_cbr1_bn"
  560. type: "BatchNorm"
  561. bottom: "L1_b3_cbr1_conv_top"
  562. top: "L1_b3_cbr1_bn_top"
  563. param {
  564. lr_mult: 0
  565. decay_mult: 0
  566. }
  567. param {
  568. lr_mult: 0
  569. decay_mult: 0
  570. }
  571. param {
  572. lr_mult: 0
  573. decay_mult: 0
  574. }
  575. include {
  576. phase: TRAIN
  577. }
  578. batch_norm_param {
  579. use_global_stats: false
  580. moving_average_fraction: 0.999
  581. }
  582. }
  583. layer { # L1_b3_cbr1_bn
  584. name: "L1_b3_cbr1_bn"
  585. type: "BatchNorm"
  586. bottom: "L1_b3_cbr1_conv_top"
  587. top: "L1_b3_cbr1_bn_top"
  588. param {
  589. lr_mult: 0
  590. decay_mult: 0
  591. }
  592. param {
  593. lr_mult: 0
  594. decay_mult: 0
  595. }
  596. param {
  597. lr_mult: 0
  598. decay_mult: 0
  599. }
  600. include {
  601. phase: TEST
  602. }
  603. batch_norm_param {
  604. use_global_stats: true
  605. moving_average_fraction: 0.999
  606. }
  607. }
  608. layer { # L1_b3_cbr1_scale
  609. name: "L1_b3_cbr1_scale"
  610. type: "Scale"
  611. bottom: "L1_b3_cbr1_bn_top"
  612. top: "L1_b3_cbr1_bn_top"
  613. scale_param {
  614. bias_term: true
  615. }
  616. }
  617. layer { # L1_b3_cbr1_relu
  618. name: "L1_b3_cbr1_relu"
  619. type: "ReLU"
  620. bottom: "L1_b3_cbr1_bn_top"
  621. top: "L1_b3_cbr1_bn_top"
  622. }
  623. #} L1_b3_cbr1 end
  624. #{ L1_b3_cbr2 start
  625. layer { # L1_b3_cbr2_conv
  626. name: "L1_b3_cbr2_conv"
  627. type: "Convolution"
  628. bottom: "L1_b3_cbr1_bn_top"
  629. top: "L1_b3_cbr2_conv_top"
  630. param {
  631. lr_mult: 1
  632. decay_mult: 1
  633. }
  634. param {
  635. lr_mult: 2
  636. decay_mult: 0
  637. }
  638. convolution_param {
  639. num_output: 16
  640. pad: 1
  641. kernel_size: 3
  642. stride: 1
  643. weight_filler {
  644. type: "msra"
  645. }
  646. bias_filler {
  647. type: "constant"
  648. }
  649. }
  650. }
  651. layer { # L1_b3_cbr2_bn
  652. name: "L1_b3_cbr2_bn"
  653. type: "BatchNorm"
  654. bottom: "L1_b3_cbr2_conv_top"
  655. top: "L1_b3_cbr2_bn_top"
  656. param {
  657. lr_mult: 0
  658. decay_mult: 0
  659. }
  660. param {
  661. lr_mult: 0
  662. decay_mult: 0
  663. }
  664. param {
  665. lr_mult: 0
  666. decay_mult: 0
  667. }
  668. include {
  669. phase: TRAIN
  670. }
  671. batch_norm_param {
  672. use_global_stats: false
  673. moving_average_fraction: 0.999
  674. }
  675. }
  676. layer { # L1_b3_cbr2_bn
  677. name: "L1_b3_cbr2_bn"
  678. type: "BatchNorm"
  679. bottom: "L1_b3_cbr2_conv_top"
  680. top: "L1_b3_cbr2_bn_top"
  681. param {
  682. lr_mult: 0
  683. decay_mult: 0
  684. }
  685. param {
  686. lr_mult: 0
  687. decay_mult: 0
  688. }
  689. param {
  690. lr_mult: 0
  691. decay_mult: 0
  692. }
  693. include {
  694. phase: TEST
  695. }
  696. batch_norm_param {
  697. use_global_stats: true
  698. moving_average_fraction: 0.999
  699. }
  700. }
  701. layer { # L1_b3_cbr2_scale
  702. name: "L1_b3_cbr2_scale"
  703. type: "Scale"
  704. bottom: "L1_b3_cbr2_bn_top"
  705. top: "L1_b3_cbr2_bn_top"
  706. scale_param {
  707. bias_term: true
  708. }
  709. }
  710. #} L1_b3_cbr2 end
  711. layer { # L1_b3_sum_eltwise
  712. name: "L1_b3_sum_eltwise"
  713. type: "Eltwise"
  714. bottom: "L1_b3_cbr2_bn_top"
  715. bottom: "L1_b2_sum_eltwise_top"
  716. top: "L1_b3_sum_eltwise_top"
  717. eltwise_param {
  718. operation: SUM
  719. }
  720. }
  721. layer { # L1_b3_relu
  722. name: "L1_b3_relu"
  723. type: "ReLU"
  724. bottom: "L1_b3_sum_eltwise_top"
  725. top: "L1_b3_sum_eltwise_top"
  726. }
  727. #} L1_b3 end
  728. #{ L1_b4 start
  729. #{ L1_b4_cbr1 start
  730. layer { # L1_b4_cbr1_conv
  731. name: "L1_b4_cbr1_conv"
  732. type: "Convolution"
  733. bottom: "L1_b3_sum_eltwise_top"
  734. top: "L1_b4_cbr1_conv_top"
  735. param {
  736. lr_mult: 1
  737. decay_mult: 1
  738. }
  739. param {
  740. lr_mult: 2
  741. decay_mult: 0
  742. }
  743. convolution_param {
  744. num_output: 16
  745. pad: 1
  746. kernel_size: 3
  747. stride: 1
  748. weight_filler {
  749. type: "msra"
  750. }
  751. bias_filler {
  752. type: "constant"
  753. }
  754. }
  755. }
  756. layer { # L1_b4_cbr1_bn
  757. name: "L1_b4_cbr1_bn"
  758. type: "BatchNorm"
  759. bottom: "L1_b4_cbr1_conv_top"
  760. top: "L1_b4_cbr1_bn_top"
  761. param {
  762. lr_mult: 0
  763. decay_mult: 0
  764. }
  765. param {
  766. lr_mult: 0
  767. decay_mult: 0
  768. }
  769. param {
  770. lr_mult: 0
  771. decay_mult: 0
  772. }
  773. include {
  774. phase: TRAIN
  775. }
  776. batch_norm_param {
  777. use_global_stats: false
  778. moving_average_fraction: 0.999
  779. }
  780. }
  781. layer { # L1_b4_cbr1_bn
  782. name: "L1_b4_cbr1_bn"
  783. type: "BatchNorm"
  784. bottom: "L1_b4_cbr1_conv_top"
  785. top: "L1_b4_cbr1_bn_top"
  786. param {
  787. lr_mult: 0
  788. decay_mult: 0
  789. }
  790. param {
  791. lr_mult: 0
  792. decay_mult: 0
  793. }
  794. param {
  795. lr_mult: 0
  796. decay_mult: 0
  797. }
  798. include {
  799. phase: TEST
  800. }
  801. batch_norm_param {
  802. use_global_stats: true
  803. moving_average_fraction: 0.999
  804. }
  805. }
  806. layer { # L1_b4_cbr1_scale
  807. name: "L1_b4_cbr1_scale"
  808. type: "Scale"
  809. bottom: "L1_b4_cbr1_bn_top"
  810. top: "L1_b4_cbr1_bn_top"
  811. scale_param {
  812. bias_term: true
  813. }
  814. }
  815. layer { # L1_b4_cbr1_relu
  816. name: "L1_b4_cbr1_relu"
  817. type: "ReLU"
  818. bottom: "L1_b4_cbr1_bn_top"
  819. top: "L1_b4_cbr1_bn_top"
  820. }
  821. #} L1_b4_cbr1 end
  822. #{ L1_b4_cbr2 start
  823. layer { # L1_b4_cbr2_conv
  824. name: "L1_b4_cbr2_conv"
  825. type: "Convolution"
  826. bottom: "L1_b4_cbr1_bn_top"
  827. top: "L1_b4_cbr2_conv_top"
  828. param {
  829. lr_mult: 1
  830. decay_mult: 1
  831. }
  832. param {
  833. lr_mult: 2
  834. decay_mult: 0
  835. }
  836. convolution_param {
  837. num_output: 16
  838. pad: 1
  839. kernel_size: 3
  840. stride: 1
  841. weight_filler {
  842. type: "msra"
  843. }
  844. bias_filler {
  845. type: "constant"
  846. }
  847. }
  848. }
  849. layer { # L1_b4_cbr2_bn
  850. name: "L1_b4_cbr2_bn"
  851. type: "BatchNorm"
  852. bottom: "L1_b4_cbr2_conv_top"
  853. top: "L1_b4_cbr2_bn_top"
  854. param {
  855. lr_mult: 0
  856. decay_mult: 0
  857. }
  858. param {
  859. lr_mult: 0
  860. decay_mult: 0
  861. }
  862. param {
  863. lr_mult: 0
  864. decay_mult: 0
  865. }
  866. include {
  867. phase: TRAIN
  868. }
  869. batch_norm_param {
  870. use_global_stats: false
  871. moving_average_fraction: 0.999
  872. }
  873. }
  874. layer { # L1_b4_cbr2_bn
  875. name: "L1_b4_cbr2_bn"
  876. type: "BatchNorm"
  877. bottom: "L1_b4_cbr2_conv_top"
  878. top: "L1_b4_cbr2_bn_top"
  879. param {
  880. lr_mult: 0
  881. decay_mult: 0
  882. }
  883. param {
  884. lr_mult: 0
  885. decay_mult: 0
  886. }
  887. param {
  888. lr_mult: 0
  889. decay_mult: 0
  890. }
  891. include {
  892. phase: TEST
  893. }
  894. batch_norm_param {
  895. use_global_stats: true
  896. moving_average_fraction: 0.999
  897. }
  898. }
  899. layer { # L1_b4_cbr2_scale
  900. name: "L1_b4_cbr2_scale"
  901. type: "Scale"
  902. bottom: "L1_b4_cbr2_bn_top"
  903. top: "L1_b4_cbr2_bn_top"
  904. scale_param {
  905. bias_term: true
  906. }
  907. }
  908. #} L1_b4_cbr2 end
  909. layer { # L1_b4_sum_eltwise
  910. name: "L1_b4_sum_eltwise"
  911. type: "Eltwise"
  912. bottom: "L1_b4_cbr2_bn_top"
  913. bottom: "L1_b3_sum_eltwise_top"
  914. top: "L1_b4_sum_eltwise_top"
  915. eltwise_param {
  916. operation: SUM
  917. }
  918. }
  919. layer { # L1_b4_relu
  920. name: "L1_b4_relu"
  921. type: "ReLU"
  922. bottom: "L1_b4_sum_eltwise_top"
  923. top: "L1_b4_sum_eltwise_top"
  924. }
  925. #} L1_b4 end
  926. #{ L1_b5 start
  927. #{ L1_b5_cbr1 start
  928. layer { # L1_b5_cbr1_conv
  929. name: "L1_b5_cbr1_conv"
  930. type: "Convolution"
  931. bottom: "L1_b4_sum_eltwise_top"
  932. top: "L1_b5_cbr1_conv_top"
  933. param {
  934. lr_mult: 1
  935. decay_mult: 1
  936. }
  937. param {
  938. lr_mult: 2
  939. decay_mult: 0
  940. }
  941. convolution_param {
  942. num_output: 16
  943. pad: 1
  944. kernel_size: 3
  945. stride: 1
  946. weight_filler {
  947. type: "msra"
  948. }
  949. bias_filler {
  950. type: "constant"
  951. }
  952. }
  953. }
  954. layer { # L1_b5_cbr1_bn
  955. name: "L1_b5_cbr1_bn"
  956. type: "BatchNorm"
  957. bottom: "L1_b5_cbr1_conv_top"
  958. top: "L1_b5_cbr1_bn_top"
  959. param {
  960. lr_mult: 0
  961. decay_mult: 0
  962. }
  963. param {
  964. lr_mult: 0
  965. decay_mult: 0
  966. }
  967. param {
  968. lr_mult: 0
  969. decay_mult: 0
  970. }
  971. include {
  972. phase: TRAIN
  973. }
  974. batch_norm_param {
  975. use_global_stats: false
  976. moving_average_fraction: 0.999
  977. }
  978. }
  979. layer { # L1_b5_cbr1_bn
  980. name: "L1_b5_cbr1_bn"
  981. type: "BatchNorm"
  982. bottom: "L1_b5_cbr1_conv_top"
  983. top: "L1_b5_cbr1_bn_top"
  984. param {
  985. lr_mult: 0
  986. decay_mult: 0
  987. }
  988. param {
  989. lr_mult: 0
  990. decay_mult: 0
  991. }
  992. param {
  993. lr_mult: 0
  994. decay_mult: 0
  995. }
  996. include {
  997. phase: TEST
  998. }
  999. batch_norm_param {
  1000. use_global_stats: true
  1001. moving_average_fraction: 0.999
  1002. }
  1003. }
  1004. layer { # L1_b5_cbr1_scale
  1005. name: "L1_b5_cbr1_scale"
  1006. type: "Scale"
  1007. bottom: "L1_b5_cbr1_bn_top"
  1008. top: "L1_b5_cbr1_bn_top"
  1009. scale_param {
  1010. bias_term: true
  1011. }
  1012. }
  1013. layer { # L1_b5_cbr1_relu
  1014. name: "L1_b5_cbr1_relu"
  1015. type: "ReLU"
  1016. bottom: "L1_b5_cbr1_bn_top"
  1017. top: "L1_b5_cbr1_bn_top"
  1018. }
  1019. #} L1_b5_cbr1 end
  1020. #{ L1_b5_cbr2 start
  1021. layer { # L1_b5_cbr2_conv
  1022. name: "L1_b5_cbr2_conv"
  1023. type: "Convolution"
  1024. bottom: "L1_b5_cbr1_bn_top"
  1025. top: "L1_b5_cbr2_conv_top"
  1026. param {
  1027. lr_mult: 1
  1028. decay_mult: 1
  1029. }
  1030. param {
  1031. lr_mult: 2
  1032. decay_mult: 0
  1033. }
  1034. convolution_param {
  1035. num_output: 16
  1036. pad: 1
  1037. kernel_size: 3
  1038. stride: 1
  1039. weight_filler {
  1040. type: "msra"
  1041. }
  1042. bias_filler {
  1043. type: "constant"
  1044. }
  1045. }
  1046. }
  1047. layer { # L1_b5_cbr2_bn
  1048. name: "L1_b5_cbr2_bn"
  1049. type: "BatchNorm"
  1050. bottom: "L1_b5_cbr2_conv_top"
  1051. top: "L1_b5_cbr2_bn_top"
  1052. param {
  1053. lr_mult: 0
  1054. decay_mult: 0
  1055. }
  1056. param {
  1057. lr_mult: 0
  1058. decay_mult: 0
  1059. }
  1060. param {
  1061. lr_mult: 0
  1062. decay_mult: 0
  1063. }
  1064. include {
  1065. phase: TRAIN
  1066. }
  1067. batch_norm_param {
  1068. use_global_stats: false
  1069. moving_average_fraction: 0.999
  1070. }
  1071. }
  1072. layer { # L1_b5_cbr2_bn
  1073. name: "L1_b5_cbr2_bn"
  1074. type: "BatchNorm"
  1075. bottom: "L1_b5_cbr2_conv_top"
  1076. top: "L1_b5_cbr2_bn_top"
  1077. param {
  1078. lr_mult: 0
  1079. decay_mult: 0
  1080. }
  1081. param {
  1082. lr_mult: 0
  1083. decay_mult: 0
  1084. }
  1085. param {
  1086. lr_mult: 0
  1087. decay_mult: 0
  1088. }
  1089. include {
  1090. phase: TEST
  1091. }
  1092. batch_norm_param {
  1093. use_global_stats: true
  1094. moving_average_fraction: 0.999
  1095. }
  1096. }
  1097. layer { # L1_b5_cbr2_scale
  1098. name: "L1_b5_cbr2_scale"
  1099. type: "Scale"
  1100. bottom: "L1_b5_cbr2_bn_top"
  1101. top: "L1_b5_cbr2_bn_top"
  1102. scale_param {
  1103. bias_term: true
  1104. }
  1105. }
  1106. #} L1_b5_cbr2 end
  1107. layer { # L1_b5_sum_eltwise
  1108. name: "L1_b5_sum_eltwise"
  1109. type: "Eltwise"
  1110. bottom: "L1_b5_cbr2_bn_top"
  1111. bottom: "L1_b4_sum_eltwise_top"
  1112. top: "L1_b5_sum_eltwise_top"
  1113. eltwise_param {
  1114. operation: SUM
  1115. }
  1116. }
  1117. layer { # L1_b5_relu
  1118. name: "L1_b5_relu"
  1119. type: "ReLU"
  1120. bottom: "L1_b5_sum_eltwise_top"
  1121. top: "L1_b5_sum_eltwise_top"
  1122. }
  1123. #} L1_b5 end
  1124. #{ L1_b6 start
  1125. #{ L1_b6_cbr1 start
  1126. layer { # L1_b6_cbr1_conv
  1127. name: "L1_b6_cbr1_conv"
  1128. type: "Convolution"
  1129. bottom: "L1_b5_sum_eltwise_top"
  1130. top: "L1_b6_cbr1_conv_top"
  1131. param {
  1132. lr_mult: 1
  1133. decay_mult: 1
  1134. }
  1135. param {
  1136. lr_mult: 2
  1137. decay_mult: 0
  1138. }
  1139. convolution_param {
  1140. num_output: 16
  1141. pad: 1
  1142. kernel_size: 3
  1143. stride: 1
  1144. weight_filler {
  1145. type: "msra"
  1146. }
  1147. bias_filler {
  1148. type: "constant"
  1149. }
  1150. }
  1151. }
  1152. layer { # L1_b6_cbr1_bn
  1153. name: "L1_b6_cbr1_bn"
  1154. type: "BatchNorm"
  1155. bottom: "L1_b6_cbr1_conv_top"
  1156. top: "L1_b6_cbr1_bn_top"
  1157. param {
  1158. lr_mult: 0
  1159. decay_mult: 0
  1160. }
  1161. param {
  1162. lr_mult: 0
  1163. decay_mult: 0
  1164. }
  1165. param {
  1166. lr_mult: 0
  1167. decay_mult: 0
  1168. }
  1169. include {
  1170. phase: TRAIN
  1171. }
  1172. batch_norm_param {
  1173. use_global_stats: false
  1174. moving_average_fraction: 0.999
  1175. }
  1176. }
  1177. layer { # L1_b6_cbr1_bn
  1178. name: "L1_b6_cbr1_bn"
  1179. type: "BatchNorm"
  1180. bottom: "L1_b6_cbr1_conv_top"
  1181. top: "L1_b6_cbr1_bn_top"
  1182. param {
  1183. lr_mult: 0
  1184. decay_mult: 0
  1185. }
  1186. param {
  1187. lr_mult: 0
  1188. decay_mult: 0
  1189. }
  1190. param {
  1191. lr_mult: 0
  1192. decay_mult: 0
  1193. }
  1194. include {
  1195. phase: TEST
  1196. }
  1197. batch_norm_param {
  1198. use_global_stats: true
  1199. moving_average_fraction: 0.999
  1200. }
  1201. }
  1202. layer { # L1_b6_cbr1_scale
  1203. name: "L1_b6_cbr1_scale"
  1204. type: "Scale"
  1205. bottom: "L1_b6_cbr1_bn_top"
  1206. top: "L1_b6_cbr1_bn_top"
  1207. scale_param {
  1208. bias_term: true
  1209. }
  1210. }
  1211. layer { # L1_b6_cbr1_relu
  1212. name: "L1_b6_cbr1_relu"
  1213. type: "ReLU"
  1214. bottom: "L1_b6_cbr1_bn_top"
  1215. top: "L1_b6_cbr1_bn_top"
  1216. }
  1217. #} L1_b6_cbr1 end
  1218. #{ L1_b6_cbr2 start
  1219. layer { # L1_b6_cbr2_conv
  1220. name: "L1_b6_cbr2_conv"
  1221. type: "Convolution"
  1222. bottom: "L1_b6_cbr1_bn_top"
  1223. top: "L1_b6_cbr2_conv_top"
  1224. param {
  1225. lr_mult: 1
  1226. decay_mult: 1
  1227. }
  1228. param {
  1229. lr_mult: 2
  1230. decay_mult: 0
  1231. }
  1232. convolution_param {
  1233. num_output: 16
  1234. pad: 1
  1235. kernel_size: 3
  1236. stride: 1
  1237. weight_filler {
  1238. type: "msra"
  1239. }
  1240. bias_filler {
  1241. type: "constant"
  1242. }
  1243. }
  1244. }
  1245. layer { # L1_b6_cbr2_bn
  1246. name: "L1_b6_cbr2_bn"
  1247. type: "BatchNorm"
  1248. bottom: "L1_b6_cbr2_conv_top"
  1249. top: "L1_b6_cbr2_bn_top"
  1250. param {
  1251. lr_mult: 0
  1252. decay_mult: 0
  1253. }
  1254. param {
  1255. lr_mult: 0
  1256. decay_mult: 0
  1257. }
  1258. param {
  1259. lr_mult: 0
  1260. decay_mult: 0
  1261. }
  1262. include {
  1263. phase: TRAIN
  1264. }
  1265. batch_norm_param {
  1266. use_global_stats: false
  1267. moving_average_fraction: 0.999
  1268. }
  1269. }
  1270. layer { # L1_b6_cbr2_bn
  1271. name: "L1_b6_cbr2_bn"
  1272. type: "BatchNorm"
  1273. bottom: "L1_b6_cbr2_conv_top"
  1274. top: "L1_b6_cbr2_bn_top"
  1275. param {
  1276. lr_mult: 0
  1277. decay_mult: 0
  1278. }
  1279. param {
  1280. lr_mult: 0
  1281. decay_mult: 0
  1282. }
  1283. param {
  1284. lr_mult: 0
  1285. decay_mult: 0
  1286. }
  1287. include {
  1288. phase: TEST
  1289. }
  1290. batch_norm_param {
  1291. use_global_stats: true
  1292. moving_average_fraction: 0.999
  1293. }
  1294. }
  1295. layer { # L1_b6_cbr2_scale
  1296. name: "L1_b6_cbr2_scale"
  1297. type: "Scale"
  1298. bottom: "L1_b6_cbr2_bn_top"
  1299. top: "L1_b6_cbr2_bn_top"
  1300. scale_param {
  1301. bias_term: true
  1302. }
  1303. }
  1304. #} L1_b6_cbr2 end
  1305. layer { # L1_b6_sum_eltwise
  1306. name: "L1_b6_sum_eltwise"
  1307. type: "Eltwise"
  1308. bottom: "L1_b6_cbr2_bn_top"
  1309. bottom: "L1_b5_sum_eltwise_top"
  1310. top: "L1_b6_sum_eltwise_top"
  1311. eltwise_param {
  1312. operation: SUM
  1313. }
  1314. }
  1315. layer { # L1_b6_relu
  1316. name: "L1_b6_relu"
  1317. type: "ReLU"
  1318. bottom: "L1_b6_sum_eltwise_top"
  1319. top: "L1_b6_sum_eltwise_top"
  1320. }
  1321. #} L1_b6 end
  1322. #{ L1_b7 start
  1323. #{ L1_b7_cbr1 start
  1324. layer { # L1_b7_cbr1_conv
  1325. name: "L1_b7_cbr1_conv"
  1326. type: "Convolution"
  1327. bottom: "L1_b6_sum_eltwise_top"
  1328. top: "L1_b7_cbr1_conv_top"
  1329. param {
  1330. lr_mult: 1
  1331. decay_mult: 1
  1332. }
  1333. param {
  1334. lr_mult: 2
  1335. decay_mult: 0
  1336. }
  1337. convolution_param {
  1338. num_output: 16
  1339. pad: 1
  1340. kernel_size: 3
  1341. stride: 1
  1342. weight_filler {
  1343. type: "msra"
  1344. }
  1345. bias_filler {
  1346. type: "constant"
  1347. }
  1348. }
  1349. }
  1350. layer { # L1_b7_cbr1_bn
  1351. name: "L1_b7_cbr1_bn"
  1352. type: "BatchNorm"
  1353. bottom: "L1_b7_cbr1_conv_top"
  1354. top: "L1_b7_cbr1_bn_top"
  1355. param {
  1356. lr_mult: 0
  1357. decay_mult: 0
  1358. }
  1359. param {
  1360. lr_mult: 0
  1361. decay_mult: 0
  1362. }
  1363. param {
  1364. lr_mult: 0
  1365. decay_mult: 0
  1366. }
  1367. include {
  1368. phase: TRAIN
  1369. }
  1370. batch_norm_param {
  1371. use_global_stats: false
  1372. moving_average_fraction: 0.999
  1373. }
  1374. }
  1375. layer { # L1_b7_cbr1_bn
  1376. name: "L1_b7_cbr1_bn"
  1377. type: "BatchNorm"
  1378. bottom: "L1_b7_cbr1_conv_top"
  1379. top: "L1_b7_cbr1_bn_top"
  1380. param {
  1381. lr_mult: 0
  1382. decay_mult: 0
  1383. }
  1384. param {
  1385. lr_mult: 0
  1386. decay_mult: 0
  1387. }
  1388. param {
  1389. lr_mult: 0
  1390. decay_mult: 0
  1391. }
  1392. include {
  1393. phase: TEST
  1394. }
  1395. batch_norm_param {
  1396. use_global_stats: true
  1397. moving_average_fraction: 0.999
  1398. }
  1399. }
  1400. layer { # L1_b7_cbr1_scale
  1401. name: "L1_b7_cbr1_scale"
  1402. type: "Scale"
  1403. bottom: "L1_b7_cbr1_bn_top"
  1404. top: "L1_b7_cbr1_bn_top"
  1405. scale_param {
  1406. bias_term: true
  1407. }
  1408. }
  1409. layer { # L1_b7_cbr1_relu
  1410. name: "L1_b7_cbr1_relu"
  1411. type: "ReLU"
  1412. bottom: "L1_b7_cbr1_bn_top"
  1413. top: "L1_b7_cbr1_bn_top"
  1414. }
  1415. #} L1_b7_cbr1 end
  1416. #{ L1_b7_cbr2 start
  1417. layer { # L1_b7_cbr2_conv
  1418. name: "L1_b7_cbr2_conv"
  1419. type: "Convolution"
  1420. bottom: "L1_b7_cbr1_bn_top"
  1421. top: "L1_b7_cbr2_conv_top"
  1422. param {
  1423. lr_mult: 1
  1424. decay_mult: 1
  1425. }
  1426. param {
  1427. lr_mult: 2
  1428. decay_mult: 0
  1429. }
  1430. convolution_param {
  1431. num_output: 16
  1432. pad: 1
  1433. kernel_size: 3
  1434. stride: 1
  1435. weight_filler {
  1436. type: "msra"
  1437. }
  1438. bias_filler {
  1439. type: "constant"
  1440. }
  1441. }
  1442. }
  1443. layer { # L1_b7_cbr2_bn
  1444. name: "L1_b7_cbr2_bn"
  1445. type: "BatchNorm"
  1446. bottom: "L1_b7_cbr2_conv_top"
  1447. top: "L1_b7_cbr2_bn_top"
  1448. param {
  1449. lr_mult: 0
  1450. decay_mult: 0
  1451. }
  1452. param {
  1453. lr_mult: 0
  1454. decay_mult: 0
  1455. }
  1456. param {
  1457. lr_mult: 0
  1458. decay_mult: 0
  1459. }
  1460. include {
  1461. phase: TRAIN
  1462. }
  1463. batch_norm_param {
  1464. use_global_stats: false
  1465. moving_average_fraction: 0.999
  1466. }
  1467. }
  1468. layer { # L1_b7_cbr2_bn
  1469. name: "L1_b7_cbr2_bn"
  1470. type: "BatchNorm"
  1471. bottom: "L1_b7_cbr2_conv_top"
  1472. top: "L1_b7_cbr2_bn_top"
  1473. param {
  1474. lr_mult: 0
  1475. decay_mult: 0
  1476. }
  1477. param {
  1478. lr_mult: 0
  1479. decay_mult: 0
  1480. }
  1481. param {
  1482. lr_mult: 0
  1483. decay_mult: 0
  1484. }
  1485. include {
  1486. phase: TEST
  1487. }
  1488. batch_norm_param {
  1489. use_global_stats: true
  1490. moving_average_fraction: 0.999
  1491. }
  1492. }
layer { # L1_b7_cbr2_scale
# Learnable per-channel scale + bias (gamma/beta). Caffe's BatchNorm layer has
# no learnable affine terms, so each BatchNorm is paired with a Scale layer.
name: "L1_b7_cbr2_scale"
type: "Scale"
bottom: "L1_b7_cbr2_bn_top"
top: "L1_b7_cbr2_bn_top"
scale_param {
bias_term: true
}
}
#} L1_b7_cbr2 end
layer { # L1_b7_sum_eltwise
# Identity shortcut: element-wise sum of the residual branch and the block
# input (output of block L1_b6). Both blobs are 16-channel, same spatial size.
name: "L1_b7_sum_eltwise"
type: "Eltwise"
bottom: "L1_b7_cbr2_bn_top"
bottom: "L1_b6_sum_eltwise_top"
top: "L1_b7_sum_eltwise_top"
eltwise_param {
operation: SUM
}
}
layer { # L1_b7_relu
# Post-addition ReLU, applied in place.
name: "L1_b7_relu"
type: "ReLU"
bottom: "L1_b7_sum_eltwise_top"
top: "L1_b7_sum_eltwise_top"
}
#} L1_b7 end
#{ L1_b8 start
# Residual block L1_b8 (identity shortcut, 16 channels, spatial size preserved):
# [3x3 conv -> BN -> Scale -> ReLU] -> [3x3 conv -> BN -> Scale] -> SUM with input -> ReLU
#{ L1_b8_cbr1 start
layer { # L1_b8_cbr1_conv
name: "L1_b8_cbr1_conv"
type: "Convolution"
bottom: "L1_b7_sum_eltwise_top"
top: "L1_b8_cbr1_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L1_b8_cbr1_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L1_b8_cbr1_bn"
type: "BatchNorm"
bottom: "L1_b8_cbr1_conv_top"
top: "L1_b8_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L1_b8_cbr1_bn
# TEST-phase twin: uses stored running averages.
name: "L1_b8_cbr1_bn"
type: "BatchNorm"
bottom: "L1_b8_cbr1_conv_top"
top: "L1_b8_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L1_b8_cbr1_scale
# Learnable gamma/beta paired with the BatchNorm above.
name: "L1_b8_cbr1_scale"
type: "Scale"
bottom: "L1_b8_cbr1_bn_top"
top: "L1_b8_cbr1_bn_top"
scale_param {
bias_term: true
}
}
layer { # L1_b8_cbr1_relu
name: "L1_b8_cbr1_relu"
type: "ReLU"
bottom: "L1_b8_cbr1_bn_top"
top: "L1_b8_cbr1_bn_top"
}
#} L1_b8_cbr1 end
#{ L1_b8_cbr2 start
layer { # L1_b8_cbr2_conv
name: "L1_b8_cbr2_conv"
type: "Convolution"
bottom: "L1_b8_cbr1_bn_top"
top: "L1_b8_cbr2_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L1_b8_cbr2_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L1_b8_cbr2_bn"
type: "BatchNorm"
bottom: "L1_b8_cbr2_conv_top"
top: "L1_b8_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L1_b8_cbr2_bn
# TEST-phase twin: uses stored running averages.
name: "L1_b8_cbr2_bn"
type: "BatchNorm"
bottom: "L1_b8_cbr2_conv_top"
top: "L1_b8_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L1_b8_cbr2_scale
name: "L1_b8_cbr2_scale"
type: "Scale"
bottom: "L1_b8_cbr2_bn_top"
top: "L1_b8_cbr2_bn_top"
scale_param {
bias_term: true
}
}
#} L1_b8_cbr2 end
layer { # L1_b8_sum_eltwise
# Identity shortcut addition with the block input.
name: "L1_b8_sum_eltwise"
type: "Eltwise"
bottom: "L1_b8_cbr2_bn_top"
bottom: "L1_b7_sum_eltwise_top"
top: "L1_b8_sum_eltwise_top"
eltwise_param {
operation: SUM
}
}
layer { # L1_b8_relu
name: "L1_b8_relu"
type: "ReLU"
bottom: "L1_b8_sum_eltwise_top"
top: "L1_b8_sum_eltwise_top"
}
#} L1_b8 end
#{ L1_b9 start
# Residual block L1_b9: last block of stage L1 (16 channels, identity shortcut).
# Same structure as the preceding blocks:
# [3x3 conv -> BN -> Scale -> ReLU] -> [3x3 conv -> BN -> Scale] -> SUM with input -> ReLU
#{ L1_b9_cbr1 start
layer { # L1_b9_cbr1_conv
name: "L1_b9_cbr1_conv"
type: "Convolution"
bottom: "L1_b8_sum_eltwise_top"
top: "L1_b9_cbr1_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L1_b9_cbr1_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L1_b9_cbr1_bn"
type: "BatchNorm"
bottom: "L1_b9_cbr1_conv_top"
top: "L1_b9_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L1_b9_cbr1_bn
# TEST-phase twin: uses stored running averages.
name: "L1_b9_cbr1_bn"
type: "BatchNorm"
bottom: "L1_b9_cbr1_conv_top"
top: "L1_b9_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L1_b9_cbr1_scale
name: "L1_b9_cbr1_scale"
type: "Scale"
bottom: "L1_b9_cbr1_bn_top"
top: "L1_b9_cbr1_bn_top"
scale_param {
bias_term: true
}
}
layer { # L1_b9_cbr1_relu
name: "L1_b9_cbr1_relu"
type: "ReLU"
bottom: "L1_b9_cbr1_bn_top"
top: "L1_b9_cbr1_bn_top"
}
#} L1_b9_cbr1 end
#{ L1_b9_cbr2 start
layer { # L1_b9_cbr2_conv
name: "L1_b9_cbr2_conv"
type: "Convolution"
bottom: "L1_b9_cbr1_bn_top"
top: "L1_b9_cbr2_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L1_b9_cbr2_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L1_b9_cbr2_bn"
type: "BatchNorm"
bottom: "L1_b9_cbr2_conv_top"
top: "L1_b9_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L1_b9_cbr2_bn
# TEST-phase twin: uses stored running averages.
name: "L1_b9_cbr2_bn"
type: "BatchNorm"
bottom: "L1_b9_cbr2_conv_top"
top: "L1_b9_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L1_b9_cbr2_scale
name: "L1_b9_cbr2_scale"
type: "Scale"
bottom: "L1_b9_cbr2_bn_top"
top: "L1_b9_cbr2_bn_top"
scale_param {
bias_term: true
}
}
#} L1_b9_cbr2 end
layer { # L1_b9_sum_eltwise
# Identity shortcut addition with the block input.
name: "L1_b9_sum_eltwise"
type: "Eltwise"
bottom: "L1_b9_cbr2_bn_top"
bottom: "L1_b8_sum_eltwise_top"
top: "L1_b9_sum_eltwise_top"
eltwise_param {
operation: SUM
}
}
layer { # L1_b9_relu
name: "L1_b9_relu"
type: "ReLU"
bottom: "L1_b9_sum_eltwise_top"
top: "L1_b9_sum_eltwise_top"
}
#} L1_b9 end
#} L1 end
#{ L2 start
#{ L2_b1 start
# Downsampling block L2_b1 (transition from stage L1 to stage L2):
#  - residual path: 3x3 conv stride 2 (halves spatial size) -> BN -> Scale -> ReLU
#    -> 3x3 conv stride 1 -> BN -> Scale; both convs keep 16 output channels.
#  - shortcut path: 3x3 AVE pooling, stride 2, matching the halved spatial size.
# The two 16-channel paths are summed; channel expansion to 32 is done afterwards
# by the zero-padding Concat (L2_b1_zeros / L2_b1_concat0 below).
#{ L2_b1_cbr1 start
layer { # L2_b1_cbr1_conv
name: "L2_b1_cbr1_conv"
type: "Convolution"
bottom: "L1_b9_sum_eltwise_top"
top: "L2_b1_cbr1_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b1_cbr1_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b1_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b1_cbr1_conv_top"
top: "L2_b1_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b1_cbr1_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b1_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b1_cbr1_conv_top"
top: "L2_b1_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b1_cbr1_scale
name: "L2_b1_cbr1_scale"
type: "Scale"
bottom: "L2_b1_cbr1_bn_top"
top: "L2_b1_cbr1_bn_top"
scale_param {
bias_term: true
}
}
layer { # L2_b1_cbr1_relu
name: "L2_b1_cbr1_relu"
type: "ReLU"
bottom: "L2_b1_cbr1_bn_top"
top: "L2_b1_cbr1_bn_top"
}
#} L2_b1_cbr1 end
#{ L2_b1_cbr2 start
layer { # L2_b1_cbr2_conv
name: "L2_b1_cbr2_conv"
type: "Convolution"
bottom: "L2_b1_cbr1_bn_top"
top: "L2_b1_cbr2_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b1_cbr2_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b1_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b1_cbr2_conv_top"
top: "L2_b1_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b1_cbr2_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b1_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b1_cbr2_conv_top"
top: "L2_b1_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b1_cbr2_scale
name: "L2_b1_cbr2_scale"
type: "Scale"
bottom: "L2_b1_cbr2_bn_top"
top: "L2_b1_cbr2_bn_top"
scale_param {
bias_term: true
}
}
#} L2_b1_cbr2 end
layer { # L2_b1_pool
# Shortcut path: 3x3 AVE pooling with stride 2 downsamples the block input to
# match the stride-2 conv branch spatially (channels stay at 16).
name: "L2_b1_pool"
type: "Pooling"
bottom: "L1_b9_sum_eltwise_top"
top: "L2_b1_pool"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer { # L2_b1_sum_eltwise
# Sum of the downsampled residual branch and the pooled shortcut.
name: "L2_b1_sum_eltwise"
type: "Eltwise"
bottom: "L2_b1_cbr2_bn_top"
bottom: "L2_b1_pool"
top: "L2_b1_sum_eltwise_top"
eltwise_param {
operation: SUM
}
}
layer { # L2_b1_relu
name: "L2_b1_relu"
type: "ReLU"
bottom: "L2_b1_sum_eltwise_top"
top: "L2_b1_sum_eltwise_top"
}
#} L2_b1 end
layer { # L2_b1_zeros
# All-zero dummy blob used to zero-pad the channel dimension (ResNet "option A"
# style parameter-free shortcut expansion).
# NOTE(review): dim 125 hard-codes the batch size and must be kept in sync with
# batch_size: 125 in the data layers; the remaining dims (16 channels, 16x16
# spatial) must match L2_b1_sum_eltwise_top.
name: "L2_b1_zeros"
type: "DummyData"
top: "L2_b1_zeros"
dummy_data_param {
shape: {dim: 125 dim: 16 dim: 16 dim: 16 }
data_filler: {
type: "constant"
value: 0
}
}
}
layer { # L2_b1_concat0
# Concatenate along axis 1 (channels): 16 real + 16 zero channels -> 32 channels,
# the width used by the rest of stage L2.
name: "L2_b1_concat0"
type: "Concat"
bottom: "L2_b1_sum_eltwise_top"
bottom: "L2_b1_zeros"
top: "L2_b1_concat0"
concat_param {
axis: 1
}
}
#{ L2_b2 start
# Residual block L2_b2 (identity shortcut, 32 channels, spatial size preserved):
# [3x3 conv -> BN -> Scale -> ReLU] -> [3x3 conv -> BN -> Scale] -> SUM with input -> ReLU
#{ L2_b2_cbr1 start
layer { # L2_b2_cbr1_conv
name: "L2_b2_cbr1_conv"
type: "Convolution"
bottom: "L2_b1_concat0"
top: "L2_b2_cbr1_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b2_cbr1_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b2_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b2_cbr1_conv_top"
top: "L2_b2_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b2_cbr1_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b2_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b2_cbr1_conv_top"
top: "L2_b2_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b2_cbr1_scale
name: "L2_b2_cbr1_scale"
type: "Scale"
bottom: "L2_b2_cbr1_bn_top"
top: "L2_b2_cbr1_bn_top"
scale_param {
bias_term: true
}
}
layer { # L2_b2_cbr1_relu
name: "L2_b2_cbr1_relu"
type: "ReLU"
bottom: "L2_b2_cbr1_bn_top"
top: "L2_b2_cbr1_bn_top"
}
#} L2_b2_cbr1 end
#{ L2_b2_cbr2 start
layer { # L2_b2_cbr2_conv
name: "L2_b2_cbr2_conv"
type: "Convolution"
bottom: "L2_b2_cbr1_bn_top"
top: "L2_b2_cbr2_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b2_cbr2_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b2_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b2_cbr2_conv_top"
top: "L2_b2_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b2_cbr2_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b2_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b2_cbr2_conv_top"
top: "L2_b2_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b2_cbr2_scale
name: "L2_b2_cbr2_scale"
type: "Scale"
bottom: "L2_b2_cbr2_bn_top"
top: "L2_b2_cbr2_bn_top"
scale_param {
bias_term: true
}
}
#} L2_b2_cbr2 end
layer { # L2_b2_sum_eltwise
# Identity shortcut addition with the block input (the zero-padded concat).
name: "L2_b2_sum_eltwise"
type: "Eltwise"
bottom: "L2_b2_cbr2_bn_top"
bottom: "L2_b1_concat0"
top: "L2_b2_sum_eltwise_top"
eltwise_param {
operation: SUM
}
}
layer { # L2_b2_relu
name: "L2_b2_relu"
type: "ReLU"
bottom: "L2_b2_sum_eltwise_top"
top: "L2_b2_sum_eltwise_top"
}
#} L2_b2 end
#{ L2_b3 start
# Residual block L2_b3 (identity shortcut, 32 channels, spatial size preserved).
#{ L2_b3_cbr1 start
layer { # L2_b3_cbr1_conv
name: "L2_b3_cbr1_conv"
type: "Convolution"
bottom: "L2_b2_sum_eltwise_top"
top: "L2_b3_cbr1_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b3_cbr1_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b3_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b3_cbr1_conv_top"
top: "L2_b3_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b3_cbr1_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b3_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b3_cbr1_conv_top"
top: "L2_b3_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b3_cbr1_scale
name: "L2_b3_cbr1_scale"
type: "Scale"
bottom: "L2_b3_cbr1_bn_top"
top: "L2_b3_cbr1_bn_top"
scale_param {
bias_term: true
}
}
layer { # L2_b3_cbr1_relu
name: "L2_b3_cbr1_relu"
type: "ReLU"
bottom: "L2_b3_cbr1_bn_top"
top: "L2_b3_cbr1_bn_top"
}
#} L2_b3_cbr1 end
#{ L2_b3_cbr2 start
layer { # L2_b3_cbr2_conv
name: "L2_b3_cbr2_conv"
type: "Convolution"
bottom: "L2_b3_cbr1_bn_top"
top: "L2_b3_cbr2_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b3_cbr2_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b3_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b3_cbr2_conv_top"
top: "L2_b3_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b3_cbr2_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b3_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b3_cbr2_conv_top"
top: "L2_b3_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b3_cbr2_scale
name: "L2_b3_cbr2_scale"
type: "Scale"
bottom: "L2_b3_cbr2_bn_top"
top: "L2_b3_cbr2_bn_top"
scale_param {
bias_term: true
}
}
#} L2_b3_cbr2 end
layer { # L2_b3_sum_eltwise
# Identity shortcut addition with the block input.
name: "L2_b3_sum_eltwise"
type: "Eltwise"
bottom: "L2_b3_cbr2_bn_top"
bottom: "L2_b2_sum_eltwise_top"
top: "L2_b3_sum_eltwise_top"
eltwise_param {
operation: SUM
}
}
layer { # L2_b3_relu
name: "L2_b3_relu"
type: "ReLU"
bottom: "L2_b3_sum_eltwise_top"
top: "L2_b3_sum_eltwise_top"
}
#} L2_b3 end
#{ L2_b4 start
# Residual block L2_b4 (identity shortcut, 32 channels, spatial size preserved).
#{ L2_b4_cbr1 start
layer { # L2_b4_cbr1_conv
name: "L2_b4_cbr1_conv"
type: "Convolution"
bottom: "L2_b3_sum_eltwise_top"
top: "L2_b4_cbr1_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b4_cbr1_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b4_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b4_cbr1_conv_top"
top: "L2_b4_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b4_cbr1_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b4_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b4_cbr1_conv_top"
top: "L2_b4_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b4_cbr1_scale
name: "L2_b4_cbr1_scale"
type: "Scale"
bottom: "L2_b4_cbr1_bn_top"
top: "L2_b4_cbr1_bn_top"
scale_param {
bias_term: true
}
}
layer { # L2_b4_cbr1_relu
name: "L2_b4_cbr1_relu"
type: "ReLU"
bottom: "L2_b4_cbr1_bn_top"
top: "L2_b4_cbr1_bn_top"
}
#} L2_b4_cbr1 end
#{ L2_b4_cbr2 start
layer { # L2_b4_cbr2_conv
name: "L2_b4_cbr2_conv"
type: "Convolution"
bottom: "L2_b4_cbr1_bn_top"
top: "L2_b4_cbr2_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b4_cbr2_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b4_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b4_cbr2_conv_top"
top: "L2_b4_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b4_cbr2_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b4_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b4_cbr2_conv_top"
top: "L2_b4_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b4_cbr2_scale
name: "L2_b4_cbr2_scale"
type: "Scale"
bottom: "L2_b4_cbr2_bn_top"
top: "L2_b4_cbr2_bn_top"
scale_param {
bias_term: true
}
}
#} L2_b4_cbr2 end
layer { # L2_b4_sum_eltwise
# Identity shortcut addition with the block input.
name: "L2_b4_sum_eltwise"
type: "Eltwise"
bottom: "L2_b4_cbr2_bn_top"
bottom: "L2_b3_sum_eltwise_top"
top: "L2_b4_sum_eltwise_top"
eltwise_param {
operation: SUM
}
}
layer { # L2_b4_relu
name: "L2_b4_relu"
type: "ReLU"
bottom: "L2_b4_sum_eltwise_top"
top: "L2_b4_sum_eltwise_top"
}
#} L2_b4 end
#{ L2_b5 start
# Residual block L2_b5 (identity shortcut, 32 channels, spatial size preserved).
#{ L2_b5_cbr1 start
layer { # L2_b5_cbr1_conv
name: "L2_b5_cbr1_conv"
type: "Convolution"
bottom: "L2_b4_sum_eltwise_top"
top: "L2_b5_cbr1_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b5_cbr1_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b5_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b5_cbr1_conv_top"
top: "L2_b5_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b5_cbr1_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b5_cbr1_bn"
type: "BatchNorm"
bottom: "L2_b5_cbr1_conv_top"
top: "L2_b5_cbr1_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b5_cbr1_scale
name: "L2_b5_cbr1_scale"
type: "Scale"
bottom: "L2_b5_cbr1_bn_top"
top: "L2_b5_cbr1_bn_top"
scale_param {
bias_term: true
}
}
layer { # L2_b5_cbr1_relu
name: "L2_b5_cbr1_relu"
type: "ReLU"
bottom: "L2_b5_cbr1_bn_top"
top: "L2_b5_cbr1_bn_top"
}
#} L2_b5_cbr1 end
#{ L2_b5_cbr2 start
layer { # L2_b5_cbr2_conv
name: "L2_b5_cbr2_conv"
type: "Convolution"
bottom: "L2_b5_cbr1_bn_top"
top: "L2_b5_cbr2_conv_top"
param { # weights
lr_mult: 1
decay_mult: 1
}
param { # bias
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
}
}
layer { # L2_b5_cbr2_bn
# TRAIN-phase BatchNorm; shares blobs with the TEST twin below via the same name.
name: "L2_b5_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b5_cbr2_conv_top"
top: "L2_b5_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
moving_average_fraction: 0.999
}
}
layer { # L2_b5_cbr2_bn
# TEST-phase twin: uses stored running averages.
name: "L2_b5_cbr2_bn"
type: "BatchNorm"
bottom: "L2_b5_cbr2_conv_top"
top: "L2_b5_cbr2_bn_top"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
moving_average_fraction: 0.999
}
}
layer { # L2_b5_cbr2_scale
name: "L2_b5_cbr2_scale"
type: "Scale"
bottom: "L2_b5_cbr2_bn_top"
top: "L2_b5_cbr2_bn_top"
scale_param {
bias_term: true
}
}
#} L2_b5_cbr2 end
layer { # L2_b5_sum_eltwise
# Identity shortcut addition with the block input.
name: "L2_b5_sum_eltwise"
type: "Eltwise"
bottom: "L2_b5_cbr2_bn_top"
bottom: "L2_b4_sum_eltwise_top"
top: "L2_b5_sum_eltwise_top"
eltwise_param {
operation: SUM
}
}
layer { # L2_b5_relu
name: "L2_b5_relu"
type: "ReLU"
bottom: "L2_b5_sum_eltwise_top"
top: "L2_b5_sum_eltwise_top"
}
#} L2_b5 end
  2941. #{ L2_b6 start
  2942. #{ L2_b6_cbr1 start
  2943. layer { # L2_b6_cbr1_conv
  2944. name: "L2_b6_cbr1_conv"
  2945. type: "Convolution"
  2946. bottom: "L2_b5_sum_eltwise_top"
  2947. top: "L2_b6_cbr1_conv_top"
  2948. param {
  2949. lr_mult: 1
  2950. decay_mult: 1
  2951. }
  2952. param {
  2953. lr_mult: 2
  2954. decay_mult: 0
  2955. }
  2956. convolution_param {
  2957. num_output: 32
  2958. pad: 1
  2959. kernel_size: 3
  2960. stride: 1
  2961. weight_filler {
  2962. type: "msra"
  2963. }
  2964. bias_filler {
  2965. type: "constant"
  2966. }
  2967. }
  2968. }
  2969. layer { # L2_b6_cbr1_bn
  2970. name: "L2_b6_cbr1_bn"
  2971. type: "BatchNorm"
  2972. bottom: "L2_b6_cbr1_conv_top"
  2973. top: "L2_b6_cbr1_bn_top"
  2974. param {
  2975. lr_mult: 0
  2976. decay_mult: 0
  2977. }
  2978. param {
  2979. lr_mult: 0
  2980. decay_mult: 0
  2981. }
  2982. param {
  2983. lr_mult: 0
  2984. decay_mult: 0
  2985. }
  2986. include {
  2987. phase: TRAIN
  2988. }
  2989. batch_norm_param {
  2990. use_global_stats: false
  2991. moving_average_fraction: 0.999
  2992. }
  2993. }
  2994. layer { # L2_b6_cbr1_bn
  2995. name: "L2_b6_cbr1_bn"
  2996. type: "BatchNorm"
  2997. bottom: "L2_b6_cbr1_conv_top"
  2998. top: "L2_b6_cbr1_bn_top"
  2999. param {
  3000. lr_mult: 0
  3001. decay_mult: 0
  3002. }
  3003. param {
  3004. lr_mult: 0
  3005. decay_mult: 0
  3006. }
  3007. param {
  3008. lr_mult: 0
  3009. decay_mult: 0
  3010. }
  3011. include {
  3012. phase: TEST
  3013. }
  3014. batch_norm_param {
  3015. use_global_stats: true
  3016. moving_average_fraction: 0.999
  3017. }
  3018. }
  3019. layer { # L2_b6_cbr1_scale
  3020. name: "L2_b6_cbr1_scale"
  3021. type: "Scale"
  3022. bottom: "L2_b6_cbr1_bn_top"
  3023. top: "L2_b6_cbr1_bn_top"
  3024. scale_param {
  3025. bias_term: true
  3026. }
  3027. }
  3028. layer { # L2_b6_cbr1_relu
  3029. name: "L2_b6_cbr1_relu"
  3030. type: "ReLU"
  3031. bottom: "L2_b6_cbr1_bn_top"
  3032. top: "L2_b6_cbr1_bn_top"
  3033. }
  3034. #} L2_b6_cbr1 end
  3035. #{ L2_b6_cbr2 start
  3036. layer { # L2_b6_cbr2_conv
  3037. name: "L2_b6_cbr2_conv"
  3038. type: "Convolution"
  3039. bottom: "L2_b6_cbr1_bn_top"
  3040. top: "L2_b6_cbr2_conv_top"
  3041. param {
  3042. lr_mult: 1
  3043. decay_mult: 1
  3044. }
  3045. param {
  3046. lr_mult: 2
  3047. decay_mult: 0
  3048. }
  3049. convolution_param {
  3050. num_output: 32
  3051. pad: 1
  3052. kernel_size: 3
  3053. stride: 1
  3054. weight_filler {
  3055. type: "msra"
  3056. }
  3057. bias_filler {
  3058. type: "constant"
  3059. }
  3060. }
  3061. }
  3062. layer { # L2_b6_cbr2_bn
  3063. name: "L2_b6_cbr2_bn"
  3064. type: "BatchNorm"
  3065. bottom: "L2_b6_cbr2_conv_top"
  3066. top: "L2_b6_cbr2_bn_top"
  3067. param {
  3068. lr_mult: 0
  3069. decay_mult: 0
  3070. }
  3071. param {
  3072. lr_mult: 0
  3073. decay_mult: 0
  3074. }
  3075. param {
  3076. lr_mult: 0
  3077. decay_mult: 0
  3078. }
  3079. include {
  3080. phase: TRAIN
  3081. }
  3082. batch_norm_param {
  3083. use_global_stats: false
  3084. moving_average_fraction: 0.999
  3085. }
  3086. }
  3087. layer { # L2_b6_cbr2_bn
  3088. name: "L2_b6_cbr2_bn"
  3089. type: "BatchNorm"
  3090. bottom: "L2_b6_cbr2_conv_top"
  3091. top: "L2_b6_cbr2_bn_top"
  3092. param {
  3093. lr_mult: 0
  3094. decay_mult: 0
  3095. }
  3096. param {
  3097. lr_mult: 0
  3098. decay_mult: 0
  3099. }
  3100. param {
  3101. lr_mult: 0
  3102. decay_mult: 0
  3103. }
  3104. include {
  3105. phase: TEST
  3106. }
  3107. batch_norm_param {
  3108. use_global_stats: true
  3109. moving_average_fraction: 0.999
  3110. }
  3111. }
  3112. layer { # L2_b6_cbr2_scale
  3113. name: "L2_b6_cbr2_scale"
  3114. type: "Scale"
  3115. bottom: "L2_b6_cbr2_bn_top"
  3116. top: "L2_b6_cbr2_bn_top"
  3117. scale_param {
  3118. bias_term: true
  3119. }
  3120. }
  3121. #} L2_b6_cbr2 end
  3122. layer { # L2_b6_sum_eltwise
  3123. name: "L2_b6_sum_eltwise"
  3124. type: "Eltwise"
  3125. bottom: "L2_b6_cbr2_bn_top"
  3126. bottom: "L2_b5_sum_eltwise_top"
  3127. top: "L2_b6_sum_eltwise_top"
  3128. eltwise_param {
  3129. operation: SUM
  3130. }
  3131. }
  3132. layer { # L2_b6_relu
  3133. name: "L2_b6_relu"
  3134. type: "ReLU"
  3135. bottom: "L2_b6_sum_eltwise_top"
  3136. top: "L2_b6_sum_eltwise_top"
  3137. }
  3138. #} L2_b6 end
  3139. #{ L2_b7 start
  3140. #{ L2_b7_cbr1 start
  3141. layer { # L2_b7_cbr1_conv
  3142. name: "L2_b7_cbr1_conv"
  3143. type: "Convolution"
  3144. bottom: "L2_b6_sum_eltwise_top"
  3145. top: "L2_b7_cbr1_conv_top"
  3146. param {
  3147. lr_mult: 1
  3148. decay_mult: 1
  3149. }
  3150. param {
  3151. lr_mult: 2
  3152. decay_mult: 0
  3153. }
  3154. convolution_param {
  3155. num_output: 32
  3156. pad: 1
  3157. kernel_size: 3
  3158. stride: 1
  3159. weight_filler {
  3160. type: "msra"
  3161. }
  3162. bias_filler {
  3163. type: "constant"
  3164. }
  3165. }
  3166. }
  3167. layer { # L2_b7_cbr1_bn
  3168. name: "L2_b7_cbr1_bn"
  3169. type: "BatchNorm"
  3170. bottom: "L2_b7_cbr1_conv_top"
  3171. top: "L2_b7_cbr1_bn_top"
  3172. param {
  3173. lr_mult: 0
  3174. decay_mult: 0
  3175. }
  3176. param {
  3177. lr_mult: 0
  3178. decay_mult: 0
  3179. }
  3180. param {
  3181. lr_mult: 0
  3182. decay_mult: 0
  3183. }
  3184. include {
  3185. phase: TRAIN
  3186. }
  3187. batch_norm_param {
  3188. use_global_stats: false
  3189. moving_average_fraction: 0.999
  3190. }
  3191. }
  3192. layer { # L2_b7_cbr1_bn
  3193. name: "L2_b7_cbr1_bn"
  3194. type: "BatchNorm"
  3195. bottom: "L2_b7_cbr1_conv_top"
  3196. top: "L2_b7_cbr1_bn_top"
  3197. param {
  3198. lr_mult: 0
  3199. decay_mult: 0
  3200. }
  3201. param {
  3202. lr_mult: 0
  3203. decay_mult: 0
  3204. }
  3205. param {
  3206. lr_mult: 0
  3207. decay_mult: 0
  3208. }
  3209. include {
  3210. phase: TEST
  3211. }
  3212. batch_norm_param {
  3213. use_global_stats: true
  3214. moving_average_fraction: 0.999
  3215. }
  3216. }
  3217. layer { # L2_b7_cbr1_scale
  3218. name: "L2_b7_cbr1_scale"
  3219. type: "Scale"
  3220. bottom: "L2_b7_cbr1_bn_top"
  3221. top: "L2_b7_cbr1_bn_top"
  3222. scale_param {
  3223. bias_term: true
  3224. }
  3225. }
  3226. layer { # L2_b7_cbr1_relu
  3227. name: "L2_b7_cbr1_relu"
  3228. type: "ReLU"
  3229. bottom: "L2_b7_cbr1_bn_top"
  3230. top: "L2_b7_cbr1_bn_top"
  3231. }
  3232. #} L2_b7_cbr1 end
  3233. #{ L2_b7_cbr2 start
  3234. layer { # L2_b7_cbr2_conv
  3235. name: "L2_b7_cbr2_conv"
  3236. type: "Convolution"
  3237. bottom: "L2_b7_cbr1_bn_top"
  3238. top: "L2_b7_cbr2_conv_top"
  3239. param {
  3240. lr_mult: 1
  3241. decay_mult: 1
  3242. }
  3243. param {
  3244. lr_mult: 2
  3245. decay_mult: 0
  3246. }
  3247. convolution_param {
  3248. num_output: 32
  3249. pad: 1
  3250. kernel_size: 3
  3251. stride: 1
  3252. weight_filler {
  3253. type: "msra"
  3254. }
  3255. bias_filler {
  3256. type: "constant"
  3257. }
  3258. }
  3259. }
  3260. layer { # L2_b7_cbr2_bn
  3261. name: "L2_b7_cbr2_bn"
  3262. type: "BatchNorm"
  3263. bottom: "L2_b7_cbr2_conv_top"
  3264. top: "L2_b7_cbr2_bn_top"
  3265. param {
  3266. lr_mult: 0
  3267. decay_mult: 0
  3268. }
  3269. param {
  3270. lr_mult: 0
  3271. decay_mult: 0
  3272. }
  3273. param {
  3274. lr_mult: 0
  3275. decay_mult: 0
  3276. }
  3277. include {
  3278. phase: TRAIN
  3279. }
  3280. batch_norm_param {
  3281. use_global_stats: false
  3282. moving_average_fraction: 0.999
  3283. }
  3284. }
  3285. layer { # L2_b7_cbr2_bn
  3286. name: "L2_b7_cbr2_bn"
  3287. type: "BatchNorm"
  3288. bottom: "L2_b7_cbr2_conv_top"
  3289. top: "L2_b7_cbr2_bn_top"
  3290. param {
  3291. lr_mult: 0
  3292. decay_mult: 0
  3293. }
  3294. param {
  3295. lr_mult: 0
  3296. decay_mult: 0
  3297. }
  3298. param {
  3299. lr_mult: 0
  3300. decay_mult: 0
  3301. }
  3302. include {
  3303. phase: TEST
  3304. }
  3305. batch_norm_param {
  3306. use_global_stats: true
  3307. moving_average_fraction: 0.999
  3308. }
  3309. }
  3310. layer { # L2_b7_cbr2_scale
  3311. name: "L2_b7_cbr2_scale"
  3312. type: "Scale"
  3313. bottom: "L2_b7_cbr2_bn_top"
  3314. top: "L2_b7_cbr2_bn_top"
  3315. scale_param {
  3316. bias_term: true
  3317. }
  3318. }
  3319. #} L2_b7_cbr2 end
  3320. layer { # L2_b7_sum_eltwise
  3321. name: "L2_b7_sum_eltwise"
  3322. type: "Eltwise"
  3323. bottom: "L2_b7_cbr2_bn_top"
  3324. bottom: "L2_b6_sum_eltwise_top"
  3325. top: "L2_b7_sum_eltwise_top"
  3326. eltwise_param {
  3327. operation: SUM
  3328. }
  3329. }
  3330. layer { # L2_b7_relu
  3331. name: "L2_b7_relu"
  3332. type: "ReLU"
  3333. bottom: "L2_b7_sum_eltwise_top"
  3334. top: "L2_b7_sum_eltwise_top"
  3335. }
  3336. #} L2_b7 end
  3337. #{ L2_b8 start
  3338. #{ L2_b8_cbr1 start
  3339. layer { # L2_b8_cbr1_conv
  3340. name: "L2_b8_cbr1_conv"
  3341. type: "Convolution"
  3342. bottom: "L2_b7_sum_eltwise_top"
  3343. top: "L2_b8_cbr1_conv_top"
  3344. param {
  3345. lr_mult: 1
  3346. decay_mult: 1
  3347. }
  3348. param {
  3349. lr_mult: 2
  3350. decay_mult: 0
  3351. }
  3352. convolution_param {
  3353. num_output: 32
  3354. pad: 1
  3355. kernel_size: 3
  3356. stride: 1
  3357. weight_filler {
  3358. type: "msra"
  3359. }
  3360. bias_filler {
  3361. type: "constant"
  3362. }
  3363. }
  3364. }
  3365. layer { # L2_b8_cbr1_bn
  3366. name: "L2_b8_cbr1_bn"
  3367. type: "BatchNorm"
  3368. bottom: "L2_b8_cbr1_conv_top"
  3369. top: "L2_b8_cbr1_bn_top"
  3370. param {
  3371. lr_mult: 0
  3372. decay_mult: 0
  3373. }
  3374. param {
  3375. lr_mult: 0
  3376. decay_mult: 0
  3377. }
  3378. param {
  3379. lr_mult: 0
  3380. decay_mult: 0
  3381. }
  3382. include {
  3383. phase: TRAIN
  3384. }
  3385. batch_norm_param {
  3386. use_global_stats: false
  3387. moving_average_fraction: 0.999
  3388. }
  3389. }
  3390. layer { # L2_b8_cbr1_bn
  3391. name: "L2_b8_cbr1_bn"
  3392. type: "BatchNorm"
  3393. bottom: "L2_b8_cbr1_conv_top"
  3394. top: "L2_b8_cbr1_bn_top"
  3395. param {
  3396. lr_mult: 0
  3397. decay_mult: 0
  3398. }
  3399. param {
  3400. lr_mult: 0
  3401. decay_mult: 0
  3402. }
  3403. param {
  3404. lr_mult: 0
  3405. decay_mult: 0
  3406. }
  3407. include {
  3408. phase: TEST
  3409. }
  3410. batch_norm_param {
  3411. use_global_stats: true
  3412. moving_average_fraction: 0.999
  3413. }
  3414. }
  3415. layer { # L2_b8_cbr1_scale
  3416. name: "L2_b8_cbr1_scale"
  3417. type: "Scale"
  3418. bottom: "L2_b8_cbr1_bn_top"
  3419. top: "L2_b8_cbr1_bn_top"
  3420. scale_param {
  3421. bias_term: true
  3422. }
  3423. }
  3424. layer { # L2_b8_cbr1_relu
  3425. name: "L2_b8_cbr1_relu"
  3426. type: "ReLU"
  3427. bottom: "L2_b8_cbr1_bn_top"
  3428. top: "L2_b8_cbr1_bn_top"
  3429. }
  3430. #} L2_b8_cbr1 end
  3431. #{ L2_b8_cbr2 start
  3432. layer { # L2_b8_cbr2_conv
  3433. name: "L2_b8_cbr2_conv"
  3434. type: "Convolution"
  3435. bottom: "L2_b8_cbr1_bn_top"
  3436. top: "L2_b8_cbr2_conv_top"
  3437. param {
  3438. lr_mult: 1
  3439. decay_mult: 1
  3440. }
  3441. param {
  3442. lr_mult: 2
  3443. decay_mult: 0
  3444. }
  3445. convolution_param {
  3446. num_output: 32
  3447. pad: 1
  3448. kernel_size: 3
  3449. stride: 1
  3450. weight_filler {
  3451. type: "msra"
  3452. }
  3453. bias_filler {
  3454. type: "constant"
  3455. }
  3456. }
  3457. }
  3458. layer { # L2_b8_cbr2_bn
  3459. name: "L2_b8_cbr2_bn"
  3460. type: "BatchNorm"
  3461. bottom: "L2_b8_cbr2_conv_top"
  3462. top: "L2_b8_cbr2_bn_top"
  3463. param {
  3464. lr_mult: 0
  3465. decay_mult: 0
  3466. }
  3467. param {
  3468. lr_mult: 0
  3469. decay_mult: 0
  3470. }
  3471. param {
  3472. lr_mult: 0
  3473. decay_mult: 0
  3474. }
  3475. include {
  3476. phase: TRAIN
  3477. }
  3478. batch_norm_param {
  3479. use_global_stats: false
  3480. moving_average_fraction: 0.999
  3481. }
  3482. }
  3483. layer { # L2_b8_cbr2_bn
  3484. name: "L2_b8_cbr2_bn"
  3485. type: "BatchNorm"
  3486. bottom: "L2_b8_cbr2_conv_top"
  3487. top: "L2_b8_cbr2_bn_top"
  3488. param {
  3489. lr_mult: 0
  3490. decay_mult: 0
  3491. }
  3492. param {
  3493. lr_mult: 0
  3494. decay_mult: 0
  3495. }
  3496. param {
  3497. lr_mult: 0
  3498. decay_mult: 0
  3499. }
  3500. include {
  3501. phase: TEST
  3502. }
  3503. batch_norm_param {
  3504. use_global_stats: true
  3505. moving_average_fraction: 0.999
  3506. }
  3507. }
  3508. layer { # L2_b8_cbr2_scale
  3509. name: "L2_b8_cbr2_scale"
  3510. type: "Scale"
  3511. bottom: "L2_b8_cbr2_bn_top"
  3512. top: "L2_b8_cbr2_bn_top"
  3513. scale_param {
  3514. bias_term: true
  3515. }
  3516. }
  3517. #} L2_b8_cbr2 end
  3518. layer { # L2_b8_sum_eltwise
  3519. name: "L2_b8_sum_eltwise"
  3520. type: "Eltwise"
  3521. bottom: "L2_b8_cbr2_bn_top"
  3522. bottom: "L2_b7_sum_eltwise_top"
  3523. top: "L2_b8_sum_eltwise_top"
  3524. eltwise_param {
  3525. operation: SUM
  3526. }
  3527. }
  3528. layer { # L2_b8_relu
  3529. name: "L2_b8_relu"
  3530. type: "ReLU"
  3531. bottom: "L2_b8_sum_eltwise_top"
  3532. top: "L2_b8_sum_eltwise_top"
  3533. }
  3534. #} L2_b8 end
  3535. #{ L2_b9 start
  3536. #{ L2_b9_cbr1 start
  3537. layer { # L2_b9_cbr1_conv
  3538. name: "L2_b9_cbr1_conv"
  3539. type: "Convolution"
  3540. bottom: "L2_b8_sum_eltwise_top"
  3541. top: "L2_b9_cbr1_conv_top"
  3542. param {
  3543. lr_mult: 1
  3544. decay_mult: 1
  3545. }
  3546. param {
  3547. lr_mult: 2
  3548. decay_mult: 0
  3549. }
  3550. convolution_param {
  3551. num_output: 32
  3552. pad: 1
  3553. kernel_size: 3
  3554. stride: 1
  3555. weight_filler {
  3556. type: "msra"
  3557. }
  3558. bias_filler {
  3559. type: "constant"
  3560. }
  3561. }
  3562. }
  3563. layer { # L2_b9_cbr1_bn
  3564. name: "L2_b9_cbr1_bn"
  3565. type: "BatchNorm"
  3566. bottom: "L2_b9_cbr1_conv_top"
  3567. top: "L2_b9_cbr1_bn_top"
  3568. param {
  3569. lr_mult: 0
  3570. decay_mult: 0
  3571. }
  3572. param {
  3573. lr_mult: 0
  3574. decay_mult: 0
  3575. }
  3576. param {
  3577. lr_mult: 0
  3578. decay_mult: 0
  3579. }
  3580. include {
  3581. phase: TRAIN
  3582. }
  3583. batch_norm_param {
  3584. use_global_stats: false
  3585. moving_average_fraction: 0.999
  3586. }
  3587. }
  3588. layer { # L2_b9_cbr1_bn
  3589. name: "L2_b9_cbr1_bn"
  3590. type: "BatchNorm"
  3591. bottom: "L2_b9_cbr1_conv_top"
  3592. top: "L2_b9_cbr1_bn_top"
  3593. param {
  3594. lr_mult: 0
  3595. decay_mult: 0
  3596. }
  3597. param {
  3598. lr_mult: 0
  3599. decay_mult: 0
  3600. }
  3601. param {
  3602. lr_mult: 0
  3603. decay_mult: 0
  3604. }
  3605. include {
  3606. phase: TEST
  3607. }
  3608. batch_norm_param {
  3609. use_global_stats: true
  3610. moving_average_fraction: 0.999
  3611. }
  3612. }
  3613. layer { # L2_b9_cbr1_scale
  3614. name: "L2_b9_cbr1_scale"
  3615. type: "Scale"
  3616. bottom: "L2_b9_cbr1_bn_top"
  3617. top: "L2_b9_cbr1_bn_top"
  3618. scale_param {
  3619. bias_term: true
  3620. }
  3621. }
  3622. layer { # L2_b9_cbr1_relu
  3623. name: "L2_b9_cbr1_relu"
  3624. type: "ReLU"
  3625. bottom: "L2_b9_cbr1_bn_top"
  3626. top: "L2_b9_cbr1_bn_top"
  3627. }
  3628. #} L2_b9_cbr1 end
  3629. #{ L2_b9_cbr2 start
  3630. layer { # L2_b9_cbr2_conv
  3631. name: "L2_b9_cbr2_conv"
  3632. type: "Convolution"
  3633. bottom: "L2_b9_cbr1_bn_top"
  3634. top: "L2_b9_cbr2_conv_top"
  3635. param {
  3636. lr_mult: 1
  3637. decay_mult: 1
  3638. }
  3639. param {
  3640. lr_mult: 2
  3641. decay_mult: 0
  3642. }
  3643. convolution_param {
  3644. num_output: 32
  3645. pad: 1
  3646. kernel_size: 3
  3647. stride: 1
  3648. weight_filler {
  3649. type: "msra"
  3650. }
  3651. bias_filler {
  3652. type: "constant"
  3653. }
  3654. }
  3655. }
  3656. layer { # L2_b9_cbr2_bn
  3657. name: "L2_b9_cbr2_bn"
  3658. type: "BatchNorm"
  3659. bottom: "L2_b9_cbr2_conv_top"
  3660. top: "L2_b9_cbr2_bn_top"
  3661. param {
  3662. lr_mult: 0
  3663. decay_mult: 0
  3664. }
  3665. param {
  3666. lr_mult: 0
  3667. decay_mult: 0
  3668. }
  3669. param {
  3670. lr_mult: 0
  3671. decay_mult: 0
  3672. }
  3673. include {
  3674. phase: TRAIN
  3675. }
  3676. batch_norm_param {
  3677. use_global_stats: false
  3678. moving_average_fraction: 0.999
  3679. }
  3680. }
  3681. layer { # L2_b9_cbr2_bn
  3682. name: "L2_b9_cbr2_bn"
  3683. type: "BatchNorm"
  3684. bottom: "L2_b9_cbr2_conv_top"
  3685. top: "L2_b9_cbr2_bn_top"
  3686. param {
  3687. lr_mult: 0
  3688. decay_mult: 0
  3689. }
  3690. param {
  3691. lr_mult: 0
  3692. decay_mult: 0
  3693. }
  3694. param {
  3695. lr_mult: 0
  3696. decay_mult: 0
  3697. }
  3698. include {
  3699. phase: TEST
  3700. }
  3701. batch_norm_param {
  3702. use_global_stats: true
  3703. moving_average_fraction: 0.999
  3704. }
  3705. }
  3706. layer { # L2_b9_cbr2_scale
  3707. name: "L2_b9_cbr2_scale"
  3708. type: "Scale"
  3709. bottom: "L2_b9_cbr2_bn_top"
  3710. top: "L2_b9_cbr2_bn_top"
  3711. scale_param {
  3712. bias_term: true
  3713. }
  3714. }
  3715. #} L2_b9_cbr2 end
  3716. layer { # L2_b9_sum_eltwise
  3717. name: "L2_b9_sum_eltwise"
  3718. type: "Eltwise"
  3719. bottom: "L2_b9_cbr2_bn_top"
  3720. bottom: "L2_b8_sum_eltwise_top"
  3721. top: "L2_b9_sum_eltwise_top"
  3722. eltwise_param {
  3723. operation: SUM
  3724. }
  3725. }
  3726. layer { # L2_b9_relu
  3727. name: "L2_b9_relu"
  3728. type: "ReLU"
  3729. bottom: "L2_b9_sum_eltwise_top"
  3730. top: "L2_b9_sum_eltwise_top"
  3731. }
  3732. #} L2_b9 end
  3733. #} L2 end
  3734. #{ L3 start
  3735. #{ L3_b1 start
  3736. #{ L3_b1_cbr1 start
  3737. layer { # L3_b1_cbr1_conv
  3738. name: "L3_b1_cbr1_conv"
  3739. type: "Convolution"
  3740. bottom: "L2_b9_sum_eltwise_top"
  3741. top: "L3_b1_cbr1_conv_top"
  3742. param {
  3743. lr_mult: 1
  3744. decay_mult: 1
  3745. }
  3746. param {
  3747. lr_mult: 2
  3748. decay_mult: 0
  3749. }
  3750. convolution_param {
  3751. num_output: 32
  3752. pad: 1
  3753. kernel_size: 3
  3754. stride: 2
  3755. weight_filler {
  3756. type: "msra"
  3757. }
  3758. bias_filler {
  3759. type: "constant"
  3760. }
  3761. }
  3762. }
  3763. layer { # L3_b1_cbr1_bn
  3764. name: "L3_b1_cbr1_bn"
  3765. type: "BatchNorm"
  3766. bottom: "L3_b1_cbr1_conv_top"
  3767. top: "L3_b1_cbr1_bn_top"
  3768. param {
  3769. lr_mult: 0
  3770. decay_mult: 0
  3771. }
  3772. param {
  3773. lr_mult: 0
  3774. decay_mult: 0
  3775. }
  3776. param {
  3777. lr_mult: 0
  3778. decay_mult: 0
  3779. }
  3780. include {
  3781. phase: TRAIN
  3782. }
  3783. batch_norm_param {
  3784. use_global_stats: false
  3785. moving_average_fraction: 0.999
  3786. }
  3787. }
  3788. layer { # L3_b1_cbr1_bn
  3789. name: "L3_b1_cbr1_bn"
  3790. type: "BatchNorm"
  3791. bottom: "L3_b1_cbr1_conv_top"
  3792. top: "L3_b1_cbr1_bn_top"
  3793. param {
  3794. lr_mult: 0
  3795. decay_mult: 0
  3796. }
  3797. param {
  3798. lr_mult: 0
  3799. decay_mult: 0
  3800. }
  3801. param {
  3802. lr_mult: 0
  3803. decay_mult: 0
  3804. }
  3805. include {
  3806. phase: TEST
  3807. }
  3808. batch_norm_param {
  3809. use_global_stats: true
  3810. moving_average_fraction: 0.999
  3811. }
  3812. }
  3813. layer { # L3_b1_cbr1_scale
  3814. name: "L3_b1_cbr1_scale"
  3815. type: "Scale"
  3816. bottom: "L3_b1_cbr1_bn_top"
  3817. top: "L3_b1_cbr1_bn_top"
  3818. scale_param {
  3819. bias_term: true
  3820. }
  3821. }
  3822. layer { # L3_b1_cbr1_relu
  3823. name: "L3_b1_cbr1_relu"
  3824. type: "ReLU"
  3825. bottom: "L3_b1_cbr1_bn_top"
  3826. top: "L3_b1_cbr1_bn_top"
  3827. }
  3828. #} L3_b1_cbr1 end
  3829. #{ L3_b1_cbr2 start
  3830. layer { # L3_b1_cbr2_conv
  3831. name: "L3_b1_cbr2_conv"
  3832. type: "Convolution"
  3833. bottom: "L3_b1_cbr1_bn_top"
  3834. top: "L3_b1_cbr2_conv_top"
  3835. param {
  3836. lr_mult: 1
  3837. decay_mult: 1
  3838. }
  3839. param {
  3840. lr_mult: 2
  3841. decay_mult: 0
  3842. }
  3843. convolution_param {
  3844. num_output: 32
  3845. pad: 1
  3846. kernel_size: 3
  3847. stride: 1
  3848. weight_filler {
  3849. type: "msra"
  3850. }
  3851. bias_filler {
  3852. type: "constant"
  3853. }
  3854. }
  3855. }
  3856. layer { # L3_b1_cbr2_bn
  3857. name: "L3_b1_cbr2_bn"
  3858. type: "BatchNorm"
  3859. bottom: "L3_b1_cbr2_conv_top"
  3860. top: "L3_b1_cbr2_bn_top"
  3861. param {
  3862. lr_mult: 0
  3863. decay_mult: 0
  3864. }
  3865. param {
  3866. lr_mult: 0
  3867. decay_mult: 0
  3868. }
  3869. param {
  3870. lr_mult: 0
  3871. decay_mult: 0
  3872. }
  3873. include {
  3874. phase: TRAIN
  3875. }
  3876. batch_norm_param {
  3877. use_global_stats: false
  3878. moving_average_fraction: 0.999
  3879. }
  3880. }
  3881. layer { # L3_b1_cbr2_bn
  3882. name: "L3_b1_cbr2_bn"
  3883. type: "BatchNorm"
  3884. bottom: "L3_b1_cbr2_conv_top"
  3885. top: "L3_b1_cbr2_bn_top"
  3886. param {
  3887. lr_mult: 0
  3888. decay_mult: 0
  3889. }
  3890. param {
  3891. lr_mult: 0
  3892. decay_mult: 0
  3893. }
  3894. param {
  3895. lr_mult: 0
  3896. decay_mult: 0
  3897. }
  3898. include {
  3899. phase: TEST
  3900. }
  3901. batch_norm_param {
  3902. use_global_stats: true
  3903. moving_average_fraction: 0.999
  3904. }
  3905. }
  3906. layer { # L3_b1_cbr2_scale
  3907. name: "L3_b1_cbr2_scale"
  3908. type: "Scale"
  3909. bottom: "L3_b1_cbr2_bn_top"
  3910. top: "L3_b1_cbr2_bn_top"
  3911. scale_param {
  3912. bias_term: true
  3913. }
  3914. }
  3915. #} L3_b1_cbr2 end
  3916. layer { # L3_b1_pool
  3917. name: "L3_b1_pool"
  3918. type: "Pooling"
  3919. bottom: "L2_b9_sum_eltwise_top"
  3920. top: "L3_b1_pool"
  3921. pooling_param {
  3922. pool: AVE
  3923. kernel_size: 3
  3924. stride: 2
  3925. }
  3926. }
  3927. layer { # L3_b1_sum_eltwise
  3928. name: "L3_b1_sum_eltwise"
  3929. type: "Eltwise"
  3930. bottom: "L3_b1_cbr2_bn_top"
  3931. bottom: "L3_b1_pool"
  3932. top: "L3_b1_sum_eltwise_top"
  3933. eltwise_param {
  3934. operation: SUM
  3935. }
  3936. }
  3937. layer { # L3_b1_relu
  3938. name: "L3_b1_relu"
  3939. type: "ReLU"
  3940. bottom: "L3_b1_sum_eltwise_top"
  3941. top: "L3_b1_sum_eltwise_top"
  3942. }
  3943. #} L3_b1 end
  3944. layer { # L3_b1_zeros
  3945. name: "L3_b1_zeros"
  3946. type: "DummyData"
  3947. top: "L3_b1_zeros"
  3948. dummy_data_param {
  3949. shape: {dim: 125 dim: 32 dim: 8 dim: 8 }
  3950. data_filler: {
  3951. type: "constant"
  3952. value: 0
  3953. }
  3954. }
  3955. }
  3956. layer { # L3_b1_concat0
  3957. name: "L3_b1_concat0"
  3958. type: "Concat"
  3959. bottom: "L3_b1_sum_eltwise_top"
  3960. bottom: "L3_b1_zeros"
  3961. top: "L3_b1_concat0"
  3962. concat_param {
  3963. axis: 1
  3964. }
  3965. }
  3966. #{ L3_b2 start
  3967. #{ L3_b2_cbr1 start
  3968. layer { # L3_b2_cbr1_conv
  3969. name: "L3_b2_cbr1_conv"
  3970. type: "Convolution"
  3971. bottom: "L3_b1_concat0"
  3972. top: "L3_b2_cbr1_conv_top"
  3973. param {
  3974. lr_mult: 1
  3975. decay_mult: 1
  3976. }
  3977. param {
  3978. lr_mult: 2
  3979. decay_mult: 0
  3980. }
  3981. convolution_param {
  3982. num_output: 64
  3983. pad: 1
  3984. kernel_size: 3
  3985. stride: 1
  3986. weight_filler {
  3987. type: "msra"
  3988. }
  3989. bias_filler {
  3990. type: "constant"
  3991. }
  3992. }
  3993. }
  3994. layer { # L3_b2_cbr1_bn
  3995. name: "L3_b2_cbr1_bn"
  3996. type: "BatchNorm"
  3997. bottom: "L3_b2_cbr1_conv_top"
  3998. top: "L3_b2_cbr1_bn_top"
  3999. param {
  4000. lr_mult: 0
  4001. decay_mult: 0
  4002. }
  4003. param {
  4004. lr_mult: 0
  4005. decay_mult: 0
  4006. }
  4007. param {
  4008. lr_mult: 0
  4009. decay_mult: 0
  4010. }
  4011. include {
  4012. phase: TRAIN
  4013. }
  4014. batch_norm_param {
  4015. use_global_stats: false
  4016. moving_average_fraction: 0.999
  4017. }
  4018. }
  4019. layer { # L3_b2_cbr1_bn
  4020. name: "L3_b2_cbr1_bn"
  4021. type: "BatchNorm"
  4022. bottom: "L3_b2_cbr1_conv_top"
  4023. top: "L3_b2_cbr1_bn_top"
  4024. param {
  4025. lr_mult: 0
  4026. decay_mult: 0
  4027. }
  4028. param {
  4029. lr_mult: 0
  4030. decay_mult: 0
  4031. }
  4032. param {
  4033. lr_mult: 0
  4034. decay_mult: 0
  4035. }
  4036. include {
  4037. phase: TEST
  4038. }
  4039. batch_norm_param {
  4040. use_global_stats: true
  4041. moving_average_fraction: 0.999
  4042. }
  4043. }
  4044. layer { # L3_b2_cbr1_scale
  4045. name: "L3_b2_cbr1_scale"
  4046. type: "Scale"
  4047. bottom: "L3_b2_cbr1_bn_top"
  4048. top: "L3_b2_cbr1_bn_top"
  4049. scale_param {
  4050. bias_term: true
  4051. }
  4052. }
  4053. layer { # L3_b2_cbr1_relu
  4054. name: "L3_b2_cbr1_relu"
  4055. type: "ReLU"
  4056. bottom: "L3_b2_cbr1_bn_top"
  4057. top: "L3_b2_cbr1_bn_top"
  4058. }
  4059. #} L3_b2_cbr1 end
  4060. #{ L3_b2_cbr2 start
  4061. layer { # L3_b2_cbr2_conv
  4062. name: "L3_b2_cbr2_conv"
  4063. type: "Convolution"
  4064. bottom: "L3_b2_cbr1_bn_top"
  4065. top: "L3_b2_cbr2_conv_top"
  4066. param {
  4067. lr_mult: 1
  4068. decay_mult: 1
  4069. }
  4070. param {
  4071. lr_mult: 2
  4072. decay_mult: 0
  4073. }
  4074. convolution_param {
  4075. num_output: 64
  4076. pad: 1
  4077. kernel_size: 3
  4078. stride: 1
  4079. weight_filler {
  4080. type: "msra"
  4081. }
  4082. bias_filler {
  4083. type: "constant"
  4084. }
  4085. }
  4086. }
  4087. layer { # L3_b2_cbr2_bn
  4088. name: "L3_b2_cbr2_bn"
  4089. type: "BatchNorm"
  4090. bottom: "L3_b2_cbr2_conv_top"
  4091. top: "L3_b2_cbr2_bn_top"
  4092. param {
  4093. lr_mult: 0
  4094. decay_mult: 0
  4095. }
  4096. param {
  4097. lr_mult: 0
  4098. decay_mult: 0
  4099. }
  4100. param {
  4101. lr_mult: 0
  4102. decay_mult: 0
  4103. }
  4104. include {
  4105. phase: TRAIN
  4106. }
  4107. batch_norm_param {
  4108. use_global_stats: false
  4109. moving_average_fraction: 0.999
  4110. }
  4111. }
  4112. layer { # L3_b2_cbr2_bn
  4113. name: "L3_b2_cbr2_bn"
  4114. type: "BatchNorm"
  4115. bottom: "L3_b2_cbr2_conv_top"
  4116. top: "L3_b2_cbr2_bn_top"
  4117. param {
  4118. lr_mult: 0
  4119. decay_mult: 0
  4120. }
  4121. param {
  4122. lr_mult: 0
  4123. decay_mult: 0
  4124. }
  4125. param {
  4126. lr_mult: 0
  4127. decay_mult: 0
  4128. }
  4129. include {
  4130. phase: TEST
  4131. }
  4132. batch_norm_param {
  4133. use_global_stats: true
  4134. moving_average_fraction: 0.999
  4135. }
  4136. }
  4137. layer { # L3_b2_cbr2_scale
  4138. name: "L3_b2_cbr2_scale"
  4139. type: "Scale"
  4140. bottom: "L3_b2_cbr2_bn_top"
  4141. top: "L3_b2_cbr2_bn_top"
  4142. scale_param {
  4143. bias_term: true
  4144. }
  4145. }
  4146. #} L3_b2_cbr2 end
  4147. layer { # L3_b2_sum_eltwise
  4148. name: "L3_b2_sum_eltwise"
  4149. type: "Eltwise"
  4150. bottom: "L3_b2_cbr2_bn_top"
  4151. bottom: "L3_b1_concat0"
  4152. top: "L3_b2_sum_eltwise_top"
  4153. eltwise_param {
  4154. operation: SUM
  4155. }
  4156. }
  4157. layer { # L3_b2_relu
  4158. name: "L3_b2_relu"
  4159. type: "ReLU"
  4160. bottom: "L3_b2_sum_eltwise_top"
  4161. top: "L3_b2_sum_eltwise_top"
  4162. }
  4163. #} L3_b2 end
  4164. #{ L3_b3 start
  4165. #{ L3_b3_cbr1 start
  4166. layer { # L3_b3_cbr1_conv
  4167. name: "L3_b3_cbr1_conv"
  4168. type: "Convolution"
  4169. bottom: "L3_b2_sum_eltwise_top"
  4170. top: "L3_b3_cbr1_conv_top"
  4171. param {
  4172. lr_mult: 1
  4173. decay_mult: 1
  4174. }
  4175. param {
  4176. lr_mult: 2
  4177. decay_mult: 0
  4178. }
  4179. convolution_param {
  4180. num_output: 64
  4181. pad: 1
  4182. kernel_size: 3
  4183. stride: 1
  4184. weight_filler {
  4185. type: "msra"
  4186. }
  4187. bias_filler {
  4188. type: "constant"
  4189. }
  4190. }
  4191. }
  4192. layer { # L3_b3_cbr1_bn
  4193. name: "L3_b3_cbr1_bn"
  4194. type: "BatchNorm"
  4195. bottom: "L3_b3_cbr1_conv_top"
  4196. top: "L3_b3_cbr1_bn_top"
  4197. param {
  4198. lr_mult: 0
  4199. decay_mult: 0
  4200. }
  4201. param {
  4202. lr_mult: 0
  4203. decay_mult: 0
  4204. }
  4205. param {
  4206. lr_mult: 0
  4207. decay_mult: 0
  4208. }
  4209. include {
  4210. phase: TRAIN
  4211. }
  4212. batch_norm_param {
  4213. use_global_stats: false
  4214. moving_average_fraction: 0.999
  4215. }
  4216. }
  4217. layer { # L3_b3_cbr1_bn
  4218. name: "L3_b3_cbr1_bn"
  4219. type: "BatchNorm"
  4220. bottom: "L3_b3_cbr1_conv_top"
  4221. top: "L3_b3_cbr1_bn_top"
  4222. param {
  4223. lr_mult: 0
  4224. decay_mult: 0
  4225. }
  4226. param {
  4227. lr_mult: 0
  4228. decay_mult: 0
  4229. }
  4230. param {
  4231. lr_mult: 0
  4232. decay_mult: 0
  4233. }
  4234. include {
  4235. phase: TEST
  4236. }
  4237. batch_norm_param {
  4238. use_global_stats: true
  4239. moving_average_fraction: 0.999
  4240. }
  4241. }
  4242. layer { # L3_b3_cbr1_scale
  4243. name: "L3_b3_cbr1_scale"
  4244. type: "Scale"
  4245. bottom: "L3_b3_cbr1_bn_top"
  4246. top: "L3_b3_cbr1_bn_top"
  4247. scale_param {
  4248. bias_term: true
  4249. }
  4250. }
  4251. layer { # L3_b3_cbr1_relu
  4252. name: "L3_b3_cbr1_relu"
  4253. type: "ReLU"
  4254. bottom: "L3_b3_cbr1_bn_top"
  4255. top: "L3_b3_cbr1_bn_top"
  4256. }
  4257. #} L3_b3_cbr1 end
  4258. #{ L3_b3_cbr2 start
  4259. layer { # L3_b3_cbr2_conv
  4260. name: "L3_b3_cbr2_conv"
  4261. type: "Convolution"
  4262. bottom: "L3_b3_cbr1_bn_top"
  4263. top: "L3_b3_cbr2_conv_top"
  4264. param {
  4265. lr_mult: 1
  4266. decay_mult: 1
  4267. }
  4268. param {
  4269. lr_mult: 2
  4270. decay_mult: 0
  4271. }
  4272. convolution_param {
  4273. num_output: 64
  4274. pad: 1
  4275. kernel_size: 3
  4276. stride: 1
  4277. weight_filler {
  4278. type: "msra"
  4279. }
  4280. bias_filler {
  4281. type: "constant"
  4282. }
  4283. }
  4284. }
  4285. layer { # L3_b3_cbr2_bn
  4286. name: "L3_b3_cbr2_bn"
  4287. type: "BatchNorm"
  4288. bottom: "L3_b3_cbr2_conv_top"
  4289. top: "L3_b3_cbr2_bn_top"
  4290. param {
  4291. lr_mult: 0
  4292. decay_mult: 0
  4293. }
  4294. param {
  4295. lr_mult: 0
  4296. decay_mult: 0
  4297. }
  4298. param {
  4299. lr_mult: 0
  4300. decay_mult: 0
  4301. }
  4302. include {
  4303. phase: TRAIN
  4304. }
  4305. batch_norm_param {
  4306. use_global_stats: false
  4307. moving_average_fraction: 0.999
  4308. }
  4309. }
  4310. layer { # L3_b3_cbr2_bn
  4311. name: "L3_b3_cbr2_bn"
  4312. type: "BatchNorm"
  4313. bottom: "L3_b3_cbr2_conv_top"
  4314. top: "L3_b3_cbr2_bn_top"
  4315. param {
  4316. lr_mult: 0
  4317. decay_mult: 0
  4318. }
  4319. param {
  4320. lr_mult: 0
  4321. decay_mult: 0
  4322. }
  4323. param {
  4324. lr_mult: 0
  4325. decay_mult: 0
  4326. }
  4327. include {
  4328. phase: TEST
  4329. }
  4330. batch_norm_param {
  4331. use_global_stats: true
  4332. moving_average_fraction: 0.999
  4333. }
  4334. }
  4335. layer { # L3_b3_cbr2_scale
  4336. name: "L3_b3_cbr2_scale"
  4337. type: "Scale"
  4338. bottom: "L3_b3_cbr2_bn_top"
  4339. top: "L3_b3_cbr2_bn_top"
  4340. scale_param {
  4341. bias_term: true
  4342. }
  4343. }
  4344. #} L3_b3_cbr2 end
  4345. layer { # L3_b3_sum_eltwise
  4346. name: "L3_b3_sum_eltwise"
  4347. type: "Eltwise"
  4348. bottom: "L3_b3_cbr2_bn_top"
  4349. bottom: "L3_b2_sum_eltwise_top"
  4350. top: "L3_b3_sum_eltwise_top"
  4351. eltwise_param {
  4352. operation: SUM
  4353. }
  4354. }
  4355. layer { # L3_b3_relu
  4356. name: "L3_b3_relu"
  4357. type: "ReLU"
  4358. bottom: "L3_b3_sum_eltwise_top"
  4359. top: "L3_b3_sum_eltwise_top"
  4360. }
  4361. #} L3_b3 end
#{ L3_b4 start
# Residual block 4 of stage L3: two (3x3/1 conv, 64 ch -> BN -> Scale) units
# with a ReLU between them, plus an identity shortcut from L3_b3; the branch
# and shortcut are summed element-wise and passed through a final ReLU.
#{ L3_b4_cbr1 start
layer { # L3_b4_cbr1_conv: 3x3 conv, 64 outputs, stride 1, pad 1 (shape-preserving)
  name: "L3_b4_cbr1_conv"
  type: "Convolution"
  bottom: "L3_b3_sum_eltwise_top"
  top: "L3_b4_cbr1_conv_top"
  param {
    lr_mult: 1    # weights: base learning rate, with weight decay
    decay_mult: 1
  }
  param {
    lr_mult: 2    # bias: double learning rate, no weight decay
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b4_cbr1_bn (TRAIN): batch statistics; running averages updated
  name: "L3_b4_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b4_cbr1_conv_top"
  top: "L3_b4_cbr1_bn_top"
  # BN blobs (mean/variance/count) are accumulated, not learned: lr_mult 0.
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b4_cbr1_bn (TEST): same name => shares blobs; uses global stats
  name: "L3_b4_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b4_cbr1_conv_top"
  top: "L3_b4_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b4_cbr1_scale: learned per-channel scale + bias after BN (in-place)
  name: "L3_b4_cbr1_scale"
  type: "Scale"
  bottom: "L3_b4_cbr1_bn_top"
  top: "L3_b4_cbr1_bn_top"
  scale_param {
    bias_term: true
  }
}
layer { # L3_b4_cbr1_relu: in-place ReLU between the two conv units
  name: "L3_b4_cbr1_relu"
  type: "ReLU"
  bottom: "L3_b4_cbr1_bn_top"
  top: "L3_b4_cbr1_bn_top"
}
#} L3_b4_cbr1 end
#{ L3_b4_cbr2 start
layer { # L3_b4_cbr2_conv: second 3x3/1 conv, 64 outputs
  name: "L3_b4_cbr2_conv"
  type: "Convolution"
  bottom: "L3_b4_cbr1_bn_top"
  top: "L3_b4_cbr2_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b4_cbr2_bn (TRAIN)
  name: "L3_b4_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b4_cbr2_conv_top"
  top: "L3_b4_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b4_cbr2_bn (TEST)
  name: "L3_b4_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b4_cbr2_conv_top"
  top: "L3_b4_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b4_cbr2_scale
  name: "L3_b4_cbr2_scale"
  type: "Scale"
  bottom: "L3_b4_cbr2_bn_top"
  top: "L3_b4_cbr2_bn_top"
  scale_param {
    bias_term: true
  }
}
#} L3_b4_cbr2 end
layer { # L3_b4_sum_eltwise: residual addition with the identity shortcut
  name: "L3_b4_sum_eltwise"
  type: "Eltwise"
  bottom: "L3_b4_cbr2_bn_top"
  bottom: "L3_b3_sum_eltwise_top"
  top: "L3_b4_sum_eltwise_top"
  eltwise_param {
    operation: SUM
  }
}
layer { # L3_b4_relu: post-addition ReLU (in-place)
  name: "L3_b4_relu"
  type: "ReLU"
  bottom: "L3_b4_sum_eltwise_top"
  top: "L3_b4_sum_eltwise_top"
}
#} L3_b4 end
#{ L3_b5 start
# Residual block 5 of stage L3: same pattern as L3_b4 (two 3x3x64
# conv->BN->Scale units, identity shortcut, post-addition ReLU).
#{ L3_b5_cbr1 start
layer { # L3_b5_cbr1_conv
  name: "L3_b5_cbr1_conv"
  type: "Convolution"
  bottom: "L3_b4_sum_eltwise_top"
  top: "L3_b5_cbr1_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b5_cbr1_bn (TRAIN)
  name: "L3_b5_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b5_cbr1_conv_top"
  top: "L3_b5_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b5_cbr1_bn (TEST): shares blobs by name; uses global stats
  name: "L3_b5_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b5_cbr1_conv_top"
  top: "L3_b5_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b5_cbr1_scale
  name: "L3_b5_cbr1_scale"
  type: "Scale"
  bottom: "L3_b5_cbr1_bn_top"
  top: "L3_b5_cbr1_bn_top"
  scale_param {
    bias_term: true
  }
}
layer { # L3_b5_cbr1_relu
  name: "L3_b5_cbr1_relu"
  type: "ReLU"
  bottom: "L3_b5_cbr1_bn_top"
  top: "L3_b5_cbr1_bn_top"
}
#} L3_b5_cbr1 end
#{ L3_b5_cbr2 start
layer { # L3_b5_cbr2_conv
  name: "L3_b5_cbr2_conv"
  type: "Convolution"
  bottom: "L3_b5_cbr1_bn_top"
  top: "L3_b5_cbr2_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b5_cbr2_bn (TRAIN)
  name: "L3_b5_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b5_cbr2_conv_top"
  top: "L3_b5_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b5_cbr2_bn (TEST)
  name: "L3_b5_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b5_cbr2_conv_top"
  top: "L3_b5_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b5_cbr2_scale
  name: "L3_b5_cbr2_scale"
  type: "Scale"
  bottom: "L3_b5_cbr2_bn_top"
  top: "L3_b5_cbr2_bn_top"
  scale_param {
    bias_term: true
  }
}
#} L3_b5_cbr2 end
layer { # L3_b5_sum_eltwise: residual addition with the identity shortcut
  name: "L3_b5_sum_eltwise"
  type: "Eltwise"
  bottom: "L3_b5_cbr2_bn_top"
  bottom: "L3_b4_sum_eltwise_top"
  top: "L3_b5_sum_eltwise_top"
  eltwise_param {
    operation: SUM
  }
}
layer { # L3_b5_relu: post-addition ReLU (in-place)
  name: "L3_b5_relu"
  type: "ReLU"
  bottom: "L3_b5_sum_eltwise_top"
  top: "L3_b5_sum_eltwise_top"
}
#} L3_b5 end
#{ L3_b6 start
# Residual block 6 of stage L3: same pattern as L3_b4 (two 3x3x64
# conv->BN->Scale units, identity shortcut, post-addition ReLU).
#{ L3_b6_cbr1 start
layer { # L3_b6_cbr1_conv
  name: "L3_b6_cbr1_conv"
  type: "Convolution"
  bottom: "L3_b5_sum_eltwise_top"
  top: "L3_b6_cbr1_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b6_cbr1_bn (TRAIN)
  name: "L3_b6_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b6_cbr1_conv_top"
  top: "L3_b6_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b6_cbr1_bn (TEST): shares blobs by name; uses global stats
  name: "L3_b6_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b6_cbr1_conv_top"
  top: "L3_b6_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b6_cbr1_scale
  name: "L3_b6_cbr1_scale"
  type: "Scale"
  bottom: "L3_b6_cbr1_bn_top"
  top: "L3_b6_cbr1_bn_top"
  scale_param {
    bias_term: true
  }
}
layer { # L3_b6_cbr1_relu
  name: "L3_b6_cbr1_relu"
  type: "ReLU"
  bottom: "L3_b6_cbr1_bn_top"
  top: "L3_b6_cbr1_bn_top"
}
#} L3_b6_cbr1 end
#{ L3_b6_cbr2 start
layer { # L3_b6_cbr2_conv
  name: "L3_b6_cbr2_conv"
  type: "Convolution"
  bottom: "L3_b6_cbr1_bn_top"
  top: "L3_b6_cbr2_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b6_cbr2_bn (TRAIN)
  name: "L3_b6_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b6_cbr2_conv_top"
  top: "L3_b6_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b6_cbr2_bn (TEST)
  name: "L3_b6_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b6_cbr2_conv_top"
  top: "L3_b6_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b6_cbr2_scale
  name: "L3_b6_cbr2_scale"
  type: "Scale"
  bottom: "L3_b6_cbr2_bn_top"
  top: "L3_b6_cbr2_bn_top"
  scale_param {
    bias_term: true
  }
}
#} L3_b6_cbr2 end
layer { # L3_b6_sum_eltwise: residual addition with the identity shortcut
  name: "L3_b6_sum_eltwise"
  type: "Eltwise"
  bottom: "L3_b6_cbr2_bn_top"
  bottom: "L3_b5_sum_eltwise_top"
  top: "L3_b6_sum_eltwise_top"
  eltwise_param {
    operation: SUM
  }
}
layer { # L3_b6_relu: post-addition ReLU (in-place)
  name: "L3_b6_relu"
  type: "ReLU"
  bottom: "L3_b6_sum_eltwise_top"
  top: "L3_b6_sum_eltwise_top"
}
#} L3_b6 end
#{ L3_b7 start
# Residual block 7 of stage L3: same pattern as L3_b4 (two 3x3x64
# conv->BN->Scale units, identity shortcut, post-addition ReLU).
#{ L3_b7_cbr1 start
layer { # L3_b7_cbr1_conv
  name: "L3_b7_cbr1_conv"
  type: "Convolution"
  bottom: "L3_b6_sum_eltwise_top"
  top: "L3_b7_cbr1_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b7_cbr1_bn (TRAIN)
  name: "L3_b7_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b7_cbr1_conv_top"
  top: "L3_b7_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b7_cbr1_bn (TEST): shares blobs by name; uses global stats
  name: "L3_b7_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b7_cbr1_conv_top"
  top: "L3_b7_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b7_cbr1_scale
  name: "L3_b7_cbr1_scale"
  type: "Scale"
  bottom: "L3_b7_cbr1_bn_top"
  top: "L3_b7_cbr1_bn_top"
  scale_param {
    bias_term: true
  }
}
layer { # L3_b7_cbr1_relu
  name: "L3_b7_cbr1_relu"
  type: "ReLU"
  bottom: "L3_b7_cbr1_bn_top"
  top: "L3_b7_cbr1_bn_top"
}
#} L3_b7_cbr1 end
#{ L3_b7_cbr2 start
layer { # L3_b7_cbr2_conv
  name: "L3_b7_cbr2_conv"
  type: "Convolution"
  bottom: "L3_b7_cbr1_bn_top"
  top: "L3_b7_cbr2_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b7_cbr2_bn (TRAIN)
  name: "L3_b7_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b7_cbr2_conv_top"
  top: "L3_b7_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b7_cbr2_bn (TEST)
  name: "L3_b7_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b7_cbr2_conv_top"
  top: "L3_b7_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b7_cbr2_scale
  name: "L3_b7_cbr2_scale"
  type: "Scale"
  bottom: "L3_b7_cbr2_bn_top"
  top: "L3_b7_cbr2_bn_top"
  scale_param {
    bias_term: true
  }
}
#} L3_b7_cbr2 end
layer { # L3_b7_sum_eltwise: residual addition with the identity shortcut
  name: "L3_b7_sum_eltwise"
  type: "Eltwise"
  bottom: "L3_b7_cbr2_bn_top"
  bottom: "L3_b6_sum_eltwise_top"
  top: "L3_b7_sum_eltwise_top"
  eltwise_param {
    operation: SUM
  }
}
layer { # L3_b7_relu: post-addition ReLU (in-place)
  name: "L3_b7_relu"
  type: "ReLU"
  bottom: "L3_b7_sum_eltwise_top"
  top: "L3_b7_sum_eltwise_top"
}
#} L3_b7 end
#{ L3_b8 start
# Residual block 8 of stage L3: same pattern as L3_b4 (two 3x3x64
# conv->BN->Scale units, identity shortcut, post-addition ReLU).
#{ L3_b8_cbr1 start
layer { # L3_b8_cbr1_conv
  name: "L3_b8_cbr1_conv"
  type: "Convolution"
  bottom: "L3_b7_sum_eltwise_top"
  top: "L3_b8_cbr1_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b8_cbr1_bn (TRAIN)
  name: "L3_b8_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b8_cbr1_conv_top"
  top: "L3_b8_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b8_cbr1_bn (TEST): shares blobs by name; uses global stats
  name: "L3_b8_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b8_cbr1_conv_top"
  top: "L3_b8_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b8_cbr1_scale
  name: "L3_b8_cbr1_scale"
  type: "Scale"
  bottom: "L3_b8_cbr1_bn_top"
  top: "L3_b8_cbr1_bn_top"
  scale_param {
    bias_term: true
  }
}
layer { # L3_b8_cbr1_relu
  name: "L3_b8_cbr1_relu"
  type: "ReLU"
  bottom: "L3_b8_cbr1_bn_top"
  top: "L3_b8_cbr1_bn_top"
}
#} L3_b8_cbr1 end
#{ L3_b8_cbr2 start
layer { # L3_b8_cbr2_conv
  name: "L3_b8_cbr2_conv"
  type: "Convolution"
  bottom: "L3_b8_cbr1_bn_top"
  top: "L3_b8_cbr2_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b8_cbr2_bn (TRAIN)
  name: "L3_b8_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b8_cbr2_conv_top"
  top: "L3_b8_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b8_cbr2_bn (TEST)
  name: "L3_b8_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b8_cbr2_conv_top"
  top: "L3_b8_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b8_cbr2_scale
  name: "L3_b8_cbr2_scale"
  type: "Scale"
  bottom: "L3_b8_cbr2_bn_top"
  top: "L3_b8_cbr2_bn_top"
  scale_param {
    bias_term: true
  }
}
#} L3_b8_cbr2 end
layer { # L3_b8_sum_eltwise: residual addition with the identity shortcut
  name: "L3_b8_sum_eltwise"
  type: "Eltwise"
  bottom: "L3_b8_cbr2_bn_top"
  bottom: "L3_b7_sum_eltwise_top"
  top: "L3_b8_sum_eltwise_top"
  eltwise_param {
    operation: SUM
  }
}
layer { # L3_b8_relu: post-addition ReLU (in-place)
  name: "L3_b8_relu"
  type: "ReLU"
  bottom: "L3_b8_sum_eltwise_top"
  top: "L3_b8_sum_eltwise_top"
}
#} L3_b8 end
#{ L3_b9 start
# Residual block 9 (final block) of stage L3: same pattern as L3_b4
# (two 3x3x64 conv->BN->Scale units, identity shortcut, post-addition ReLU).
#{ L3_b9_cbr1 start
layer { # L3_b9_cbr1_conv
  name: "L3_b9_cbr1_conv"
  type: "Convolution"
  bottom: "L3_b8_sum_eltwise_top"
  top: "L3_b9_cbr1_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b9_cbr1_bn (TRAIN)
  name: "L3_b9_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b9_cbr1_conv_top"
  top: "L3_b9_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b9_cbr1_bn (TEST): shares blobs by name; uses global stats
  name: "L3_b9_cbr1_bn"
  type: "BatchNorm"
  bottom: "L3_b9_cbr1_conv_top"
  top: "L3_b9_cbr1_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b9_cbr1_scale
  name: "L3_b9_cbr1_scale"
  type: "Scale"
  bottom: "L3_b9_cbr1_bn_top"
  top: "L3_b9_cbr1_bn_top"
  scale_param {
    bias_term: true
  }
}
layer { # L3_b9_cbr1_relu
  name: "L3_b9_cbr1_relu"
  type: "ReLU"
  bottom: "L3_b9_cbr1_bn_top"
  top: "L3_b9_cbr1_bn_top"
}
#} L3_b9_cbr1 end
#{ L3_b9_cbr2 start
layer { # L3_b9_cbr2_conv
  name: "L3_b9_cbr2_conv"
  type: "Convolution"
  bottom: "L3_b9_cbr1_bn_top"
  top: "L3_b9_cbr2_conv_top"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer { # L3_b9_cbr2_bn (TRAIN)
  name: "L3_b9_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b9_cbr2_conv_top"
  top: "L3_b9_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TRAIN
  }
  batch_norm_param {
    use_global_stats: false
    moving_average_fraction: 0.999
  }
}
layer { # L3_b9_cbr2_bn (TEST)
  name: "L3_b9_cbr2_bn"
  type: "BatchNorm"
  bottom: "L3_b9_cbr2_conv_top"
  top: "L3_b9_cbr2_bn_top"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  include {
    phase: TEST
  }
  batch_norm_param {
    use_global_stats: true
    moving_average_fraction: 0.999
  }
}
layer { # L3_b9_cbr2_scale
  name: "L3_b9_cbr2_scale"
  type: "Scale"
  bottom: "L3_b9_cbr2_bn_top"
  top: "L3_b9_cbr2_bn_top"
  scale_param {
    bias_term: true
  }
}
#} L3_b9_cbr2 end
layer { # L3_b9_sum_eltwise: residual addition with the identity shortcut
  name: "L3_b9_sum_eltwise"
  type: "Eltwise"
  bottom: "L3_b9_cbr2_bn_top"
  bottom: "L3_b8_sum_eltwise_top"
  top: "L3_b9_sum_eltwise_top"
  eltwise_param {
    operation: SUM
  }
}
layer { # L3_b9_relu: post-addition ReLU (in-place); feeds the global pool
  name: "L3_b9_relu"
  type: "ReLU"
  bottom: "L3_b9_sum_eltwise_top"
  top: "L3_b9_sum_eltwise_top"
}
#} L3_b9 end
#} L3 end
layer { # post_pool: average pooling over the final feature map
  # NOTE(review): kernel_size 8 with stride 1 acts as global average pooling
  # assuming the incoming map is 8x8 (32x32 input downsampled by the earlier
  # strided stages) — confirm against the full network definition.
  name: "post_pool"
  type: "Pooling"
  bottom: "L3_b9_sum_eltwise_top"
  top: "post_pool"
  pooling_param {
    pool: AVE
    kernel_size: 8
    stride: 1
  }
}
layer { # post_FC: final fully-connected classifier, 10 outputs (CIFAR-10 classes)
  name: "post_FC"
  type: "InnerProduct"
  bottom: "post_pool"
  top: "post_FC_top"
  param {
    lr_mult: 1    # weights: base learning rate
  }
  param {
    lr_mult: 2    # bias: double learning rate
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
  5583. layer { # accuracy
  5584. name: "accuracy"
  5585. type: "Accuracy"
  5586. bottom: "post_FC_top"
  5587. bottom: "label"
  5588. top: "accuracy"
  5589. }
layer { # loss: softmax cross-entropy between the FC logits and integer labels
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "post_FC_top"
  bottom: "label"
  top: "loss"
}
# End of network definition. (Removed Pastebin page residue — "Add Comment" /
# "Please, Sign In to add comment" — which is not part of the prototxt and
# would break parsing.)