name: "resnet_cifar10"
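# Data layers: CIFAR-10 LMDBs (paths under examples/xor/). The TRAIN phase
# subtracts the dataset mean, then mirrors and crops the 32x32 images to 28x28.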
layer {
  name: "Data1"
  type: "Data"
  top: "Data1"
  top: "Data2"
  include {
    phase: TRAIN
  }
  transform_param {
    mean_file: "examples/xor/mean.binaryproto"
    crop_size: 28
    mirror: true
  }
  data_param {
    source: "examples/xor/cifar10_train_lmdb"
    batch_size: 100
    backend: LMDB
  }
}
layer {
  name: "Data1"
  type: "Data"
  top: "Data1"
  top: "Data2"
  include {
    phase: TEST
  }
  transform_param {
    mean_file: "examples/xor/mean.binaryproto"
  }
  data_param {
    source: "examples/xor/cifar10_test_lmdb"
    batch_size: 100
    backend: LMDB
  }
}
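# Note: the TEST data layer subtracts the mean but does not crop, so testing
# runs on full 32x32 images while training sees 28x28 crops; the global
# average pooling at the end of the net makes the two sizes compatible.
# Stem: one 3x3 convolution with 16 outputs, then BatchNorm + Scale + ReLU.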
layer {
  name: "Convolution1"
  type: "Convolution"
  bottom: "Data1"
  top: "Convolution1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm1"
  type: "BatchNorm"
  bottom: "Convolution1"
  top: "Convolution1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale1"
  type: "Scale"
  bottom: "Convolution1"
  top: "Convolution1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU1"
  type: "ReLU"
  bottom: "Convolution1"
  top: "Convolution1"
}
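# Stage 1: three residual blocks at 16 channels. Each block is two 3x3 convs,
# each followed by BatchNorm + Scale, with an Eltwise SUM identity shortcut
# and a ReLU after the addition.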
layer {
  name: "Convolution2"
  type: "Convolution"
  bottom: "Convolution1"
  top: "Convolution2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm2"
  type: "BatchNorm"
  bottom: "Convolution2"
  top: "Convolution2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale2"
  type: "Scale"
  bottom: "Convolution2"
  top: "Convolution2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU2"
  type: "ReLU"
  bottom: "Convolution2"
  top: "Convolution2"
}
layer {
  name: "Convolution3"
  type: "Convolution"
  bottom: "Convolution2"
  top: "Convolution3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm3"
  type: "BatchNorm"
  bottom: "Convolution3"
  top: "Convolution3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale3"
  type: "Scale"
  bottom: "Convolution3"
  top: "Convolution3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise1"
  type: "Eltwise"
  bottom: "Convolution1"
  bottom: "Convolution3"
  top: "Eltwise1"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU3"
  type: "ReLU"
  bottom: "Eltwise1"
  top: "Eltwise1"
}
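# Second 16-channel residual block (Convolution4/5, identity shortcut via Eltwise2).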
layer {
  name: "Convolution4"
  type: "Convolution"
  bottom: "Eltwise1"
  top: "Convolution4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm4"
  type: "BatchNorm"
  bottom: "Convolution4"
  top: "Convolution4"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale4"
  type: "Scale"
  bottom: "Convolution4"
  top: "Convolution4"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU4"
  type: "ReLU"
  bottom: "Convolution4"
  top: "Convolution4"
}
layer {
  name: "Convolution5"
  type: "Convolution"
  bottom: "Convolution4"
  top: "Convolution5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm5"
  type: "BatchNorm"
  bottom: "Convolution5"
  top: "Convolution5"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale5"
  type: "Scale"
  bottom: "Convolution5"
  top: "Convolution5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise2"
  type: "Eltwise"
  bottom: "Eltwise1"
  bottom: "Convolution5"
  top: "Eltwise2"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU5"
  type: "ReLU"
  bottom: "Eltwise2"
  top: "Eltwise2"
}
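# Third 16-channel residual block (Convolution6/7, identity shortcut via Eltwise3).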
layer {
  name: "Convolution6"
  type: "Convolution"
  bottom: "Eltwise2"
  top: "Convolution6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm6"
  type: "BatchNorm"
  bottom: "Convolution6"
  top: "Convolution6"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale6"
  type: "Scale"
  bottom: "Convolution6"
  top: "Convolution6"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU6"
  type: "ReLU"
  bottom: "Convolution6"
  top: "Convolution6"
}
layer {
  name: "Convolution7"
  type: "Convolution"
  bottom: "Convolution6"
  top: "Convolution7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm7"
  type: "BatchNorm"
  bottom: "Convolution7"
  top: "Convolution7"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale7"
  type: "Scale"
  bottom: "Convolution7"
  top: "Convolution7"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise3"
  type: "Eltwise"
  bottom: "Eltwise2"
  bottom: "Convolution7"
  top: "Eltwise3"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU7"
  type: "ReLU"
  bottom: "Eltwise3"
  top: "Eltwise3"
}
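# Stage 2: downsample to 32 channels. Convolution8 is a 1x1/stride-2
# projection shortcut from Eltwise3; Convolution9/10 form the stride-2
# residual branch, and two more 32-channel identity blocks follow.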
layer {
  name: "Convolution8"
  type: "Convolution"
  bottom: "Eltwise3"
  top: "Convolution8"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 0
    kernel_size: 1
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.25
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm8"
  type: "BatchNorm"
  bottom: "Convolution8"
  top: "Convolution8"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale8"
  type: "Scale"
  bottom: "Convolution8"
  top: "Convolution8"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Convolution9"
  type: "Convolution"
  bottom: "Eltwise3"
  top: "Convolution9"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm9"
  type: "BatchNorm"
  bottom: "Convolution9"
  top: "Convolution9"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale9"
  type: "Scale"
  bottom: "Convolution9"
  top: "Convolution9"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU8"
  type: "ReLU"
  bottom: "Convolution9"
  top: "Convolution9"
}
layer {
  name: "Convolution10"
  type: "Convolution"
  bottom: "Convolution9"
  top: "Convolution10"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm10"
  type: "BatchNorm"
  bottom: "Convolution10"
  top: "Convolution10"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale10"
  type: "Scale"
  bottom: "Convolution10"
  top: "Convolution10"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise4"
  type: "Eltwise"
  bottom: "Convolution8"
  bottom: "Convolution10"
  top: "Eltwise4"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU9"
  type: "ReLU"
  bottom: "Eltwise4"
  top: "Eltwise4"
}
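# Second 32-channel residual block (Convolution11/12, identity shortcut via Eltwise5).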
layer {
  name: "Convolution11"
  type: "Convolution"
  bottom: "Eltwise4"
  top: "Convolution11"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm11"
  type: "BatchNorm"
  bottom: "Convolution11"
  top: "Convolution11"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale11"
  type: "Scale"
  bottom: "Convolution11"
  top: "Convolution11"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU10"
  type: "ReLU"
  bottom: "Convolution11"
  top: "Convolution11"
}
layer {
  name: "Convolution12"
  type: "Convolution"
  bottom: "Convolution11"
  top: "Convolution12"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm12"
  type: "BatchNorm"
  bottom: "Convolution12"
  top: "Convolution12"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale12"
  type: "Scale"
  bottom: "Convolution12"
  top: "Convolution12"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise5"
  type: "Eltwise"
  bottom: "Eltwise4"
  bottom: "Convolution12"
  top: "Eltwise5"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU11"
  type: "ReLU"
  bottom: "Eltwise5"
  top: "Eltwise5"
}
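# Third 32-channel residual block (Convolution13/14, identity shortcut via Eltwise6).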
layer {
  name: "Convolution13"
  type: "Convolution"
  bottom: "Eltwise5"
  top: "Convolution13"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm13"
  type: "BatchNorm"
  bottom: "Convolution13"
  top: "Convolution13"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale13"
  type: "Scale"
  bottom: "Convolution13"
  top: "Convolution13"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU12"
  type: "ReLU"
  bottom: "Convolution13"
  top: "Convolution13"
}
layer {
  name: "Convolution14"
  type: "Convolution"
  bottom: "Convolution13"
  top: "Convolution14"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm14"
  type: "BatchNorm"
  bottom: "Convolution14"
  top: "Convolution14"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale14"
  type: "Scale"
  bottom: "Convolution14"
  top: "Convolution14"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise6"
  type: "Eltwise"
  bottom: "Eltwise5"
  bottom: "Convolution14"
  top: "Eltwise6"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU13"
  type: "ReLU"
  bottom: "Eltwise6"
  top: "Eltwise6"
}
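# Stage 3: downsample to 64 channels. Convolution15 is the 1x1/stride-2
# projection shortcut; the residual branches in this stage binarize their
# inputs first. BinActiv and BinaryConvolution are not stock Caffe layers;
# they appear to come from an XNOR-Net-style fork, with "no_k: true"
# presumably skipping the K scaling-factor map. Note the BatchNorm ->
# BinActiv -> BinConv ordering, which matches the XNOR-Net block layout.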
layer {
  name: "Convolution15"
  type: "Convolution"
  bottom: "Eltwise6"
  top: "Convolution15"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_size: 1
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.176776695297
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm15"
  type: "BatchNorm"
  bottom: "Convolution15"
  top: "Convolution15"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale15"
  type: "Scale"
  bottom: "Convolution15"
  top: "Convolution15"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "BinBatchNorm16"
  type: "BatchNorm"
  bottom: "Eltwise6"
  top: "Eltwise6_1"
}
layer {
  name: "Binactiv16"
  type: "BinActiv"
  bottom: "Eltwise6_1"
  top: "bin-eltwise6"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution16"
  type: "BinaryConvolution"
  bottom: "bin-eltwise6"
  top: "Convolution16"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BinBatchNorm17"
  type: "BatchNorm"
  bottom: "Convolution16"
  top: "Convolution16"
}
layer {
  name: "Binactiv17"
  type: "BinActiv"
  bottom: "Convolution16"
  top: "B-Convolution16"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution17"
  type: "BinaryConvolution"
  bottom: "B-Convolution16"
  top: "Convolution17"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "Eltwise7"
  type: "Eltwise"
  bottom: "Convolution15"
  bottom: "Convolution17"
  top: "Eltwise7"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "PReLU15"
  type: "PReLU"
  bottom: "Eltwise7"
  top: "Eltwise7"
}
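# Second 64-channel binary block (BinConvolution18/19, identity shortcut from Eltwise7).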
layer {
  name: "BatchNorm18"
  type: "BatchNorm"
  bottom: "Eltwise7"
  top: "Eltwise7"
}
layer {
  name: "Binactiv18"
  type: "BinActiv"
  bottom: "Eltwise7"
  top: "B-Eltwise7"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution18"
  type: "BinaryConvolution"
  bottom: "B-Eltwise7"
  top: "Convolution18"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm19"
  type: "BatchNorm"
  bottom: "Convolution18"
  top: "Convolution18"
}
layer {
  name: "Binactiv19"
  type: "BinActiv"
  bottom: "Convolution18"
  top: "B-Convolution18"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution19"
  type: "BinaryConvolution"
  bottom: "B-Convolution18"
  top: "Convolution19"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "Eltwise8"
  type: "Eltwise"
  bottom: "Eltwise7"
  bottom: "Convolution19"
  top: "Eltwise8"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "PReLU17"
  type: "PReLU"
  bottom: "Eltwise8"
  top: "Eltwise8"
}
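# Third 64-channel binary block (BinConvolution20/21, identity shortcut from Eltwise8).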
layer {
  name: "BatchNorm20"
  type: "BatchNorm"
  bottom: "Eltwise8"
  top: "BEltwise8"
}
layer {
  name: "Binactiv20"
  type: "BinActiv"
  bottom: "BEltwise8"
  top: "B-Eltwise8"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution20"
  type: "BinaryConvolution"
  bottom: "B-Eltwise8"
  top: "Convolution20"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm21"
  type: "BatchNorm"
  bottom: "Convolution20"
  top: "Convolution20"
}
layer {
  name: "Binactiv21"
  type: "BinActiv"
  bottom: "Convolution20"
  top: "B-Convolution20"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution21"
  type: "BinaryConvolution"
  bottom: "B-Convolution20"
  top: "Convolution21"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "Eltwise9"
  type: "Eltwise"
  bottom: "Eltwise8"
  bottom: "Convolution21"
  top: "Eltwise9"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "PReLU19"
  type: "PReLU"
  bottom: "Eltwise9"
  top: "Eltwise9"
}
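# Head: global average pooling, a 10-way fully connected classifier,
# softmax loss (all phases), and accuracy (TEST only).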
layer {
  name: "Pooling1"
  type: "Pooling"
  bottom: "Eltwise9"
  top: "Pooling1"
  pooling_param {
    pool: AVE
    global_pooling: true
  }
}
layer {
  name: "InnerProduct1"
  type: "InnerProduct"
  bottom: "Pooling1"
  top: "InnerProduct1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "SoftmaxWithLoss1"
  type: "SoftmaxWithLoss"
  bottom: "InnerProduct1"
  bottom: "Data2"
  top: "SoftmaxWithLoss1"
}
layer {
  name: "Accuracy1"
  type: "Accuracy"
  bottom: "InnerProduct1"
  bottom: "Data2"
  top: "Accuracy1"
  include {
    phase: TEST
  }
}
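# ----------------------------------------------------------------------
# Usage note (commented out so this file stays a valid net definition).
# A net like this is trained through a solver file rather than directly.
# The sketch below is a minimal, hypothetical solver.prototxt, assuming
# this net is saved as examples/xor/resnet_cifar10_train_test.prototxt
# and that the Caffe build provides the custom BinActiv/BinaryConvolution
# layers. The learning-rate schedule is the usual CIFAR-10 ResNet one and
# is an assumption, not something specified by this file.
#
#   net: "examples/xor/resnet_cifar10_train_test.prototxt"
#   test_iter: 100          # 100 batches x batch_size 100 = 10,000 test images
#   test_interval: 500
#   base_lr: 0.1
#   momentum: 0.9
#   weight_decay: 0.0001
#   lr_policy: "multistep"
#   gamma: 0.1
#   stepvalue: 32000
#   stepvalue: 48000
#   max_iter: 64000
#   snapshot: 10000
#   snapshot_prefix: "examples/xor/resnet_cifar10"
#   solver_mode: GPU
#
# Training would then be launched with:
#   caffe train -solver examples/xor/solver.prototxt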