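# resnet_cifar10 -- a CIFAR-10 residual network in Caffe prototxt.
# The early residual blocks use ordinary full-precision Convolution
# layers; the later blocks switch to BinActiv / BinaryConvolution,
# XNOR-Net-style binary layers provided by a custom Caffe fork
# (they do not exist in upstream Caffe).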
name: "resnet_cifar10"
layer {
  name: "Data1"
  type: "Data"
  top: "Data1"
  top: "Data2"
  include {
    phase: TRAIN
  }
  transform_param {
    mean_file: "examples/xor/mean.binaryproto"
    crop_size: 28
    mirror: true
  }
  data_param {
    source: "examples/xor/cifar10_train_lmdb"
    batch_size: 100
    backend: LMDB
  }
}
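# TEST-phase data layer. TRAIN takes random 28x28 crops with mirroring,
# while TEST feeds full 32x32 images with only mean subtraction; the
# global average pooling at the end of the net absorbs the difference
# in spatial size, so both phases run, but at different resolutions.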
layer {
  name: "Data1"
  type: "Data"
  top: "Data1"
  top: "Data2"
  include {
    phase: TEST
  }
  transform_param {
    mean_file: "examples/xor/mean.binaryproto"
  }
  data_param {
    source: "examples/xor/cifar10_test_lmdb"
    batch_size: 100
    backend: LMDB
  }
}
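# The convolutions below follow the usual Caffe convention: the first
# param block is the weights (lr_mult: 1, weight decay on), the second
# the bias (lr_mult: 2, no decay). The gaussian weight_filler stds match
# He-style initialization, std = sqrt(2 / (k*k*num_output)), e.g.
# sqrt(2/(3*3*16)) = 0.118 and sqrt(2/(3*3*32)) = 0.083.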
layer {
  name: "Convolution1"
  type: "Convolution"
  bottom: "Data1"
  top: "Convolution1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
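# Caffe's BatchNorm stores three internal blobs (running mean, running
# variance, moving-average factor) that are computed rather than
# learned, so all three get lr_mult: 0; the learned scale and shift
# come from the Scale layer (bias_term: true) that follows each
# BatchNorm.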
layer {
  name: "BatchNorm1"
  type: "BatchNorm"
  bottom: "Convolution1"
  top: "Convolution1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale1"
  type: "Scale"
  bottom: "Convolution1"
  top: "Convolution1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU1"
  type: "ReLU"
  bottom: "Convolution1"
  top: "Convolution1"
}
layer {
  name: "Convolution2"
  type: "Convolution"
  bottom: "Convolution1"
  top: "Convolution2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm2"
  type: "BatchNorm"
  bottom: "Convolution2"
  top: "Convolution2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale2"
  type: "Scale"
  bottom: "Convolution2"
  top: "Convolution2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU2"
  type: "ReLU"
  bottom: "Convolution2"
  top: "Convolution2"
}
layer {
  name: "Convolution3"
  type: "Convolution"
  bottom: "Convolution2"
  top: "Convolution3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm3"
  type: "BatchNorm"
  bottom: "Convolution3"
  top: "Convolution3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale3"
  type: "Scale"
  bottom: "Convolution3"
  top: "Convolution3"
  scale_param {
    bias_term: true
  }
}
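# First shortcut sum: the output of the Convolution2 -> Convolution3
# pair is added to the stem output (Convolution1) as an identity
# shortcut.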
layer {
  name: "Eltwise1"
  type: "Eltwise"
  bottom: "Convolution1"
  bottom: "Convolution3"
  top: "Eltwise1"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU3"
  type: "ReLU"
  bottom: "Eltwise1"
  top: "Eltwise1"
}
layer {
  name: "Convolution4"
  type: "Convolution"
  bottom: "Eltwise1"
  top: "Convolution4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm4"
  type: "BatchNorm"
  bottom: "Convolution4"
  top: "Convolution4"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale4"
  type: "Scale"
  bottom: "Convolution4"
  top: "Convolution4"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU4"
  type: "ReLU"
  bottom: "Convolution4"
  top: "Convolution4"
}
layer {
  name: "Convolution5"
  type: "Convolution"
  bottom: "Convolution4"
  top: "Convolution5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm5"
  type: "BatchNorm"
  bottom: "Convolution5"
  top: "Convolution5"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale5"
  type: "Scale"
  bottom: "Convolution5"
  top: "Convolution5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise2"
  type: "Eltwise"
  bottom: "Eltwise1"
  bottom: "Convolution5"
  top: "Eltwise2"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU5"
  type: "ReLU"
  bottom: "Eltwise2"
  top: "Eltwise2"
}
layer {
  name: "Convolution6"
  type: "Convolution"
  bottom: "Eltwise2"
  top: "Convolution6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm6"
  type: "BatchNorm"
  bottom: "Convolution6"
  top: "Convolution6"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale6"
  type: "Scale"
  bottom: "Convolution6"
  top: "Convolution6"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU6"
  type: "ReLU"
  bottom: "Convolution6"
  top: "Convolution6"
}
layer {
  name: "Convolution7"
  type: "Convolution"
  bottom: "Convolution6"
  top: "Convolution7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.118
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm7"
  type: "BatchNorm"
  bottom: "Convolution7"
  top: "Convolution7"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale7"
  type: "Scale"
  bottom: "Convolution7"
  top: "Convolution7"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise3"
  type: "Eltwise"
  bottom: "Eltwise2"
  bottom: "Convolution7"
  top: "Eltwise3"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU7"
  type: "ReLU"
  bottom: "Eltwise3"
  top: "Eltwise3"
}
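# Transition to the 32-channel stage: Convolution8 is a 1x1, stride-2
# projection shortcut, Convolution9 -> Convolution10 the stride-2 main
# path; Eltwise4 sums the two branches.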
layer {
  name: "Convolution8"
  type: "Convolution"
  bottom: "Eltwise3"
  top: "Convolution8"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 0
    kernel_size: 1
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.25
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm8"
  type: "BatchNorm"
  bottom: "Convolution8"
  top: "Convolution8"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale8"
  type: "Scale"
  bottom: "Convolution8"
  top: "Convolution8"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Convolution9"
  type: "Convolution"
  bottom: "Eltwise3"
  top: "Convolution9"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm9"
  type: "BatchNorm"
  bottom: "Convolution9"
  top: "Convolution9"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale9"
  type: "Scale"
  bottom: "Convolution9"
  top: "Convolution9"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU8"
  type: "ReLU"
  bottom: "Convolution9"
  top: "Convolution9"
}
layer {
  name: "Convolution10"
  type: "Convolution"
  bottom: "Convolution9"
  top: "Convolution10"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm10"
  type: "BatchNorm"
  bottom: "Convolution10"
  top: "Convolution10"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale10"
  type: "Scale"
  bottom: "Convolution10"
  top: "Convolution10"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise4"
  type: "Eltwise"
  bottom: "Convolution8"
  bottom: "Convolution10"
  top: "Eltwise4"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU9"
  type: "ReLU"
  bottom: "Eltwise4"
  top: "Eltwise4"
}
layer {
  name: "Convolution11"
  type: "Convolution"
  bottom: "Eltwise4"
  top: "Convolution11"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm11"
  type: "BatchNorm"
  bottom: "Convolution11"
  top: "Convolution11"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale11"
  type: "Scale"
  bottom: "Convolution11"
  top: "Convolution11"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ReLU10"
  type: "ReLU"
  bottom: "Convolution11"
  top: "Convolution11"
}
layer {
  name: "Convolution12"
  type: "Convolution"
  bottom: "Convolution11"
  top: "Convolution12"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm12"
  type: "BatchNorm"
  bottom: "Convolution12"
  top: "Convolution12"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale12"
  type: "Scale"
  bottom: "Convolution12"
  top: "Convolution12"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Eltwise5"
  type: "Eltwise"
  bottom: "Eltwise4"
  bottom: "Convolution12"
  top: "Eltwise5"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "PReLU11"
  type: "PReLU"
  bottom: "Eltwise5"
  top: "Eltwise5"
}
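# From this point the residual blocks are binarized, using the
# XNOR-Net-style ordering BatchNorm -> BinActiv -> BinaryConvolution,
# and the shortcut sums are followed by PReLU instead of ReLU.
# BinActiv binarizes its input to +1/-1; no_k: true appears to skip
# the per-location scaling matrix (K in XNOR-Net). Note that
# BinBatchNorm13 normalizes Eltwise5 in place, so the shortcut into
# Eltwise6 sees the normalized blob rather than the raw block input.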
layer {
  name: "BinBatchNorm13"
  type: "BatchNorm"
  bottom: "Eltwise5"
  top: "Eltwise5"
}
layer {
  name: "Binactiv13"
  type: "BinActiv"
  bottom: "Eltwise5"
  top: "B-Eltwise5"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution13"
  type: "BinaryConvolution"
  bottom: "B-Eltwise5"
  top: "Convolution13"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BinBatchNorm14"
  type: "BatchNorm"
  bottom: "Convolution13"
  top: "Convolution13"
}
layer {
  name: "Binactiv14"
  type: "BinActiv"
  bottom: "Convolution13"
  top: "B-Convolution13"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution14"
  type: "BinaryConvolution"
  bottom: "B-Convolution13"
  top: "Convolution14"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.083
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "Eltwise6"
  type: "Eltwise"
  bottom: "Eltwise5"
  bottom: "Convolution14"
  top: "Eltwise6"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "PReLU13"
  type: "PReLU"
  bottom: "Eltwise6"
  top: "Eltwise6"
}
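# Transition to the 64-channel stage: a full-precision 1x1, stride-2
# projection shortcut (Convolution15) alongside a binary stride-2 main
# path (BinConvolution16 -> BinConvolution17). Here the pre-block
# BatchNorm writes to a separate blob (Eltwise6_1), leaving Eltwise6
# itself intact for the shortcut convolution.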
layer {
  name: "Convolution15"
  type: "Convolution"
  bottom: "Eltwise6"
  top: "Convolution15"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_size: 1
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.176776695297
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm15"
  type: "BatchNorm"
  bottom: "Convolution15"
  top: "Convolution15"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "Scale15"
  type: "Scale"
  bottom: "Convolution15"
  top: "Convolution15"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "BinBatchNorm16"
  type: "BatchNorm"
  bottom: "Eltwise6"
  top: "Eltwise6_1"
}
layer {
  name: "Binactiv16"
  type: "BinActiv"
  bottom: "Eltwise6_1"
  top: "bin-eltwise6"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution16"
  type: "BinaryConvolution"
  bottom: "bin-eltwise6"
  top: "Convolution16"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BinBatchNorm17"
  type: "BatchNorm"
  bottom: "Convolution16"
  top: "Convolution16"
}
layer {
  name: "Binactiv17"
  type: "BinActiv"
  bottom: "Convolution16"
  top: "B-Convolution16"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution17"
  type: "BinaryConvolution"
  bottom: "B-Convolution16"
  top: "Convolution17"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "Eltwise7"
  type: "Eltwise"
  bottom: "Convolution15"
  bottom: "Convolution17"
  top: "Eltwise7"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "PReLU15"
  type: "PReLU"
  bottom: "Eltwise7"
  top: "Eltwise7"
}
layer {
  name: "BatchNorm18"
  type: "BatchNorm"
  bottom: "Eltwise7"
  top: "Eltwise7"
}
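# Unlike BinBatchNorm16 above, BatchNorm18 normalizes Eltwise7 in
# place, so the identity shortcut into Eltwise8 carries the normalized
# blob; BatchNorm20 below instead writes a separate top (BEltwise8),
# preserving Eltwise8 for its shortcut. The inconsistency may be
# unintentional.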
layer {
  name: "Binactiv18"
  type: "BinActiv"
  bottom: "Eltwise7"
  top: "B-Eltwise7"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution18"
  type: "BinaryConvolution"
  bottom: "B-Eltwise7"
  top: "Convolution18"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm19"
  type: "BatchNorm"
  bottom: "Convolution18"
  top: "Convolution18"
}
layer {
  name: "Binactiv19"
  type: "BinActiv"
  bottom: "Convolution18"
  top: "B-Convolution18"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution19"
  type: "BinaryConvolution"
  bottom: "B-Convolution18"
  top: "Convolution19"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "Eltwise8"
  type: "Eltwise"
  bottom: "Eltwise7"
  bottom: "Convolution19"
  top: "Eltwise8"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "PReLU17"
  type: "PReLU"
  bottom: "Eltwise8"
  top: "Eltwise8"
}
layer {
  name: "BatchNorm20"
  type: "BatchNorm"
  bottom: "Eltwise8"
  top: "BEltwise8"
}
layer {
  name: "Binactiv20"
  type: "BinActiv"
  bottom: "BEltwise8"
  top: "B-Eltwise8"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution20"
  type: "BinaryConvolution"
  bottom: "B-Eltwise8"
  top: "Convolution20"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "BatchNorm21"
  type: "BatchNorm"
  bottom: "Convolution20"
  top: "Convolution20"
}
layer {
  name: "Binactiv21"
  type: "BinActiv"
  bottom: "Convolution20"
  top: "B-Convolution20"
  binactiv_param {
    no_k: true
  }
}
layer {
  name: "BinConvolution21"
  type: "BinaryConvolution"
  bottom: "B-Convolution20"
  top: "Convolution21"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.059
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "Eltwise9"
  type: "Eltwise"
  bottom: "Eltwise8"
  bottom: "Convolution21"
  top: "Eltwise9"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "PReLU19"
  type: "PReLU"
  bottom: "Eltwise9"
  top: "Eltwise9"
}
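# Classifier head: global average pooling over the 64-channel feature
# maps, a 10-way fully connected layer, softmax loss, and a TEST-phase
# accuracy layer.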
layer {
  name: "Pooling1"
  type: "Pooling"
  bottom: "Eltwise9"
  top: "Pooling1"
  pooling_param {
    pool: AVE
    global_pooling: true
  }
}
layer {
  name: "InnerProduct1"
  type: "InnerProduct"
  bottom: "Pooling1"
  top: "InnerProduct1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "SoftmaxWithLoss1"
  type: "SoftmaxWithLoss"
  bottom: "InnerProduct1"
  bottom: "Data2"
  top: "SoftmaxWithLoss1"
}
layer {
  name: "Accuracy1"
  type: "Accuracy"
  bottom: "InnerProduct1"
  bottom: "Data2"
  top: "Accuracy1"
  include {
    phase: TEST
  }
}