# asl-resnet-depth3-width46-cifar10 — Caffe prototxt (ASL-ResNet, depth 3, width 46, CIFAR-10).
# Recovered from a Pastebin scrape (untitled guest paste, Feb 22nd, 2019);
# viewer chrome and per-line numbering artifacts removed.
  1. name: "asl-resnet-depth3-width46-cifar10"
  2. layer {
  3.   name: "data"
  4.   type: "Data"
  5.   top: "data"
  6.   top: "label"
  7.   include {
  8.     phase: TRAIN
  9.   }
  10.   transform_param {
  11.     mirror: true
  12.     crop_size: 32
  13.     mean_value: 0.0
  14.   }
  15.   data_param {
  16.     source: "data_result/train"
  17.     batch_size: 128
  18.     backend: LMDB
  19.   }
  20. }
  21. layer {
  22.   name: "data"
  23.   type: "Data"
  24.   top: "data"
  25.   top: "label"
  26.   include {
  27.     phase: TEST
  28.   }
  29.   transform_param {
  30.     mean_value: 0.0
  31.   }
  32.   data_param {
  33.     source: "data_result/test"
  34.     batch_size: 100
  35.     backend: LMDB
  36.   }
  37. }
  38. layer {
  39.   name: "first_conv"
  40.   type: "Convolution"
  41.   bottom: "data"
  42.   top: "first_conv"
  43.   param {
  44.     lr_mult: 1.0
  45.     decay_mult: 1.0
  46.   }
  47.   convolution_param {
  48.     num_output: 46
  49.     bias_term: false
  50.     pad: 1
  51.     kernel_size: 3
  52.     group: 1
  53.     stride: 1
  54.     weight_filler {
  55.       type: "msra"
  56.     }
  57.   }
  58. }
  59. layer {
  60.   name: "group0_block0_bn0"
  61.   type: "BatchNorm"
  62.   bottom: "first_conv"
  63.   top: "group0_block0_bn0"
  64.   param {
  65.     lr_mult: 0.0
  66.     decay_mult: 0.0
  67.   }
  68.   param {
  69.     lr_mult: 0.0
  70.     decay_mult: 0.0
  71.   }
  72.   param {
  73.     lr_mult: 0.0
  74.     decay_mult: 0.0
  75.   }
  76.   batch_norm_param {
  77.     moving_average_fraction: 0.8999999761581421
  78.   }
  79. }
  80. layer {
  81.   name: "group0_block0_scale0"
  82.   type: "Scale"
  83.   bottom: "group0_block0_bn0"
  84.   top: "group0_block0_bn0"
  85.   param {
  86.     lr_mult: 1.0
  87.     decay_mult: 1.0
  88.   }
  89.   param {
  90.     lr_mult: 1.0
  91.     decay_mult: 0.0
  92.   }
  93.   scale_param {
  94.     bias_term: true
  95.   }
  96. }
  97. layer {
  98.   name: "group0_block0_relu0"
  99.   type: "ReLU"
  100.   bottom: "group0_block0_bn0"
  101.   top: "group0_block0_bn0"
  102. }
  103. layer {
  104.   name: "group0_block0_conv0"
  105.   type: "Convolution"
  106.   bottom: "group0_block0_bn0"
  107.   top: "group0_block0_conv0"
  108.   param {
  109.     lr_mult: 1.0
  110.     decay_mult: 1.0
  111.   }
  112.   convolution_param {
  113.     num_output: 46
  114.     bias_term: false
  115.     pad: 0
  116.     kernel_size: 1
  117.     group: 1
  118.     stride: 1
  119.     weight_filler {
  120.       type: "msra"
  121.     }
  122.   }
  123. }
  124. layer {
  125.   name: "group0_block0_bn1"
  126.   type: "BatchNorm"
  127.   bottom: "group0_block0_conv0"
  128.   top: "group0_block0_conv0"
  129.   param {
  130.     lr_mult: 0.0
  131.     decay_mult: 0.0
  132.   }
  133.   param {
  134.     lr_mult: 0.0
  135.     decay_mult: 0.0
  136.   }
  137.   param {
  138.     lr_mult: 0.0
  139.     decay_mult: 0.0
  140.   }
  141.   batch_norm_param {
  142.     moving_average_fraction: 0.8999999761581421
  143.   }
  144. }
  145. layer {
  146.   name: "group0_block0_scale1"
  147.   type: "Scale"
  148.   bottom: "group0_block0_conv0"
  149.   top: "group0_block0_conv0"
  150.   param {
  151.     lr_mult: 1.0
  152.     decay_mult: 1.0
  153.   }
  154.   param {
  155.     lr_mult: 1.0
  156.     decay_mult: 0.0
  157.   }
  158.   scale_param {
  159.     bias_term: true
  160.   }
  161. }
  162. layer {
  163.   name: "group0_block0_relu1"
  164.   type: "ReLU"
  165.   bottom: "group0_block0_conv0"
  166.   top: "group0_block0_conv0"
  167. }
  168. layer {
  169.   name: "group0_block0_asl"
  170.   type: "ActiveShift"
  171.   bottom: "group0_block0_conv0"
  172.   top: "group0_block0_asl"
  173.   param {
  174.     lr_mult: 1.0
  175.     decay_mult: 0.0
  176.   }
  177.   asl_param {
  178.     pad: 0
  179.     stride: 1
  180.     shift_filler {
  181.       type: "uniform"
  182.       min: -1.0
  183.       max: 1.0
  184.     }
  185.     normalize: true
  186.   }
  187. }
  188. layer {
  189.   name: "group0_block0_conv1"
  190.   type: "Convolution"
  191.   bottom: "group0_block0_asl"
  192.   top: "group0_block0_conv1"
  193.   param {
  194.     lr_mult: 1.0
  195.     decay_mult: 1.0
  196.   }
  197.   convolution_param {
  198.     num_output: 46
  199.     bias_term: false
  200.     pad: 0
  201.     kernel_size: 1
  202.     group: 1
  203.     stride: 1
  204.     weight_filler {
  205.       type: "msra"
  206.     }
  207.   }
  208. }
  209. layer {
  210.   name: "group0_block0_sum"
  211.   type: "Eltwise"
  212.   bottom: "first_conv"
  213.   bottom: "group0_block0_conv1"
  214.   top: "group0_block0_sum"
  215.   eltwise_param {
  216.     operation: SUM
  217.   }
  218. }
  219. layer {
  220.   name: "group0_block1_bn0"
  221.   type: "BatchNorm"
  222.   bottom: "group0_block0_sum"
  223.   top: "group0_block1_bn0"
  224.   param {
  225.     lr_mult: 0.0
  226.     decay_mult: 0.0
  227.   }
  228.   param {
  229.     lr_mult: 0.0
  230.     decay_mult: 0.0
  231.   }
  232.   param {
  233.     lr_mult: 0.0
  234.     decay_mult: 0.0
  235.   }
  236.   batch_norm_param {
  237.     moving_average_fraction: 0.8999999761581421
  238.   }
  239. }
  240. layer {
  241.   name: "group0_block1_scale0"
  242.   type: "Scale"
  243.   bottom: "group0_block1_bn0"
  244.   top: "group0_block1_bn0"
  245.   param {
  246.     lr_mult: 1.0
  247.     decay_mult: 1.0
  248.   }
  249.   param {
  250.     lr_mult: 1.0
  251.     decay_mult: 0.0
  252.   }
  253.   scale_param {
  254.     bias_term: true
  255.   }
  256. }
  257. layer {
  258.   name: "group0_block1_relu0"
  259.   type: "ReLU"
  260.   bottom: "group0_block1_bn0"
  261.   top: "group0_block1_bn0"
  262. }
  263. layer {
  264.   name: "group0_block1_conv0"
  265.   type: "Convolution"
  266.   bottom: "group0_block1_bn0"
  267.   top: "group0_block1_conv0"
  268.   param {
  269.     lr_mult: 1.0
  270.     decay_mult: 1.0
  271.   }
  272.   convolution_param {
  273.     num_output: 46
  274.     bias_term: false
  275.     pad: 0
  276.     kernel_size: 1
  277.     group: 1
  278.     stride: 1
  279.     weight_filler {
  280.       type: "msra"
  281.     }
  282.   }
  283. }
  284. layer {
  285.   name: "group0_block1_bn1"
  286.   type: "BatchNorm"
  287.   bottom: "group0_block1_conv0"
  288.   top: "group0_block1_conv0"
  289.   param {
  290.     lr_mult: 0.0
  291.     decay_mult: 0.0
  292.   }
  293.   param {
  294.     lr_mult: 0.0
  295.     decay_mult: 0.0
  296.   }
  297.   param {
  298.     lr_mult: 0.0
  299.     decay_mult: 0.0
  300.   }
  301.   batch_norm_param {
  302.     moving_average_fraction: 0.8999999761581421
  303.   }
  304. }
  305. layer {
  306.   name: "group0_block1_scale1"
  307.   type: "Scale"
  308.   bottom: "group0_block1_conv0"
  309.   top: "group0_block1_conv0"
  310.   param {
  311.     lr_mult: 1.0
  312.     decay_mult: 1.0
  313.   }
  314.   param {
  315.     lr_mult: 1.0
  316.     decay_mult: 0.0
  317.   }
  318.   scale_param {
  319.     bias_term: true
  320.   }
  321. }
  322. layer {
  323.   name: "group0_block1_relu1"
  324.   type: "ReLU"
  325.   bottom: "group0_block1_conv0"
  326.   top: "group0_block1_conv0"
  327. }
  328. layer {
  329.   name: "group0_block1_asl"
  330.   type: "ActiveShift"
  331.   bottom: "group0_block1_conv0"
  332.   top: "group0_block1_asl"
  333.   param {
  334.     lr_mult: 1.0
  335.     decay_mult: 0.0
  336.   }
  337.   asl_param {
  338.     pad: 0
  339.     stride: 1
  340.     shift_filler {
  341.       type: "uniform"
  342.       min: -1.0
  343.       max: 1.0
  344.     }
  345.     normalize: true
  346.   }
  347. }
  348. layer {
  349.   name: "group0_block1_conv1"
  350.   type: "Convolution"
  351.   bottom: "group0_block1_asl"
  352.   top: "group0_block1_conv1"
  353.   param {
  354.     lr_mult: 1.0
  355.     decay_mult: 1.0
  356.   }
  357.   convolution_param {
  358.     num_output: 46
  359.     bias_term: false
  360.     pad: 0
  361.     kernel_size: 1
  362.     group: 1
  363.     stride: 1
  364.     weight_filler {
  365.       type: "msra"
  366.     }
  367.   }
  368. }
  369. layer {
  370.   name: "group0_block1_sum"
  371.   type: "Eltwise"
  372.   bottom: "group0_block0_sum"
  373.   bottom: "group0_block1_conv1"
  374.   top: "group0_block1_sum"
  375.   eltwise_param {
  376.     operation: SUM
  377.   }
  378. }
  379. layer {
  380.   name: "group0_block2_bn0"
  381.   type: "BatchNorm"
  382.   bottom: "group0_block1_sum"
  383.   top: "group0_block2_bn0"
  384.   param {
  385.     lr_mult: 0.0
  386.     decay_mult: 0.0
  387.   }
  388.   param {
  389.     lr_mult: 0.0
  390.     decay_mult: 0.0
  391.   }
  392.   param {
  393.     lr_mult: 0.0
  394.     decay_mult: 0.0
  395.   }
  396.   batch_norm_param {
  397.     moving_average_fraction: 0.8999999761581421
  398.   }
  399. }
  400. layer {
  401.   name: "group0_block2_scale0"
  402.   type: "Scale"
  403.   bottom: "group0_block2_bn0"
  404.   top: "group0_block2_bn0"
  405.   param {
  406.     lr_mult: 1.0
  407.     decay_mult: 1.0
  408.   }
  409.   param {
  410.     lr_mult: 1.0
  411.     decay_mult: 0.0
  412.   }
  413.   scale_param {
  414.     bias_term: true
  415.   }
  416. }
  417. layer {
  418.   name: "group0_block2_relu0"
  419.   type: "ReLU"
  420.   bottom: "group0_block2_bn0"
  421.   top: "group0_block2_bn0"
  422. }
  423. layer {
  424.   name: "group0_block2_conv0"
  425.   type: "Convolution"
  426.   bottom: "group0_block2_bn0"
  427.   top: "group0_block2_conv0"
  428.   param {
  429.     lr_mult: 1.0
  430.     decay_mult: 1.0
  431.   }
  432.   convolution_param {
  433.     num_output: 46
  434.     bias_term: false
  435.     pad: 0
  436.     kernel_size: 1
  437.     group: 1
  438.     stride: 1
  439.     weight_filler {
  440.       type: "msra"
  441.     }
  442.   }
  443. }
  444. layer {
  445.   name: "group0_block2_bn1"
  446.   type: "BatchNorm"
  447.   bottom: "group0_block2_conv0"
  448.   top: "group0_block2_conv0"
  449.   param {
  450.     lr_mult: 0.0
  451.     decay_mult: 0.0
  452.   }
  453.   param {
  454.     lr_mult: 0.0
  455.     decay_mult: 0.0
  456.   }
  457.   param {
  458.     lr_mult: 0.0
  459.     decay_mult: 0.0
  460.   }
  461.   batch_norm_param {
  462.     moving_average_fraction: 0.8999999761581421
  463.   }
  464. }
  465. layer {
  466.   name: "group0_block2_scale1"
  467.   type: "Scale"
  468.   bottom: "group0_block2_conv0"
  469.   top: "group0_block2_conv0"
  470.   param {
  471.     lr_mult: 1.0
  472.     decay_mult: 1.0
  473.   }
  474.   param {
  475.     lr_mult: 1.0
  476.     decay_mult: 0.0
  477.   }
  478.   scale_param {
  479.     bias_term: true
  480.   }
  481. }
  482. layer {
  483.   name: "group0_block2_relu1"
  484.   type: "ReLU"
  485.   bottom: "group0_block2_conv0"
  486.   top: "group0_block2_conv0"
  487. }
  488. layer {
  489.   name: "group0_block2_asl"
  490.   type: "ActiveShift"
  491.   bottom: "group0_block2_conv0"
  492.   top: "group0_block2_asl"
  493.   param {
  494.     lr_mult: 1.0
  495.     decay_mult: 0.0
  496.   }
  497.   asl_param {
  498.     pad: 0
  499.     stride: 1
  500.     shift_filler {
  501.       type: "uniform"
  502.       min: -1.0
  503.       max: 1.0
  504.     }
  505.     normalize: true
  506.   }
  507. }
  508. layer {
  509.   name: "group0_block2_conv1"
  510.   type: "Convolution"
  511.   bottom: "group0_block2_asl"
  512.   top: "group0_block2_conv1"
  513.   param {
  514.     lr_mult: 1.0
  515.     decay_mult: 1.0
  516.   }
  517.   convolution_param {
  518.     num_output: 46
  519.     bias_term: false
  520.     pad: 0
  521.     kernel_size: 1
  522.     group: 1
  523.     stride: 1
  524.     weight_filler {
  525.       type: "msra"
  526.     }
  527.   }
  528. }
  529. layer {
  530.   name: "group0_block2_sum"
  531.   type: "Eltwise"
  532.   bottom: "group0_block1_sum"
  533.   bottom: "group0_block2_conv1"
  534.   top: "group0_block2_sum"
  535.   eltwise_param {
  536.     operation: SUM
  537.   }
  538. }
  539. layer {
  540.   name: "group1_block0_bn0"
  541.   type: "BatchNorm"
  542.   bottom: "group0_block2_sum"
  543.   top: "group1_block0_bn0"
  544.   param {
  545.     lr_mult: 0.0
  546.     decay_mult: 0.0
  547.   }
  548.   param {
  549.     lr_mult: 0.0
  550.     decay_mult: 0.0
  551.   }
  552.   param {
  553.     lr_mult: 0.0
  554.     decay_mult: 0.0
  555.   }
  556.   batch_norm_param {
  557.     moving_average_fraction: 0.8999999761581421
  558.   }
  559. }
  560. layer {
  561.   name: "group1_block0_scale0"
  562.   type: "Scale"
  563.   bottom: "group1_block0_bn0"
  564.   top: "group1_block0_bn0"
  565.   param {
  566.     lr_mult: 1.0
  567.     decay_mult: 1.0
  568.   }
  569.   param {
  570.     lr_mult: 1.0
  571.     decay_mult: 0.0
  572.   }
  573.   scale_param {
  574.     bias_term: true
  575.   }
  576. }
  577. layer {
  578.   name: "group1_block0_relu0"
  579.   type: "ReLU"
  580.   bottom: "group1_block0_bn0"
  581.   top: "group1_block0_bn0"
  582. }
  583. layer {
  584.   name: "group1_block0_conv0"
  585.   type: "Convolution"
  586.   bottom: "group1_block0_bn0"
  587.   top: "group1_block0_conv0"
  588.   param {
  589.     lr_mult: 1.0
  590.     decay_mult: 1.0
  591.   }
  592.   convolution_param {
  593.     num_output: 92
  594.     bias_term: false
  595.     pad: 0
  596.     kernel_size: 1
  597.     group: 1
  598.     stride: 1
  599.     weight_filler {
  600.       type: "msra"
  601.     }
  602.   }
  603. }
  604. layer {
  605.   name: "group1_block0_bn1"
  606.   type: "BatchNorm"
  607.   bottom: "group1_block0_conv0"
  608.   top: "group1_block0_conv0"
  609.   param {
  610.     lr_mult: 0.0
  611.     decay_mult: 0.0
  612.   }
  613.   param {
  614.     lr_mult: 0.0
  615.     decay_mult: 0.0
  616.   }
  617.   param {
  618.     lr_mult: 0.0
  619.     decay_mult: 0.0
  620.   }
  621.   batch_norm_param {
  622.     moving_average_fraction: 0.8999999761581421
  623.   }
  624. }
  625. layer {
  626.   name: "group1_block0_scale1"
  627.   type: "Scale"
  628.   bottom: "group1_block0_conv0"
  629.   top: "group1_block0_conv0"
  630.   param {
  631.     lr_mult: 1.0
  632.     decay_mult: 1.0
  633.   }
  634.   param {
  635.     lr_mult: 1.0
  636.     decay_mult: 0.0
  637.   }
  638.   scale_param {
  639.     bias_term: true
  640.   }
  641. }
  642. layer {
  643.   name: "group1_block0_relu1"
  644.   type: "ReLU"
  645.   bottom: "group1_block0_conv0"
  646.   top: "group1_block0_conv0"
  647. }
  648. layer {
  649.   name: "group1_block0_asl"
  650.   type: "ActiveShift"
  651.   bottom: "group1_block0_conv0"
  652.   top: "group1_block0_asl"
  653.   param {
  654.     lr_mult: 1.0
  655.     decay_mult: 0.0
  656.   }
  657.   asl_param {
  658.     pad: 0
  659.     stride: 2
  660.     shift_filler {
  661.       type: "uniform"
  662.       min: -1.0
  663.       max: 1.0
  664.     }
  665.     normalize: true
  666.   }
  667. }
  668. layer {
  669.   name: "group1_block0_conv1"
  670.   type: "Convolution"
  671.   bottom: "group1_block0_asl"
  672.   top: "group1_block0_conv1"
  673.   param {
  674.     lr_mult: 1.0
  675.     decay_mult: 1.0
  676.   }
  677.   convolution_param {
  678.     num_output: 92
  679.     bias_term: false
  680.     pad: 0
  681.     kernel_size: 1
  682.     group: 1
  683.     stride: 1
  684.     weight_filler {
  685.       type: "msra"
  686.     }
  687.   }
  688. }
  689. layer {
  690.   name: "group1_block0_proj"
  691.   type: "Convolution"
  692.   bottom: "group1_block0_bn0"
  693.   top: "group1_block0_proj"
  694.   param {
  695.     lr_mult: 1.0
  696.     decay_mult: 1.0
  697.   }
  698.   param {
  699.     lr_mult: 2.0
  700.     decay_mult: 0.0
  701.   }
  702.   convolution_param {
  703.     num_output: 92
  704.     pad: 0
  705.     kernel_size: 1
  706.     group: 1
  707.     stride: 2
  708.     weight_filler {
  709.       type: "msra"
  710.     }
  711.     bias_filler {
  712.       type: "constant"
  713.       value: 0.0
  714.     }
  715.   }
  716. }
  717. layer {
  718.   name: "group1_block0_sum"
  719.   type: "Eltwise"
  720.   bottom: "group1_block0_proj"
  721.   bottom: "group1_block0_conv1"
  722.   top: "group1_block0_sum"
  723.   eltwise_param {
  724.     operation: SUM
  725.   }
  726. }
  727. layer {
  728.   name: "group1_block1_bn0"
  729.   type: "BatchNorm"
  730.   bottom: "group1_block0_sum"
  731.   top: "group1_block1_bn0"
  732.   param {
  733.     lr_mult: 0.0
  734.     decay_mult: 0.0
  735.   }
  736.   param {
  737.     lr_mult: 0.0
  738.     decay_mult: 0.0
  739.   }
  740.   param {
  741.     lr_mult: 0.0
  742.     decay_mult: 0.0
  743.   }
  744.   batch_norm_param {
  745.     moving_average_fraction: 0.8999999761581421
  746.   }
  747. }
  748. layer {
  749.   name: "group1_block1_scale0"
  750.   type: "Scale"
  751.   bottom: "group1_block1_bn0"
  752.   top: "group1_block1_bn0"
  753.   param {
  754.     lr_mult: 1.0
  755.     decay_mult: 1.0
  756.   }
  757.   param {
  758.     lr_mult: 1.0
  759.     decay_mult: 0.0
  760.   }
  761.   scale_param {
  762.     bias_term: true
  763.   }
  764. }
  765. layer {
  766.   name: "group1_block1_relu0"
  767.   type: "ReLU"
  768.   bottom: "group1_block1_bn0"
  769.   top: "group1_block1_bn0"
  770. }
  771. layer {
  772.   name: "group1_block1_conv0"
  773.   type: "Convolution"
  774.   bottom: "group1_block1_bn0"
  775.   top: "group1_block1_conv0"
  776.   param {
  777.     lr_mult: 1.0
  778.     decay_mult: 1.0
  779.   }
  780.   convolution_param {
  781.     num_output: 92
  782.     bias_term: false
  783.     pad: 0
  784.     kernel_size: 1
  785.     group: 1
  786.     stride: 1
  787.     weight_filler {
  788.       type: "msra"
  789.     }
  790.   }
  791. }
  792. layer {
  793.   name: "group1_block1_bn1"
  794.   type: "BatchNorm"
  795.   bottom: "group1_block1_conv0"
  796.   top: "group1_block1_conv0"
  797.   param {
  798.     lr_mult: 0.0
  799.     decay_mult: 0.0
  800.   }
  801.   param {
  802.     lr_mult: 0.0
  803.     decay_mult: 0.0
  804.   }
  805.   param {
  806.     lr_mult: 0.0
  807.     decay_mult: 0.0
  808.   }
  809.   batch_norm_param {
  810.     moving_average_fraction: 0.8999999761581421
  811.   }
  812. }
  813. layer {
  814.   name: "group1_block1_scale1"
  815.   type: "Scale"
  816.   bottom: "group1_block1_conv0"
  817.   top: "group1_block1_conv0"
  818.   param {
  819.     lr_mult: 1.0
  820.     decay_mult: 1.0
  821.   }
  822.   param {
  823.     lr_mult: 1.0
  824.     decay_mult: 0.0
  825.   }
  826.   scale_param {
  827.     bias_term: true
  828.   }
  829. }
  830. layer {
  831.   name: "group1_block1_relu1"
  832.   type: "ReLU"
  833.   bottom: "group1_block1_conv0"
  834.   top: "group1_block1_conv0"
  835. }
  836. layer {
  837.   name: "group1_block1_asl"
  838.   type: "ActiveShift"
  839.   bottom: "group1_block1_conv0"
  840.   top: "group1_block1_asl"
  841.   param {
  842.     lr_mult: 1.0
  843.     decay_mult: 0.0
  844.   }
  845.   asl_param {
  846.     pad: 0
  847.     stride: 1
  848.     shift_filler {
  849.       type: "uniform"
  850.       min: -1.0
  851.       max: 1.0
  852.     }
  853.     normalize: true
  854.   }
  855. }
  856. layer {
  857.   name: "group1_block1_conv1"
  858.   type: "Convolution"
  859.   bottom: "group1_block1_asl"
  860.   top: "group1_block1_conv1"
  861.   param {
  862.     lr_mult: 1.0
  863.     decay_mult: 1.0
  864.   }
  865.   convolution_param {
  866.     num_output: 92
  867.     bias_term: false
  868.     pad: 0
  869.     kernel_size: 1
  870.     group: 1
  871.     stride: 1
  872.     weight_filler {
  873.       type: "msra"
  874.     }
  875.   }
  876. }
  877. layer {
  878.   name: "group1_block1_sum"
  879.   type: "Eltwise"
  880.   bottom: "group1_block0_sum"
  881.   bottom: "group1_block1_conv1"
  882.   top: "group1_block1_sum"
  883.   eltwise_param {
  884.     operation: SUM
  885.   }
  886. }
  887. layer {
  888.   name: "group1_block2_bn0"
  889.   type: "BatchNorm"
  890.   bottom: "group1_block1_sum"
  891.   top: "group1_block2_bn0"
  892.   param {
  893.     lr_mult: 0.0
  894.     decay_mult: 0.0
  895.   }
  896.   param {
  897.     lr_mult: 0.0
  898.     decay_mult: 0.0
  899.   }
  900.   param {
  901.     lr_mult: 0.0
  902.     decay_mult: 0.0
  903.   }
  904.   batch_norm_param {
  905.     moving_average_fraction: 0.8999999761581421
  906.   }
  907. }
  908. layer {
  909.   name: "group1_block2_scale0"
  910.   type: "Scale"
  911.   bottom: "group1_block2_bn0"
  912.   top: "group1_block2_bn0"
  913.   param {
  914.     lr_mult: 1.0
  915.     decay_mult: 1.0
  916.   }
  917.   param {
  918.     lr_mult: 1.0
  919.     decay_mult: 0.0
  920.   }
  921.   scale_param {
  922.     bias_term: true
  923.   }
  924. }
  925. layer {
  926.   name: "group1_block2_relu0"
  927.   type: "ReLU"
  928.   bottom: "group1_block2_bn0"
  929.   top: "group1_block2_bn0"
  930. }
  931. layer {
  932.   name: "group1_block2_conv0"
  933.   type: "Convolution"
  934.   bottom: "group1_block2_bn0"
  935.   top: "group1_block2_conv0"
  936.   param {
  937.     lr_mult: 1.0
  938.     decay_mult: 1.0
  939.   }
  940.   convolution_param {
  941.     num_output: 92
  942.     bias_term: false
  943.     pad: 0
  944.     kernel_size: 1
  945.     group: 1
  946.     stride: 1
  947.     weight_filler {
  948.       type: "msra"
  949.     }
  950.   }
  951. }
  952. layer {
  953.   name: "group1_block2_bn1"
  954.   type: "BatchNorm"
  955.   bottom: "group1_block2_conv0"
  956.   top: "group1_block2_conv0"
  957.   param {
  958.     lr_mult: 0.0
  959.     decay_mult: 0.0
  960.   }
  961.   param {
  962.     lr_mult: 0.0
  963.     decay_mult: 0.0
  964.   }
  965.   param {
  966.     lr_mult: 0.0
  967.     decay_mult: 0.0
  968.   }
  969.   batch_norm_param {
  970.     moving_average_fraction: 0.8999999761581421
  971.   }
  972. }
  973. layer {
  974.   name: "group1_block2_scale1"
  975.   type: "Scale"
  976.   bottom: "group1_block2_conv0"
  977.   top: "group1_block2_conv0"
  978.   param {
  979.     lr_mult: 1.0
  980.     decay_mult: 1.0
  981.   }
  982.   param {
  983.     lr_mult: 1.0
  984.     decay_mult: 0.0
  985.   }
  986.   scale_param {
  987.     bias_term: true
  988.   }
  989. }
  990. layer {
  991.   name: "group1_block2_relu1"
  992.   type: "ReLU"
  993.   bottom: "group1_block2_conv0"
  994.   top: "group1_block2_conv0"
  995. }
  996. layer {
  997.   name: "group1_block2_asl"
  998.   type: "ActiveShift"
  999.   bottom: "group1_block2_conv0"
  1000.   top: "group1_block2_asl"
  1001.   param {
  1002.     lr_mult: 1.0
  1003.     decay_mult: 0.0
  1004.   }
  1005.   asl_param {
  1006.     pad: 0
  1007.     stride: 1
  1008.     shift_filler {
  1009.       type: "uniform"
  1010.       min: -1.0
  1011.       max: 1.0
  1012.     }
  1013.     normalize: true
  1014.   }
  1015. }
  1016. layer {
  1017.   name: "group1_block2_conv1"
  1018.   type: "Convolution"
  1019.   bottom: "group1_block2_asl"
  1020.   top: "group1_block2_conv1"
  1021.   param {
  1022.     lr_mult: 1.0
  1023.     decay_mult: 1.0
  1024.   }
  1025.   convolution_param {
  1026.     num_output: 92
  1027.     bias_term: false
  1028.     pad: 0
  1029.     kernel_size: 1
  1030.     group: 1
  1031.     stride: 1
  1032.     weight_filler {
  1033.       type: "msra"
  1034.     }
  1035.   }
  1036. }
  1037. layer {
  1038.   name: "group1_block2_sum"
  1039.   type: "Eltwise"
  1040.   bottom: "group1_block1_sum"
  1041.   bottom: "group1_block2_conv1"
  1042.   top: "group1_block2_sum"
  1043.   eltwise_param {
  1044.     operation: SUM
  1045.   }
  1046. }
  1047. layer {
  1048.   name: "group2_block0_bn0"
  1049.   type: "BatchNorm"
  1050.   bottom: "group1_block2_sum"
  1051.   top: "group2_block0_bn0"
  1052.   param {
  1053.     lr_mult: 0.0
  1054.     decay_mult: 0.0
  1055.   }
  1056.   param {
  1057.     lr_mult: 0.0
  1058.     decay_mult: 0.0
  1059.   }
  1060.   param {
  1061.     lr_mult: 0.0
  1062.     decay_mult: 0.0
  1063.   }
  1064.   batch_norm_param {
  1065.     moving_average_fraction: 0.8999999761581421
  1066.   }
  1067. }
  1068. layer {
  1069.   name: "group2_block0_scale0"
  1070.   type: "Scale"
  1071.   bottom: "group2_block0_bn0"
  1072.   top: "group2_block0_bn0"
  1073.   param {
  1074.     lr_mult: 1.0
  1075.     decay_mult: 1.0
  1076.   }
  1077.   param {
  1078.     lr_mult: 1.0
  1079.     decay_mult: 0.0
  1080.   }
  1081.   scale_param {
  1082.     bias_term: true
  1083.   }
  1084. }
  1085. layer {
  1086.   name: "group2_block0_relu0"
  1087.   type: "ReLU"
  1088.   bottom: "group2_block0_bn0"
  1089.   top: "group2_block0_bn0"
  1090. }
  1091. layer {
  1092.   name: "group2_block0_conv0"
  1093.   type: "Convolution"
  1094.   bottom: "group2_block0_bn0"
  1095.   top: "group2_block0_conv0"
  1096.   param {
  1097.     lr_mult: 1.0
  1098.     decay_mult: 1.0
  1099.   }
  1100.   convolution_param {
  1101.     num_output: 184
  1102.     bias_term: false
  1103.     pad: 0
  1104.     kernel_size: 1
  1105.     group: 1
  1106.     stride: 1
  1107.     weight_filler {
  1108.       type: "msra"
  1109.     }
  1110.   }
  1111. }
  1112. layer {
  1113.   name: "group2_block0_bn1"
  1114.   type: "BatchNorm"
  1115.   bottom: "group2_block0_conv0"
  1116.   top: "group2_block0_conv0"
  1117.   param {
  1118.     lr_mult: 0.0
  1119.     decay_mult: 0.0
  1120.   }
  1121.   param {
  1122.     lr_mult: 0.0
  1123.     decay_mult: 0.0
  1124.   }
  1125.   param {
  1126.     lr_mult: 0.0
  1127.     decay_mult: 0.0
  1128.   }
  1129.   batch_norm_param {
  1130.     moving_average_fraction: 0.8999999761581421
  1131.   }
  1132. }
  1133. layer {
  1134.   name: "group2_block0_scale1"
  1135.   type: "Scale"
  1136.   bottom: "group2_block0_conv0"
  1137.   top: "group2_block0_conv0"
  1138.   param {
  1139.     lr_mult: 1.0
  1140.     decay_mult: 1.0
  1141.   }
  1142.   param {
  1143.     lr_mult: 1.0
  1144.     decay_mult: 0.0
  1145.   }
  1146.   scale_param {
  1147.     bias_term: true
  1148.   }
  1149. }
  1150. layer {
  1151.   name: "group2_block0_relu1"
  1152.   type: "ReLU"
  1153.   bottom: "group2_block0_conv0"
  1154.   top: "group2_block0_conv0"
  1155. }
  1156. layer {
  1157.   name: "group2_block0_asl"
  1158.   type: "ActiveShift"
  1159.   bottom: "group2_block0_conv0"
  1160.   top: "group2_block0_asl"
  1161.   param {
  1162.     lr_mult: 1.0
  1163.     decay_mult: 0.0
  1164.   }
  1165.   asl_param {
  1166.     pad: 0
  1167.     stride: 2
  1168.     shift_filler {
  1169.       type: "uniform"
  1170.       min: -1.0
  1171.       max: 1.0
  1172.     }
  1173.     normalize: true
  1174.   }
  1175. }
  1176. layer {
  1177.   name: "group2_block0_conv1"
  1178.   type: "Convolution"
  1179.   bottom: "group2_block0_asl"
  1180.   top: "group2_block0_conv1"
  1181.   param {
  1182.     lr_mult: 1.0
  1183.     decay_mult: 1.0
  1184.   }
  1185.   convolution_param {
  1186.     num_output: 184
  1187.     bias_term: false
  1188.     pad: 0
  1189.     kernel_size: 1
  1190.     group: 1
  1191.     stride: 1
  1192.     weight_filler {
  1193.       type: "msra"
  1194.     }
  1195.   }
  1196. }
  1197. layer {
  1198.   name: "group2_block0_proj"
  1199.   type: "Convolution"
  1200.   bottom: "group2_block0_bn0"
  1201.   top: "group2_block0_proj"
  1202.   param {
  1203.     lr_mult: 1.0
  1204.     decay_mult: 1.0
  1205.   }
  1206.   param {
  1207.     lr_mult: 2.0
  1208.     decay_mult: 0.0
  1209.   }
  1210.   convolution_param {
  1211.     num_output: 184
  1212.     pad: 0
  1213.     kernel_size: 1
  1214.     group: 1
  1215.     stride: 2
  1216.     weight_filler {
  1217.       type: "msra"
  1218.     }
  1219.     bias_filler {
  1220.       type: "constant"
  1221.       value: 0.0
  1222.     }
  1223.   }
  1224. }
  1225. layer {
  1226.   name: "group2_block0_sum"
  1227.   type: "Eltwise"
  1228.   bottom: "group2_block0_proj"
  1229.   bottom: "group2_block0_conv1"
  1230.   top: "group2_block0_sum"
  1231.   eltwise_param {
  1232.     operation: SUM
  1233.   }
  1234. }
  1235. layer {
  1236.   name: "group2_block1_bn0"
  1237.   type: "BatchNorm"
  1238.   bottom: "group2_block0_sum"
  1239.   top: "group2_block1_bn0"
  1240.   param {
  1241.     lr_mult: 0.0
  1242.     decay_mult: 0.0
  1243.   }
  1244.   param {
  1245.     lr_mult: 0.0
  1246.     decay_mult: 0.0
  1247.   }
  1248.   param {
  1249.     lr_mult: 0.0
  1250.     decay_mult: 0.0
  1251.   }
  1252.   batch_norm_param {
  1253.     moving_average_fraction: 0.8999999761581421
  1254.   }
  1255. }
  1256. layer {
  1257.   name: "group2_block1_scale0"
  1258.   type: "Scale"
  1259.   bottom: "group2_block1_bn0"
  1260.   top: "group2_block1_bn0"
  1261.   param {
  1262.     lr_mult: 1.0
  1263.     decay_mult: 1.0
  1264.   }
  1265.   param {
  1266.     lr_mult: 1.0
  1267.     decay_mult: 0.0
  1268.   }
  1269.   scale_param {
  1270.     bias_term: true
  1271.   }
  1272. }
  1273. layer {
  1274.   name: "group2_block1_relu0"
  1275.   type: "ReLU"
  1276.   bottom: "group2_block1_bn0"
  1277.   top: "group2_block1_bn0"
  1278. }
  1279. layer {
  1280.   name: "group2_block1_conv0"
  1281.   type: "Convolution"
  1282.   bottom: "group2_block1_bn0"
  1283.   top: "group2_block1_conv0"
  1284.   param {
  1285.     lr_mult: 1.0
  1286.     decay_mult: 1.0
  1287.   }
  1288.   convolution_param {
  1289.     num_output: 184
  1290.     bias_term: false
  1291.     pad: 0
  1292.     kernel_size: 1
  1293.     group: 1
  1294.     stride: 1
  1295.     weight_filler {
  1296.       type: "msra"
  1297.     }
  1298.   }
  1299. }
  1300. layer {
  1301.   name: "group2_block1_bn1"
  1302.   type: "BatchNorm"
  1303.   bottom: "group2_block1_conv0"
  1304.   top: "group2_block1_conv0"
  1305.   param {
  1306.     lr_mult: 0.0
  1307.     decay_mult: 0.0
  1308.   }
  1309.   param {
  1310.     lr_mult: 0.0
  1311.     decay_mult: 0.0
  1312.   }
  1313.   param {
  1314.     lr_mult: 0.0
  1315.     decay_mult: 0.0
  1316.   }
  1317.   batch_norm_param {
  1318.     moving_average_fraction: 0.8999999761581421
  1319.   }
  1320. }
  1321. layer {
  1322.   name: "group2_block1_scale1"
  1323.   type: "Scale"
  1324.   bottom: "group2_block1_conv0"
  1325.   top: "group2_block1_conv0"
  1326.   param {
  1327.     lr_mult: 1.0
  1328.     decay_mult: 1.0
  1329.   }
  1330.   param {
  1331.     lr_mult: 1.0
  1332.     decay_mult: 0.0
  1333.   }
  1334.   scale_param {
  1335.     bias_term: true
  1336.   }
  1337. }
  1338. layer {
  1339.   name: "group2_block1_relu1"
  1340.   type: "ReLU"
  1341.   bottom: "group2_block1_conv0"
  1342.   top: "group2_block1_conv0"
  1343. }
  1344. layer {
  1345.   name: "group2_block1_asl"
  1346.   type: "ActiveShift"
  1347.   bottom: "group2_block1_conv0"
  1348.   top: "group2_block1_asl"
  1349.   param {
  1350.     lr_mult: 1.0
  1351.     decay_mult: 0.0
  1352.   }
  1353.   asl_param {
  1354.     pad: 0
  1355.     stride: 1
  1356.     shift_filler {
  1357.       type: "uniform"
  1358.       min: -1.0
  1359.       max: 1.0
  1360.     }
  1361.     normalize: true
  1362.   }
  1363. }
  1364. layer {
  1365.   name: "group2_block1_conv1"
  1366.   type: "Convolution"
  1367.   bottom: "group2_block1_asl"
  1368.   top: "group2_block1_conv1"
  1369.   param {
  1370.     lr_mult: 1.0
  1371.     decay_mult: 1.0
  1372.   }
  1373.   convolution_param {
  1374.     num_output: 184
  1375.     bias_term: false
  1376.     pad: 0
  1377.     kernel_size: 1
  1378.     group: 1
  1379.     stride: 1
  1380.     weight_filler {
  1381.       type: "msra"
  1382.     }
  1383.   }
  1384. }
  1385. layer {
  1386.   name: "group2_block1_sum"
  1387.   type: "Eltwise"
  1388.   bottom: "group2_block0_sum"
  1389.   bottom: "group2_block1_conv1"
  1390.   top: "group2_block1_sum"
  1391.   eltwise_param {
  1392.     operation: SUM
  1393.   }
  1394. }
  1395. layer {
  1396.   name: "group2_block2_bn0"
  1397.   type: "BatchNorm"
  1398.   bottom: "group2_block1_sum"
  1399.   top: "group2_block2_bn0"
  1400.   param {
  1401.     lr_mult: 0.0
  1402.     decay_mult: 0.0
  1403.   }
  1404.   param {
  1405.     lr_mult: 0.0
  1406.     decay_mult: 0.0
  1407.   }
  1408.   param {
  1409.     lr_mult: 0.0
  1410.     decay_mult: 0.0
  1411.   }
  1412.   batch_norm_param {
  1413.     moving_average_fraction: 0.8999999761581421
  1414.   }
  1415. }
  1416. layer {
  1417.   name: "group2_block2_scale0"
  1418.   type: "Scale"
  1419.   bottom: "group2_block2_bn0"
  1420.   top: "group2_block2_bn0"
  1421.   param {
  1422.     lr_mult: 1.0
  1423.     decay_mult: 1.0
  1424.   }
  1425.   param {
  1426.     lr_mult: 1.0
  1427.     decay_mult: 0.0
  1428.   }
  1429.   scale_param {
  1430.     bias_term: true
  1431.   }
  1432. }
  1433. layer {
  1434.   name: "group2_block2_relu0"
  1435.   type: "ReLU"
  1436.   bottom: "group2_block2_bn0"
  1437.   top: "group2_block2_bn0"
  1438. }
  1439. layer {
  1440.   name: "group2_block2_conv0"
  1441.   type: "Convolution"
  1442.   bottom: "group2_block2_bn0"
  1443.   top: "group2_block2_conv0"
  1444.   param {
  1445.     lr_mult: 1.0
  1446.     decay_mult: 1.0
  1447.   }
  1448.   convolution_param {
  1449.     num_output: 184
  1450.     bias_term: false
  1451.     pad: 0
  1452.     kernel_size: 1
  1453.     group: 1
  1454.     stride: 1
  1455.     weight_filler {
  1456.       type: "msra"
  1457.     }
  1458.   }
  1459. }
  1460. layer {
  1461.   name: "group2_block2_bn1"
  1462.   type: "BatchNorm"
  1463.   bottom: "group2_block2_conv0"
  1464.   top: "group2_block2_conv0"
  1465.   param {
  1466.     lr_mult: 0.0
  1467.     decay_mult: 0.0
  1468.   }
  1469.   param {
  1470.     lr_mult: 0.0
  1471.     decay_mult: 0.0
  1472.   }
  1473.   param {
  1474.     lr_mult: 0.0
  1475.     decay_mult: 0.0
  1476.   }
  1477.   batch_norm_param {
  1478.     moving_average_fraction: 0.8999999761581421
  1479.   }
  1480. }
  1481. layer {
  1482.   name: "group2_block2_scale1"
  1483.   type: "Scale"
  1484.   bottom: "group2_block2_conv0"
  1485.   top: "group2_block2_conv0"
  1486.   param {
  1487.     lr_mult: 1.0
  1488.     decay_mult: 1.0
  1489.   }
  1490.   param {
  1491.     lr_mult: 1.0
  1492.     decay_mult: 0.0
  1493.   }
  1494.   scale_param {
  1495.     bias_term: true
  1496.   }
  1497. }
  1498. layer {
  1499.   name: "group2_block2_relu1"
  1500.   type: "ReLU"
  1501.   bottom: "group2_block2_conv0"
  1502.   top: "group2_block2_conv0"
  1503. }
  1504. layer {
  1505.   name: "group2_block2_asl"
  1506.   type: "ActiveShift"
  1507.   bottom: "group2_block2_conv0"
  1508.   top: "group2_block2_asl"
  1509.   param {
  1510.     lr_mult: 1.0
  1511.     decay_mult: 0.0
  1512.   }
  1513.   asl_param {
  1514.     pad: 0
  1515.     stride: 1
  1516.     shift_filler {
  1517.       type: "uniform"
  1518.       min: -1.0
  1519.       max: 1.0
  1520.     }
  1521.     normalize: true
  1522.   }
  1523. }
  1524. layer {
  1525.   name: "group2_block2_conv1"
  1526.   type: "Convolution"
  1527.   bottom: "group2_block2_asl"
  1528.   top: "group2_block2_conv1"
  1529.   param {
  1530.     lr_mult: 1.0
  1531.     decay_mult: 1.0
  1532.   }
  1533.   convolution_param {
  1534.     num_output: 184
  1535.     bias_term: false
  1536.     pad: 0
  1537.     kernel_size: 1
  1538.     group: 1
  1539.     stride: 1
  1540.     weight_filler {
  1541.       type: "msra"
  1542.     }
  1543.   }
  1544. }
  1545. layer {
  1546.   name: "group2_block2_sum"
  1547.   type: "Eltwise"
  1548.   bottom: "group2_block1_sum"
  1549.   bottom: "group2_block2_conv1"
  1550.   top: "group2_block2_sum"
  1551.   eltwise_param {
  1552.     operation: SUM
  1553.   }
  1554. }
  1555. layer {
  1556.   name: "last_bn"
  1557.   type: "BatchNorm"
  1558.   bottom: "group2_block2_sum"
  1559.   top: "group2_block2_sum"
  1560.   param {
  1561.     lr_mult: 0.0
  1562.     decay_mult: 0.0
  1563.   }
  1564.   param {
  1565.     lr_mult: 0.0
  1566.     decay_mult: 0.0
  1567.   }
  1568.   param {
  1569.     lr_mult: 0.0
  1570.     decay_mult: 0.0
  1571.   }
  1572.   batch_norm_param {
  1573.     moving_average_fraction: 0.8999999761581421
  1574.   }
  1575. }
  1576. layer {
  1577.   name: "last_scale"
  1578.   type: "Scale"
  1579.   bottom: "group2_block2_sum"
  1580.   top: "group2_block2_sum"
  1581.   param {
  1582.     lr_mult: 1.0
  1583.     decay_mult: 1.0
  1584.   }
  1585.   param {
  1586.     lr_mult: 1.0
  1587.     decay_mult: 0.0
  1588.   }
  1589.   scale_param {
  1590.     bias_term: true
  1591.   }
  1592. }
  1593. layer {
  1594.   name: "last_relu"
  1595.   type: "ReLU"
  1596.   bottom: "group2_block2_sum"
  1597.   top: "group2_block2_sum"
  1598. }
  1599. layer {
  1600.   name: "global_avg_pool"
  1601.   type: "Pooling"
  1602.   bottom: "group2_block2_sum"
  1603.   top: "global_avg_pool"
  1604.   pooling_param {
  1605.     pool: AVE
  1606.     global_pooling: true
  1607.   }
  1608. }
  1609. layer {
  1610.   name: "fc"
  1611.   type: "InnerProduct"
  1612.   bottom: "global_avg_pool"
  1613.   top: "fc"
  1614.   param {
  1615.     lr_mult: 1.0
  1616.     decay_mult: 1.0
  1617.   }
  1618.   param {
  1619.     lr_mult: 2.0
  1620.     decay_mult: 0.0
  1621.   }
  1622.   inner_product_param {
  1623.     num_output: 10
  1624.     weight_filler {
  1625.       type: "msra"
  1626.     }
  1627.     bias_filler {
  1628.       type: "constant"
  1629.       value: 0.0
  1630.     }
  1631.   }
  1632. }
  1633. layer {
  1634.   name: "loss"
  1635.   type: "SoftmaxWithLoss"
  1636.   bottom: "fc"
  1637.   bottom: "label"
  1638.   top: "loss"
  1639. }
  1640. layer {
  1641.   name: "softmax"
  1642.   type: "Softmax"
  1643.   bottom: "fc"
  1644.   top: "softmax"
  1645. }
  1646. layer {
  1647.   name: "Accuracy"
  1648.   type: "Accuracy"
  1649.   bottom: "softmax"
  1650.   bottom: "label"
  1651.   top: "Accuracy"
  1652. }
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
 
Top