CNN definition

# CNN to create a probability map for the features
# Author: A. AYDIN (with very heavy contributions from C. Wolf)
# Date: 21/06/2016

name: "Feature_detection"

# These are for training
# This layer extracts the images
layer
{
  name: "ImageData"
  type: "Data"
  top: "data"
  top: "redundant1"
  include
  {
    phase: TRAIN
  }
  data_param
  {
    source: "Convnet/lmdbImageTrain"
    batch_size: 1
    backend: LMDB
  }
}

# This one extracts the labels (which are images)
layer
{
  name: "ImageGT"
  type: "Data"
  top: "label"
  top: "redundant2"
  include
  {
    phase: TRAIN
  }
  data_param
  {
    source: "Convnet/lmdbLabelTrain"
    batch_size: 1
    backend: LMDB
  }
}

# These are for validation
# This layer extracts the images
layer
{
  name: "ImageData"
  type: "Data"
  top: "data"
  top: "redundant1"
  include
  {
    phase: TEST
  }
  data_param
  {
    source: "Convnet/lmdbImageTest"
    batch_size: 1
    backend: LMDB
  }
}

# This one extracts the labels (which are images)
layer
{
  name: "ImageGT"
  type: "Data"
  top: "label"
  top: "redundant2"
  include
  {
    phase: TEST
  }
  data_param
  {
    source: "Convnet/lmdbLabelTest"
    batch_size: 1
    backend: LMDB
  }
}
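
# Note: the two Data layers in each phase walk their LMDBs
# sequentially and independently, so the image and label databases
# must be written in the same order for every "data" batch to line
# up with its "label" batch. The second top of each Data layer is
# the Datum's scalar label, which is unused here, hence the names
# "redundant1" and "redundant2".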

# We are going to have 2 Conv+Norm+MaxPool blocks
layer
{
  name: "Conv5x5x32"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param
  {
    kernel_size: 5
    num_output: 32
    # The filters are 5x5x32
    pad: 2
    # So the output is HxWx32
  }
}

layer
{
  name: "Norm1"
  type: "LRN"
  bottom: "conv1"
  top: "norm1"
  lrn_param
  {
    # For now, I am using Caffe's default values of:
    # local_size: 5
    # alpha: 1
    # beta: 0.75
    norm_region: WITHIN_CHANNEL
  }
}

layer
{
  name: "Pool1"
  type: "Pooling"
  bottom: "norm1"
  top: "pool1"
  pooling_param
  {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
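
# After this 2x2, stride-2 pooling the feature maps are H/2 x W/2 x 32.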
# The first block is done, now onto the second

layer
{
  name: "Conv3x3x64_1"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param
  {
    kernel_size: 3
    num_output: 64
    # The filters are 3x3x64
    pad: 1
    # So the output is H/2xW/2x64
  }
}

layer
{
  name: "Norm2"
  type: "LRN"
  bottom: "conv2"
  top: "norm2"
  lrn_param
  {
    # For now, I am using Caffe's default values of:
    # local_size: 5
    # alpha: 1
    # beta: 0.75
    norm_region: WITHIN_CHANNEL
  }
}

layer
{
  name: "Pool2"
  type: "Pooling"
  bottom: "norm2"
  top: "pool2"
  pooling_param
  {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
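
# After the second pooling the feature maps are H/4 x W/4 x 64.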

# Now that the Conv+Norm+MaxPool blocks are done, we will have 3 layers of conv3x3x64

layer
{
  name: "Conv3x3x64_2"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  convolution_param
  {
    kernel_size: 3
    num_output: 64
    # The filters are 3x3x64
    pad: 1
    # So the output is H/4xW/4x64
  }
}

layer
{
  name: "Conv3x3x64_3"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  convolution_param
  {
    kernel_size: 3
    num_output: 64
    # The filters are 3x3x64
    pad: 1
    # So the output is H/4xW/4x64
  }
}

layer
{
  name: "Conv3x3x64_4"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  convolution_param
  {
    kernel_size: 3
    num_output: 64
    # The filters are 3x3x64
    pad: 1
    # So the output is H/4xW/4x64
  }
}
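
# Stacking three 3x3 convolutions keeps the maps at H/4 x W/4 while
# growing the effective receptive field (three 3x3 layers cover the
# same extent as a single 7x7).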

# Followed by a layer of conv3x3x256
layer
{
  name: "Conv3x3x256"
  type: "Convolution"
  bottom: "conv5"
  top: "conv6"
  convolution_param
  {
    kernel_size: 3
    num_output: 256
    # The filters are 3x3x256
    pad: 1
    # So the output is H/4xW/4x256
  }
}

# And lastly, a conv3x3x1
layer
{
  name: "Conv3x3x1"
  type: "Convolution"
  bottom: "conv6"
  top: "conv7"
  convolution_param
  {
    kernel_size: 3
    num_output: 1
    # The filters are 3x3x1
    pad: 1
    # So the output is H/4xW/4x1
  }
}
# We transform the last feature map into a probability map.
# conv7 has a single channel, so a channel-wise Softmax would output
# a constant 1 at every pixel; a per-pixel Sigmoid gives the intended
# probability map instead.
layer
{
  name: "Prob"
  type: "Sigmoid"
  bottom: "conv7"
  top: "prob"
}

# Lastly we calculate the loss.
# The loss takes the raw conv7 scores rather than "prob":
# SigmoidCrossEntropyLoss applies its own sigmoid internally, so
# feeding it already-squashed values would apply the nonlinearity
# twice (the original Softmax + SoftmaxWithLoss pairing had the same
# problem).
layer
{
  name: "Loss"
  type: "SigmoidCrossEntropyLoss"
  bottom: "conv7"
  bottom: "label"
  top: "loss"
}
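
A minimal sketch, in Python with the lmdb package and pycaffe, of how
the paired LMDBs this net reads could be built. The helper name and
the image/label lists are assumptions for illustration; only the two
source paths come from the definition above. Note that the label maps
must be stored at conv7's resolution (H/4 x W/4), since
SigmoidCrossEntropyLoss compares its two bottoms element-wise.

import lmdb
import caffe

def write_lmdb(path, arrays):
    # Hypothetical helper: writes a list of HxWxC uint8 numpy arrays
    # into an LMDB of serialized Caffe Datums, keyed in insertion order.
    env = lmdb.open(path, map_size=1 << 40)
    with env.begin(write=True) as txn:
        for i, arr in enumerate(arrays):
            # Caffe Datums store images channel-first (C x H x W).
            datum = caffe.io.array_to_datum(arr.transpose(2, 0, 1))
            txn.put('{:08d}'.format(i).encode('ascii'),
                    datum.SerializeToString())
    env.close()

# images[i] and labels[i] must describe the same sample: the two Data
# layers read their databases in the same sequential order.
# write_lmdb('Convnet/lmdbImageTrain', images)  # H x W x 3 inputs
# write_lmdb('Convnet/lmdbLabelTrain', labels)  # H/4 x W/4 x 1 label maps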