from keras import layers
from keras import models


#
# image dimensions
#

img_height = 224
img_width = 224
img_channels = 3

#
# network params
#

cardinality = 32
d = 4  # bottleneck width
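
# rough sanity check on that choice (a sketch of the arithmetic from the ResNeXt paper):
# a 32x4d block on 256 input/output channels costs about as many parameters as a standard
# 64-wide ResNet bottleneck block (~70k in both cases); the two variables below are for
# illustration only and are not used by the network
resnext_block_params = cardinality * (256 * d + 3 * 3 * d * d + d * 256)  # 32 * 2192 = 70144
resnet_block_params = 256 * 64 + 3 * 3 * 64 * 64 + 64 * 256              # 69632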


def resNeXt_network(x):
    def add_common_layers(y):
        y = layers.BatchNormalization()(y)
        y = layers.LeakyReLU()(y)

        return y

    def residual_block(y, nb_channels_out, strides=(1, 1)):
        """
        Our network consists of a stack of residual blocks. These blocks have the same topology,
        and are subject to two simple rules:

        - If producing spatial maps of the same size, the blocks share the same hyper-parameters (width and filter sizes).
        - Each time the spatial map is down-sampled by a factor of 2, the width of the blocks is multiplied by a factor of 2.

        :param y: input tensor of the block
        :param nb_channels_out: number of output channels of the block
        :param strides: strides of the 3x3 convolutions; (2, 2) in the first block of a down-sampling stage
        :return: output tensor of the block
        """
        shortcut = y

        # the shortcuts are identity connections except for those increasing dimensions, which are projections;
        # for simplicity this implementation always projects the shortcut with a 1x1 convolution
        shortcut = layers.Conv2D(nb_channels_out, kernel_size=(1, 1), strides=strides, padding='same')(shortcut)

        # split-transform-merge: each of the `cardinality` branches reduces the input to d channels,
        # applies a 3x3 convolution, and the branch outputs are concatenated
        branches = []
        for _ in range(cardinality):
            tmp = layers.Conv2D(d, kernel_size=(1, 1), padding='same')(y)
            tmp = add_common_layers(tmp)

            tmp = layers.Conv2D(d, kernel_size=(3, 3), strides=strides, padding='same')(tmp)
            tmp = add_common_layers(tmp)

            branches.append(tmp)

        y = layers.concatenate(branches, axis=-1)
        y = layers.Conv2D(nb_channels_out, kernel_size=(1, 1), padding='same')(y)

        # batch normalization is employed after aggregating the transformations and before adding to the shortcut
        y = layers.BatchNormalization()(y)
        y = layers.add([shortcut, y])

        # relu is performed right after each batch normalization,
        # except for the output of the block, where relu is performed after adding the shortcut
        y = layers.LeakyReLU()(y)

        return y
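
    # note: the explicit branch loop above is the split-transform-merge form of the
    # aggregated transformation; if your Keras/TensorFlow version supports the `groups`
    # argument on Conv2D (recent tf.keras and Keras 3 do; the 2017-era standalone Keras
    # used here may not), the same block can be sketched with a single grouped convolution,
    # e.g. (illustrative only, not used below):
    #
    #   y = layers.Conv2D(cardinality * d, kernel_size=(1, 1), padding='same')(y)
    #   y = layers.Conv2D(cardinality * d, kernel_size=(3, 3), strides=strides,
    #                     padding='same', groups=cardinality)(y)
    #   y = layers.Conv2D(nb_channels_out, kernel_size=(1, 1), padding='same')(y)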

    # conv1
    x = layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same')(x)
    x = add_common_layers(x)

    # conv2
    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    for i in range(3):
        x = residual_block(x, 256)

    # conv3
    for i in range(4):
        # down-sampling is done by stride-2 convolutions in the 3×3 layer of the first block in each stage
        _strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 512, _strides)

    # conv4
    for i in range(6):
        _strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 1024, _strides)

    # conv5
    for i in range(3):
        _strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 2048, _strides)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(1)(x)

    return x


image_tensor = layers.Input(shape=(img_height, img_width, img_channels))
network_output = resNeXt_network(image_tensor)

model = models.Model(inputs=[image_tensor], outputs=[network_output])
print(model.summary())
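

#
# optional sanity check (a minimal sketch, assuming numpy is installed):
# run one random batch through the network and confirm the output shape
#

import numpy as np

dummy_batch = np.random.rand(2, img_height, img_width, img_channels).astype('float32')
predictions = model.predict(dummy_batch)
print(predictions.shape)  # expected: (2, 1)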