Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from keras import layers
- from keras import models
#
# image dimensions
#
img_height = 224
img_width = 224
img_channels = 3
#
# network params
#
# ResNeXt "32x4d" configuration: `cardinality` is the number of parallel
# transformation paths aggregated in each residual block, and `d` is the
# bottleneck width (number of channels) of each individual path.
cardinality = 32
d = 4  # bottleneck width
def resNeXt_network(x):
    """Build a ResNeXt-50 (32x4d)-style graph on top of the input tensor `x`.

    Stage layout follows ResNet-50: conv1 (7x7, stride 2) -> 3x3 max-pool ->
    3, 4, 6, 3 residual blocks with 256, 512, 1024, 2048 output channels
    respectively, then global average pooling and a single linear Dense unit.

    :param x: 4D image tensor (batch, height, width, channels).
    :return: output tensor of the Dense(1) head (linear activation).
    """
    def add_common_layers(y):
        # BN + LeakyReLU, applied after every convolution except the block
        # output (there the activation comes after the shortcut addition).
        y = layers.BatchNormalization()(y)
        y = layers.LeakyReLU()(y)
        return y

    def residual_block(y, nb_channels_out, strides=(1, 1)):
        """
        Our network consists of a stack of residual blocks. These blocks have
        the same topology, and are subject to two simple rules:
        - If producing spatial maps of the same size, the blocks share the same
          hyper-parameters (width and filter sizes).
        - Each time the spatial map is down-sampled by a factor of 2, the width
          of the blocks is multiplied by a factor of 2.

        :param y: input tensor of the block.
        :param nb_channels_out: number of output channels of the block.
        :param strides: strides of the 3x3 convolutions; (2, 2) down-samples.
        :return: output tensor of the block.
        """
        shortcut = y
        # The shortcut is an identity connection except when the block changes
        # the spatial size or the channel count, in which case it is a 1x1
        # projection (with BN, consistent with "BN before adding to the
        # shortcut" below). The original code projected unconditionally,
        # contradicting its own comment and adding needless parameters.
        if strides != (1, 1) or y.shape[-1] != nb_channels_out:
            shortcut = layers.Conv2D(nb_channels_out, kernel_size=(1, 1),
                                     strides=strides, padding='same')(shortcut)
            shortcut = layers.BatchNormalization()(shortcut)

        # Aggregated transformation: `cardinality` parallel bottleneck paths of
        # width `d`, concatenated channel-wise (equivalent to a grouped conv).
        branches = []
        for _ in range(cardinality):
            tmp = layers.Conv2D(d, kernel_size=(1, 1), padding='same')(y)
            tmp = add_common_layers(tmp)
            # Down-sampling (when requested) happens in the 3x3 convolution.
            tmp = layers.Conv2D(d, kernel_size=(3, 3), strides=strides,
                                padding='same')(tmp)
            tmp = add_common_layers(tmp)
            branches.append(tmp)
        y = layers.concatenate(branches, axis=-1)

        y = layers.Conv2D(nb_channels_out, kernel_size=(1, 1), padding='same')(y)
        # Batch normalization is employed after aggregating the transformations
        # and before adding to the shortcut.
        y = layers.BatchNormalization()(y)
        y = layers.add([shortcut, y])
        # ReLU is performed right after each batch normalization, except for
        # the output of the block, where it is performed after the addition.
        y = layers.LeakyReLU()(y)
        return y

    # conv1
    x = layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same')(x)
    x = add_common_layers(x)

    # conv2
    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    for i in range(3):
        x = residual_block(x, 256)

    # conv3
    for i in range(4):
        # Down-sampling is done by stride-2 convolutions in the 3x3 layer of
        # the first block in each stage.
        _strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 512, _strides)

    # conv4
    for i in range(6):
        _strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 1024, _strides)

    # conv5
    for i in range(3):
        _strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 2048, _strides)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(1)(x)
    return x
# Build the model: single image input -> ResNeXt feature extractor -> 1-unit head.
image_tensor = layers.Input(shape=(img_height, img_width, img_channels))
network_output = resNeXt_network(image_tensor)
model = models.Model(inputs=[image_tensor], outputs=[network_output])
# Model.summary() prints the architecture table itself and returns None;
# wrapping it in print() emitted a spurious trailing "None" line.
model.summary()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement