Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
# Shared hyper-parameters for the dense U-Net blocks below.
concate_axis = -1    # channel axis for concatenate() — assumes channels-last data format (TODO confirm)
batchnorm_axis = -1  # normalize over the channel axis, matching concate_axis
drop_rate = 0.1      # dropout probability applied at the end of every block
def down_block(inputs, n_filters):
    """One encoder stage of the dense U-Net.

    Three 3x3 conv layers, each densely connected (every conv sees the
    concatenation of all previous feature maps plus the input), followed
    by batch-norm, 2x2 max-pooling and dropout.

    Returns:
        (skip, pooled): the pre-pooling skip connection for the matching
        decoder stage, and the pooled/dropped tensor for the next stage.
    """
    conv_a = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(inputs)
    dense_a = concatenate([conv_a, inputs], axis=concate_axis)
    conv_b = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(dense_a)
    dense_b = concatenate([conv_b, conv_a, inputs], axis=concate_axis)
    conv_c = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(dense_b)
    normed = BatchNormalization(axis=batchnorm_axis)(conv_c)
    pooled = Dropout(drop_rate)(MaxPooling2D(pool_size=(2, 2))(normed))
    # Skip connection keeps full resolution: normalized features + raw input.
    skip = concatenate([normed, inputs], axis=concate_axis)
    return skip, pooled
def bottom_block(inputs, n_filters):
    """Bottleneck stage: same dense triple-conv as the encoder stages,
    but with no pooling and no skip output.

    Returns the batch-normalized, dropout-regularized feature map.
    """
    conv_a = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(inputs)
    conv_b = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(
        concatenate([conv_a, inputs], axis=concate_axis))
    conv_c = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(
        concatenate([conv_b, conv_a, inputs], axis=concate_axis))
    normed = BatchNormalization(axis=batchnorm_axis)(conv_c)
    return Dropout(drop_rate)(normed)
def up_block(skip_input, ups_input, n_filters):
    """One decoder stage of the dense U-Net.

    Upsamples the coarser feature map 2x, merges it with the encoder
    skip connection, then applies three densely connected 3x3 convs
    followed by batch-norm and dropout.
    """
    upsampled = UpSampling2D(size=(2, 2))(ups_input)
    merged = concatenate([upsampled, skip_input], axis=concate_axis)
    conv_a = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(merged)
    conv_b = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(
        concatenate([conv_a, merged], axis=concate_axis))
    conv_c = Conv2D(n_filters, 3, activation='elu', padding='same',
                    kernel_initializer='he_normal')(
        concatenate([conv_b, conv_a, merged], axis=concate_axis))
    normed = BatchNormalization(axis=batchnorm_axis)(conv_c)
    return Dropout(drop_rate)(normed)
def get_model():
    """Build and compile the dense U-Net for 512x512 single-channel images.

    Six encoder stages, a 512-filter bottleneck, and six mirrored decoder
    stages; a final 1x1 sigmoid conv produces a per-pixel binary mask.
    Compiled with Adam and binary cross-entropy.
    """
    inputs = Input((512, 512, 1))

    # Encoder: collect one skip tensor per downsampling stage.
    encoder_widths = (16, 32, 64, 128, 256, 256)
    skips = []
    x = inputs
    for width in encoder_widths:
        skip, x = down_block(x, width)
        skips.append(skip)

    x = bottom_block(x, 512)

    # Decoder: consume the skips in reverse, mirroring the encoder widths.
    for skip, width in zip(reversed(skips), reversed(encoder_widths)):
        x = up_block(skip, x, width)

    outputs = Conv2D(1, 1, activation='sigmoid')(x)
    model = Model(inputs, outputs)
    # NOTE(review): `lr=` is the legacy Adam keyword; recent Keras releases
    # only accept `learning_rate=` — confirm against the pinned version.
    model.compile(optimizer=Adam(lr=0.001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Add Comment
Please sign in to add a comment.