Guest User

Untitled

a guest
Apr 26th, 2018
72
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 2.96 KB | None | 0 0
# Shared hyper-parameters for every block in the network.
concate_axis = -1    # channels-last concatenation axis (assumes NHWC data format)
batchnorm_axis = -1  # normalize over the channels axis (channels-last)
drop_rate = 0.1      # dropout rate applied at the end of each block
  4.  
  5. def down_block(inputs, n_filters):
  6. layer_1 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
  7. layer_2 = concatenate([layer_1, inputs], axis=concate_axis)
  8. layer_3 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(layer_2)
  9. layer_4 = concatenate([layer_3, layer_1, inputs], axis=concate_axis)
  10. layer_5 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(layer_4)
  11. layer_6 = BatchNormalization(axis=batchnorm_axis)(layer_5)
  12. layer_7 = MaxPooling2D(pool_size=(2, 2))(layer_6)
  13. layer_8 = Dropout(drop_rate)(layer_7)
  14. skip = concatenate([layer_6, inputs], axis=concate_axis)
  15. return skip, layer_8
  16.  
  17. def bottom_block(inputs, n_filters):
  18. layer_1 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
  19. layer_2 = concatenate([layer_1, inputs], axis=concate_axis)
  20. layer_3 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(layer_2)
  21. layer_4 = concatenate([layer_3, layer_1, inputs], axis=concate_axis)
  22. layer_5 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(layer_4)
  23. layer_6 = BatchNormalization(axis=batchnorm_axis)(layer_5)
  24. layer_7 = Dropout(drop_rate)(layer_6)
  25. return layer_7
  26.  
  27. def up_block(skip_input, ups_input, n_filters):
  28. layer_1 = UpSampling2D(size=(2, 2))(ups_input)
  29. layer_2 = concatenate([layer_1, skip_input], axis=concate_axis)
  30. layer_3 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(layer_2)
  31. layer_4 = concatenate([layer_3, layer_2], axis=concate_axis)
  32. layer_5 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(layer_4)
  33. layer_6 = concatenate([layer_5, layer_3, layer_2], axis=concate_axis)
  34. layer_7 = Conv2D(n_filters, 3, activation = 'elu', padding = 'same', kernel_initializer = 'he_normal')(layer_6)
  35. layer_8 = BatchNormalization(axis=batchnorm_axis)(layer_7)
  36. layer_9 = Dropout(drop_rate)(layer_8)
  37. return layer_9
  38.  
  39. def get_model():
  40. inputs = Input((512, 512, 1))
  41. skip1, down1 = down_block(inputs, 16)
  42. skip2, down2 = down_block(down1, 32)
  43. skip3, down3 = down_block(down2, 64)
  44. skip4, down4 = down_block(down3, 128)
  45. skip5, down5 = down_block(down4, 256)
  46. skip6, down6 = down_block(down5, 256)
  47. bottom = bottom_block(down6, 512)
  48. up_1 = up_block(skip6, bottom, 256)
  49. up_2 = up_block(skip5, up_1, 256)
  50. up_3 = up_block(skip4, up_2, 128)
  51. up_4 = up_block(skip3, up_3, 64)
  52. up_5 = up_block(skip2, up_4, 32)
  53. up_6 = up_block(skip1, up_5, 16)
  54. outputs = Conv2D(1, 1, activation = 'sigmoid')(up_6)
  55.  
  56. model = Model(inputs, outputs)
  57. model.compile(optimizer = Adam(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])
  58. return model
Add Comment
Please, Sign In to add comment