#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 24 23:00:58 2017

@author: andreimouraviev
"""

import numpy as np

from keras.layers import Input, Conv2D, Conv2DTranspose
from keras.layers import MaxPooling2D, Dropout, Flatten, concatenate
from keras.models import Model
from keras import backend as K
from keras.optimizers import Adam

# Soft Dice coefficient; `smooth` avoids division by zero on empty masks.
smooth = 1.


def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)


def dice_coef_loss(y_true, y_pred):
    # Negated so that minimizing the loss maximizes the Dice overlap.
    return -dice_coef(y_true, y_pred)

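# Minimal sanity check of dice_coef (a sketch; `_toy_mask` is just an
# illustrative dummy array): two identical binary masks should score 1.0.
_toy_mask = K.variable(np.ones((1, 4, 4, 1), dtype=np.float32))
print(K.eval(dice_coef(_toy_mask, _toy_mask)))  # -> 1.0
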
#%%
# Image geometry and training hyper-parameters.
img_rows, img_cols = 512, 512
N = 100                      # number of (synthetic) training images

DropP = 0                    # dropout rate; 0 disables dropout
Optimizer = Adam(lr=1e-5)
LossF = dice_coef_loss

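# With four 2x2 max-poolings in the encoder, img_rows and img_cols must be
# divisible by 16 so the Conv2DTranspose outputs line up with the encoder
# feature maps at each concatenate (a quick sanity check for this architecture).
assert img_rows % 16 == 0 and img_cols % 16 == 0
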
#%%
# U-Net: 4-level encoder (32 -> 256 filters), 512-filter bottleneck, and a
# decoder with Conv2DTranspose upsampling plus skip connections.

inputs = Input((img_rows, img_cols, 1))

# Encoder
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
pool1 = Dropout(DropP)(pool1)

conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
pool2 = Dropout(DropP)(pool2)

conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
pool3 = Dropout(DropP)(pool3)

conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
pool4 = Dropout(DropP)(pool4)

# Bottleneck
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

# Decoder: transposed convolutions upsample, then concatenate with the
# corresponding encoder feature maps (skip connections).
up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], name='up6', axis=3)
up6 = Dropout(DropP)(up6)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], name='up7', axis=3)
up7 = Dropout(DropP)(up7)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], name='up8', axis=3)
up8 = Dropout(DropP)(up8)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], name='up9', axis=3)
up9 = Dropout(DropP)(up9)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

# Per-pixel sigmoid output, flattened so each pixel becomes one output
# "timestep" (needed for per-pixel weights with sample_weight_mode='temporal').
conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
conv10 = Flatten()(conv10)

model = Model(inputs=inputs, outputs=conv10)
# 'temporal' sample_weight_mode lets fit() accept one weight per output
# element, i.e. a (batch, img_rows*img_cols) array of per-pixel weights.
model.compile(optimizer=Optimizer, loss=LossF, metrics=[dice_coef],
              sample_weight_mode='temporal')

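# Quick shape check (a sketch): the flattened output should be
# (None, img_rows * img_cols), matching the flattened targets and the
# per-pixel sample-weight array built below.
print(model.output_shape)  # expected: (None, 262144) for 512x512 inputs
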
#%%
# Synthetic data: random grayscale images and random binary masks.
image_data = np.random.rand(N, img_rows, img_cols, 1)
target_data = np.random.randint(2, size=(N, img_rows, img_cols, 1))

def get_class_weights(target):
    # Inverse-frequency class weights: normalize the labels to {0, 1}, count
    # each class, then swap the counts so the rarer class gets the larger weight.
    target = target / target.max()
    class_frequencies = np.array([np.sum(target == 0), np.sum(target == 1)])
    class_weights = (class_frequencies[[1, 0]]) ** 0.25
    class_weights = class_weights / np.sum(class_weights) * 2.
    class_weights = class_weights.astype(np.float32)
    return class_weights


class_W = np.array([1, 10], dtype=np.float32)  # positive class gets more weight
W_map = class_W[target_data]                   # per-pixel weight map, same shape as target_data

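# Sketch of how get_class_weights could be used in place of the hand-picked
# 1:10 ratio above (frequency-balanced weights instead of fixed ones):
# class_W = get_class_weights(target_data)
# W_map = class_W[target_data]
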
#%%
# Flatten the weight map to (N, img_rows*img_cols) so each pixel weight lines
# up with one "timestep" of the flattened model output.
smpl_w = np.zeros((W_map.shape[0], W_map.shape[1] * W_map.shape[2]))
for i, smpl in enumerate(W_map):
    smpl_w[i, :] = smpl.flatten()
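
# Equivalent vectorized form (a sketch; avoids the Python loop):
# smpl_w = W_map.reshape(W_map.shape[0], -1).astype(np.float32)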
#%%
# Targets are flattened to match the (N, img_rows*img_cols) model output, and
# smpl_w supplies one weight per pixel via sample_weight_mode='temporal'.
target_data_flat = target_data.reshape((N, img_rows * img_cols))
model.fit(image_data, target_data_flat, batch_size=1, epochs=2,
          verbose=1, class_weight=None, sample_weight=smpl_w)
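
# After training, predictions come back flattened; reshape them to images to
# inspect the masks (a sketch using the same synthetic data):
pred = model.predict(image_data[:2], batch_size=1)
pred_masks = pred.reshape((-1, img_rows, img_cols))
print(pred_masks.shape)  # (2, 512, 512)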