Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #!/usr/bin/env python2
- # -*- coding: utf-8 -*-
- """
- Created on Sat Jun 24 23:00:58 2017
- @author: andreimouraviev
- """
- import numpy as np
- from keras.layers import Input, Dense, Convolution2D, merge, Conv2D,Conv2DTranspose
- from keras.layers import MaxPooling2D, UpSampling2D,Dropout, Flatten,concatenate
- from keras.models import Model
- from keras.utils import np_utils
- from keras import backend as K
- from keras.models import model_from_json
- from keras.optimizers import Adam
- from keras.callbacks import ModelCheckpoint, LearningRateScheduler
- import keras
# Laplace-style smoothing term: keeps the ratio defined (and gradient finite)
# when both masks are empty.
smooth = 1.


def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two masks, computed over flattened tensors.

    Returns a Keras-backend scalar in (0, 1]; higher means better overlap.
    """
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    denom = K.sum(truth) + K.sum(pred) + smooth
    return (2. * overlap + smooth) / denom
def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so minimizing the loss maximizes overlap."""
    return -1. * dice_coef(y_true, y_pred)
- #%%
# --- Experiment configuration ---
img_rows,img_cols=512,512      # input image height / width in pixels
N = 100                        # number of synthetic training samples
DropP=0                        # dropout rate for all Dropout layers (0 = disabled)
Optimizer=Adam(lr=1e-5)        # low learning rate; lr kwarg matches this Keras version
LossF = dice_coef_loss         # negative Dice coefficient (see dice_coef_loss above)
- #%%
# ---------------------------------------------------------------------------
# U-Net graph: 4-stage contracting path (32 -> 256 filters), a 512-filter
# bottleneck, and a symmetric expanding path with skip connections.
# ---------------------------------------------------------------------------
inputs = Input(( img_rows, img_cols, 1))  # single-channel images, shape (512, 512, 1)

# --- Contracting path: two 3x3 relu convs, then 2x2 max-pool per stage. ---
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
pool1 = Dropout(DropP)(pool1)  # DropP == 0 above, so dropout is effectively off

conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
pool2 = Dropout(DropP)(pool2)

conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
pool3 = Dropout(DropP)(pool3)

conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
pool4 = Dropout(DropP)(pool4)

# --- Bottleneck at 1/16 spatial resolution. ---
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

# --- Expanding path: 2x2 transposed-conv upsample, concat with the matching
#     encoder feature map along the channel axis (axis=3), then two convs. ---
up6 = concatenate([Conv2DTranspose(256,(2, 2), strides=(2, 2), padding='same')(conv5), conv4],name='up6', axis=3)
up6 = Dropout(DropP)(up6)
conv6 = Conv2D(256,(3, 3), activation='relu', padding='same')(up6)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

up7 = concatenate([Conv2DTranspose(128,(2, 2), strides=(2, 2), padding='same')(conv6), conv3],name='up7', axis=3)
up7 = Dropout(DropP)(up7)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

up8 = concatenate([Conv2DTranspose(64,(2, 2), strides=(2, 2), padding='same')(conv7), conv2],name='up8', axis=3)
up8 = Dropout(DropP)(up8)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

up9 = concatenate([Conv2DTranspose(32,(2, 2), strides=(2, 2), padding='same')(conv8), conv1],name='up9',axis=3)
up9 = Dropout(DropP)(up9)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)  # per-pixel foreground probability
# Flatten the map to (batch, H*W) so that sample_weight_mode='temporal' below can
# assign one weight per output pixel (Keras treats each pixel as a "timestep").
conv10 = keras.layers.Flatten()(conv10)
model = Model(inputs=inputs, outputs=conv10)
# metrics track the unweighted Dice coefficient; sample_weight_mode='temporal'
# makes fit() accept a 2-D (batch, H*W) sample_weight array.
model.compile(optimizer=Optimizer, loss=LossF, metrics=[dice_coef], sample_weight_mode='temporal' )
- #%%
# Synthetic data to exercise the pipeline: uniform-noise inputs in [0, 1)
# and random binary masks, both shaped (N, H, W, 1).
image_data = np.random.rand(N,img_rows,img_cols,1)
target_data = np.random.randint(2 ,size=(N,img_rows,img_cols,1) )
def get_class_weights(target):
    """Compute dampened inverse-frequency weights for a binary mask.

    Accepts masks encoded as {0, 1} or {0, 255} (or any {0, positive-max}
    pair): the mask is first normalized by its max so foreground becomes 1.
    The weight for each class is the *other* class's frequency raised to
    0.25 (damping), normalized so the two weights sum to 2.

    Parameters: target -- numpy array of class labels (binary mask).
    Returns: float32 array of shape (2,): [weight_class0, weight_class1].
    """
    m = target.max()
    if m > 0:
        # Normalize so foreground pixels compare equal to 1 (handles 0/255 masks).
        target = target / m
    # BUG FIX: the original counted positives with `np.sum(target/255 == 1)`
    # AFTER normalizing by target.max(), which is never true once values are
    # in {0, 1} -- the positive count was always 0 and class 0 got weight 0.
    class_frequencies = np.array([np.sum(target == 0), np.sum(target == 1)])
    # Swap with [[1, 0]]: the rarer class gets the larger weight; ** 0.25 damps
    # the imbalance so weights stay in a usable range.
    class_weights = (class_frequencies[[1, 0]]) ** 0.25
    class_weights = class_weights / np.sum(class_weights) * 2.
    return class_weights.astype(np.float32)
# Hand-picked per-class weights (get_class_weights is unused here):
# positive (foreground) pixels get 10x the weight of background.
class_W = np.array([1,10],dtype=np.float32) # positive classes have more weight
# Fancy-indexing with the {0,1} mask maps every pixel to its class weight;
# W_map has the same shape as target_data: (N, H, W, 1).
W_map= class_W[target_data]
- #%%
# Flatten the per-pixel weight map to (N, H*W), the layout Keras expects for
# sample_weight under sample_weight_mode='temporal'. A single vectorized
# reshape replaces the original per-sample Python loop over a pre-allocated
# zeros buffer (same C-order flattening as smpl.flatten()); astype(np.float64)
# preserves the dtype the np.zeros buffer had.
smpl_w = W_map.astype(np.float64).reshape(W_map.shape[0], -1)
- #%%
# Flatten targets to (N, H*W) to match the model's Flatten()-ed output.
target_data_flat = target_data.reshape( (N,img_rows*img_cols) )
# Train with per-pixel weighting: smpl_w supplies one weight per output
# "timestep" (pixel) via sample_weight_mode='temporal' set at compile time.
# batch_size=1 keeps memory low for 512x512 inputs.
model.fit(image_data, target_data_flat, batch_size=1, epochs=2,\
          verbose=1,class_weight=None,sample_weight=smpl_w)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement