kalabukdima

train_fcn

Mar 30th, 2018
import numpy as np
from skimage.io import imshow, imread
import pandas as pd
import matplotlib.pyplot as plt

from time import time
import itertools

import sklearn.preprocessing


import keras.models
import keras.layers
import keras.optimizers
import keras.metrics
import keras.callbacks
import keras.utils
from keras.regularizers import l1, l2

import cv2
import scipy.ndimage
import imgaug
from imgaug import augmenters as iaa


NAME = '13_bin_multiple_images'   # experiment name, used for the log dir and checkpoint file
LR = 0.00003                      # Adam learning rate
BATCH_SIZE = 16
EPOCHS = 2048
NUM_CLASSES = 2                   # binary segmentation
IMAGE_SHAPE = (256, 256)          # size of the random patches cut from the big images
TOTAL_FILES = 21                  # number of source images/masks on disk


def load_train_image(index):
    assert 1 <= index <= TOTAL_FILES
    return imread('regions/binary/g{}_g.tif'.format(index))

def load_val_image(index):
    assert 1 <= index <= TOTAL_FILES
    # image 6 uses a different file suffix
    if index == 6:
        return imread('regions/binary/g6_b.tif')
    return imread('regions/binary/g{}_y.tif'.format(index))

def load_mask(index):
    assert 1 <= index <= TOTAL_FILES
    mask = imread('regions/binary/g{}_m.tif'.format(index))[:, :, 0]
    mask[mask > 0] = 1  # binarize: any non-zero pixel is foreground
    return mask

# Augmentation pipeline (flips, per-channel brightness, occasional blur).
# Note: `aug` is defined here but never actually applied in image_generator below.
aug = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        # iaa.ContrastNormalization((0.75, 1.5)),
        iaa.Multiply((0.9, 1.1), per_channel=0.6),
        iaa.Sometimes(0.4,
            iaa.GaussianBlur(sigma=(0, 0.5))
        )
    ])

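# --- Example (not in the original paste): one way `aug` could be wired in ---
# A minimal sketch, assuming the imgaug 0.2.x API (Sequential.augment_images on a
# batch of HxWxC uint8 images). `photometric_aug` and `augmented_generator` are
# hypothetical names; only the colour/blur augmenters are applied here so the masks
# stay aligned, since the flips above would also have to be mirrored onto the labels.
photometric_aug = iaa.Sequential([
    iaa.Multiply((0.9, 1.1), per_channel=0.6),
    iaa.Sometimes(0.4, iaa.GaussianBlur(sigma=(0, 0.5))),
])

def augmented_generator(batch_size, load_image_fn, load_mask_fn):
    for images, labels in image_generator(batch_size, load_image_fn, load_mask_fn):
        # augment_images accepts an (N, H, W, C) uint8 array and returns the same shape
        yield photometric_aug.augment_images(images.astype(np.uint8)), labels
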

def get_rotated_subregion(image, shape, angle, shift=(0, 0), scale=1):
    # Half-diagonal of the output patch: centres are clamped so that a patch
    # rotated by any angle still lies inside the source image.
    radius = max(shape) / 2 ** 0.5
    # Clamp the (row, col) shift into bounds and swap it to (x, y) for OpenCV.
    shift = (min(max(shift[1], radius), image.shape[1] - radius),
             min(max(shift[0], radius), image.shape[0] - radius))
    cosa = np.cos(angle)
    sina = np.sin(angle)

    # Compose the affine transform: move the chosen centre to the origin,
    # scale, rotate, then move it to the centre of the output patch.
    matShiftB = np.array([[1., 0., -shift[0]], [0., 1., -shift[1]], [0., 0., 1.]])
    matRot = np.array([[cosa, sina, 0.], [-sina, cosa, 0.], [0., 0., 1.]])
    matShiftF = np.array([[1., 0., shape[0] / 2.], [0., 1., shape[1] / 2.], [0., 0., 1.]])
    matScale = np.array([[scale, 0., 0.], [0., scale, 0.], [0., 0., 1.]])
    matTotal = matShiftF.dot(matRot.dot(matScale.dot(matShiftB)))

    # warpAffine expects the top 2x3 block of the 3x3 homogeneous matrix.
    return cv2.warpAffine(image, matTotal[:2, :], shape)

def image_generator(batch_size, load_image, load_mask):
    while True:
        # Pick one source image per batch and cut random rotated patches from it.
        image_index = np.random.randint(1, TOTAL_FILES + 1)
        big_image = load_image(image_index)
        mask = load_mask(image_index)

        angles = np.random.uniform(0, np.pi * 2, (batch_size,))
        shifts_x = np.random.uniform(0, mask.shape[0], (batch_size,))
        shifts_y = np.random.uniform(0, mask.shape[1], (batch_size,))

        # The same angle/shift is used for the image and its mask so each
        # patch/label pair stays aligned.
        images = np.array([get_rotated_subregion(
                big_image, IMAGE_SHAPE, angle, shift
            ) for angle, shift in zip(angles, zip(shifts_x, shifts_y))
        ])
        Ys = np.array([get_rotated_subregion(mask, IMAGE_SHAPE,
            angle, shift) for angle, shift in zip(angles,
                zip(shifts_x, shifts_y))])
        labels = Ys.reshape(batch_size, IMAGE_SHAPE[0], IMAGE_SHAPE[1], 1)
        # labels = keras.utils.to_categorical(Ys.flatten(),
        #         NUM_CLASSES).reshape(batch_size,
        #                              IMAGE_SHAPE[0],
        #                              IMAGE_SHAPE[1], NUM_CLASSES)
        yield images, labels

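# --- Example (not in the original paste): quick sanity check of the generator ---
# A minimal sketch that pulls a single batch and checks the shapes the model
# below expects (3-channel patches, single-channel binary labels). The helper
# name `check_generator` is hypothetical; the snippet is safe to delete.
def check_generator():
    images, labels = next(image_generator(BATCH_SIZE, load_train_image, load_mask))
    assert images.shape == (BATCH_SIZE, IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3)
    assert labels.shape == (BATCH_SIZE, IMAGE_SHAPE[0], IMAGE_SHAPE[1], 1)
    print('batch OK:', images.dtype, images.shape, labels.shape)
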

def get_model():
    CONVS = 2     # convolutions per resolution level
    LAYERS = 6    # number of down-/up-sampling levels
    FILTERS = 8   # filters at the first level, doubled at each level

    dataInput = keras.layers.Input(shape=(None, None, 3))
    x = dataInput
    # -------- Encoder --------
    lstMaxPools = []
    for cc in range(LAYERS):
        for ii in range(CONVS):
            x = keras.layers.Conv2D(filters=FILTERS * (2 ** cc),
                                    kernel_size=(3, 3),
                                    padding='same',
                                    activation='relu')(x)
        lstMaxPools.append(x)  # keep pre-pooling features for the skip connections
        x = keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
    # -------- Decoder --------
    for cc in range(LAYERS):
        for ii in range(CONVS):
            x = keras.layers.Conv2D(filters=FILTERS * (2 ** (LAYERS - 1 - cc)),
                                    kernel_size=(3, 3),
                                    padding='same',
                                    activation='relu')(x)
        x = keras.layers.UpSampling2D(size=(2, 2))(x)
        # U-Net-style skip connections, omitted for the two shallowest levels
        if cc + 2 < LAYERS:
            x = keras.layers.concatenate([x, lstMaxPools[-1 - cc]], axis=-1)

    # 1x1 convolution: per-pixel classifier (emulates a Dense layer)
    if NUM_CLASSES == 2:
        x = keras.layers.Conv2D(filters=1, kernel_size=(1, 1), padding='valid',
                                activation='sigmoid')(x)
    else:
        x = keras.layers.Conv2D(filters=NUM_CLASSES, kernel_size=(1, 1),
                                padding='valid')(x)
        x = keras.layers.Activation('softmax')(x)
    model = keras.models.Model(dataInput, x)

    # Loss and metric assume the binary (sigmoid) head above.
    model.compile(keras.optimizers.Adam(lr=LR), 'binary_crossentropy',
                  ['binary_accuracy'])
    return model


def limit_gpu_usage():
    # Keras 2 with the TensorFlow 1.x backend: cap this process at ~20% of GPU memory.
    config = keras.backend.tf.ConfigProto(
        gpu_options=keras.backend.tf.GPUOptions(per_process_gpu_memory_fraction=0.2))
    keras.backend.tensorflow_backend.set_session(keras.backend.tf.Session(config=config))


def fit():
    steps = 192
    steps_val = steps

    model = get_model()

    hist = model.fit_generator(
        image_generator(BATCH_SIZE, load_train_image, load_mask),
        steps_per_epoch=steps,
        validation_data=image_generator(BATCH_SIZE, load_val_image, load_mask),
        validation_steps=steps_val,
        epochs=EPOCHS,
        callbacks=[
            keras.callbacks.TensorBoard(
                './logs/fcn/' + NAME,
                write_images=False,
                batch_size=BATCH_SIZE
            ),
            keras.callbacks.ModelCheckpoint(
                NAME + '.h5', verbose=False,
                save_best_only=True, monitor='val_loss'
            )
        ]
    )
    return hist


if __name__ == '__main__':
    limit_gpu_usage()

    fit()
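
# --- Example (not in the original paste): using the saved checkpoint ---
# A minimal sketch of loading the best checkpoint written by ModelCheckpoint
# and predicting a mask for one validation image. `predict_mask` and the 0.5
# threshold are assumptions; the model is fully convolutional, but with six
# pooling/upsampling levels the image height and width must be multiples of
# 2**6 = 64 for the skip connections to line up.
def predict_mask(index):
    model = keras.models.load_model(NAME + '.h5')
    image = load_val_image(index)
    probs = model.predict(image[np.newaxis].astype(np.float32))[0, :, :, 0]
    return (probs > 0.5).astype(np.uint8)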