'''Utilities to deploy a model as detector in a fully-convolutional fashion.

Author: Markus Rempfler
'''
import numpy as np
def get_input_shape(model):
    '''Return the input shape declared by the model's first layer.

    Parameters
    ----------
    model : Model type
        Keras-style model exposing ``model.layers``, whose first layer
        carries an ``input_shape`` attribute.

    Returns
    -------
    input_shape : tuple
        The first layer's ``input_shape``. Presumably of the form
        ``(batch_size, height, width, channels)`` — the callers below
        read ``[0]`` as batch size and ``[1:]`` as patch shape; TODO
        confirm against the model framework in use.
    '''
    return model.layers[0].input_shape
def get_batch_size(model):
    '''Return the batch size the model was built for.

    Parameters
    ----------
    model : Model type
        Model compatible with :func:`get_input_shape`.

    Returns
    -------
    batch_size : int or None
        First entry of the model's input shape (the batch dimension;
        may be None for models with a flexible batch size).
    '''
    return get_input_shape(model)[0]
def get_output_shape(model):
    '''Return the output shape declared by the model's last layer.

    Parameters
    ----------
    model : Model type
        Keras-style model exposing ``model.layers``, whose last layer
        carries an ``output_shape`` attribute.

    Returns
    -------
    output_shape : tuple
        The last layer's ``output_shape``.
    '''
    return model.layers[-1].output_shape
def normalize(img_patch, eps=1e-9):
    '''Shift and scale an image to zero mean and unit standard deviation.

    Parameters
    ----------
    img_patch : Image type
        Image to be normalized.
    eps : float
        standard deviation regularizer; guards against division by
        zero on constant patches.

    Returns
    -------
    normalized : Image type
        ``(img_patch - mean) / (std + eps)``.
    '''
    mean = img_patch.mean()
    std = img_patch.std()
    return (img_patch - mean) / (std + eps)
def predict_complete(model, image, border_size=None, step_size=None):
    '''applies a model to an entire image by stitching.

    Parameters
    ----------
    model : Model type
        Model that is applied in an FCN fashion. Needs to provide
        model.predict() and be compatible to get_input_shape()
    image : Image type
        Image to be processed.
    border_size : int
        Size of border of processable image patches.
    step_size : tuple of (int, int)
        Step size for slicing. Either border_size or step_size
        need to be defined.

    Returns
    -------
    prediction : Image type
        Prediction of given model for the provided image.

    Raises
    ------
    ValueError
        If neither border_size nor step_size is given.

    Notes
    -----
    Currently only working with single channel response models.
    '''
    image_shape = image.shape
    input_shape = get_input_shape(model)
    batch_size = input_shape[0]
    patch_size = input_shape[1:]

    # derive whichever of border_size / step_size was not given.
    if step_size is None and border_size is not None:
        step_size = (patch_size[0] - 2 * border_size,
                     patch_size[1] - 2 * border_size)
    elif step_size is not None and border_size is None:
        # floor division keeps border_size an int for the slice
        # arithmetic below (plain '/' would yield a float on Python 3).
        border_size = (patch_size[0] - step_size[0]) // 2
    elif step_size is None and border_size is None:
        raise ValueError('Either border_size or step_size need to be defined')

    # corners of patches. Materialized as lists because Python 3 range
    # objects do not support append().
    x = list(range(0, image_shape[0] - patch_size[0], step_size[0]))
    y = list(range(0, image_shape[1] - patch_size[1], step_size[1]))
    # add the last valid corner so the right/bottom image borders are covered.
    x.append(image_shape[0] - patch_size[0])
    y.append(image_shape[1] - patch_size[1])
    xy = [(i, j) for i in x for j in y]

    def patch_generator(batch_size):
        '''yields normalized image patches in batches of batch_size.'''
        img_batch = []
        while True:
            for i, j in xy:
                img_patch = image[i:i + patch_size[0], j:j + patch_size[1]]
                img_patch = normalize(img_patch)
                img_batch.append(img_patch)
                if len(img_batch) == batch_size:
                    yield np.asarray(img_batch)
                    img_batch = []
            # pad the final, incomplete batch by repeating its last
            # patch; the padded responses are simply never stitched.
            if len(img_batch) > 0:
                last = img_batch[-1]
                for _ in range(batch_size - len(img_batch)):
                    img_batch.append(last)
                yield np.asarray(img_batch)
            break

    # predict on each patch.
    # TODO consider stitching and prediction concurrently.
    # NOTE The squeeze this is only compatible with single channel responses.
    responses = []
    for batch in patch_generator(batch_size):
        for rmap in model.predict(batch, batch_size=batch_size):
            responses.append(np.squeeze(rmap, axis=-1))

    # re-assemble: each patch contributes its interior (border cropped
    # on the top/left except at the image edge).
    # NOTE this assumes that input and output have the same shape.
    response_map = np.zeros([image_shape[0], image_shape[1]])
    for idx, (i, j) in enumerate(xy):
        offsetx = border_size if i > 0 else 0
        offsety = border_size if j > 0 else 0
        response_map[i + offsetx:i + patch_size[0],
                     j + offsety:j + patch_size[1]] = \
            responses[idx][offsetx:, offsety:]
    return response_map
class Detector(object):
    '''Wrapper class around detection models for usage in
    a fully convolutional setting.
    '''

    def __init__(self, model, **kwargs):
        '''
        Parameters
        ----------
        model : Model type
            Model to be wrapped; must be compatible with
            predict_complete().
        **kwargs : dict
            Keyword arguments forwarded to predict_complete() on every
            call (e.g. border_size or step_size).
        '''
        self.model = model
        self.pred_kwargs = kwargs

    def predict_complete(self, input_img):
        '''applies the wrapped model to an entire image by stitching.

        Parameters
        ----------
        input_img : Image type
            Image to be processed.

        Returns
        -------
        prediction : Image type
            Prediction of the wrapped model for the given image.
        '''
        return predict_complete(self.model, input_img, **self.pred_kwargs)
def make_detector(model, **kwargs):
    '''creates a convolutional detector from a given model.

    Parameters
    ----------
    model : Model type
        Model to be wrapped.
    **kwargs : dict
        Keyword arguments forwarded to Detector (and from there to
        predict_complete(), e.g. border_size or step_size).

    Returns
    -------
    detector : Detector
        Detector wrapping the given model.
    '''
    return Detector(model, **kwargs)
Add Comment
Please, Sign In to add comment