keras preprocessing for boxes
larsklein, Jul 13th, 2017

  1. """Fairly basic set of tools for real-time data augmentation on image data.
  2. Can easily be extended to include new transformations,
  3. new preprocessing methods, etc...
  4. """
  5. from __future__ import absolute_import
  6. from __future__ import print_function
  7.  
  8. import numpy as np
  9. import re
  10. from scipy import linalg
  11. import scipy.ndimage as ndi
  12. from six.moves import range
  13. import os
  14. import threading
  15. import warnings
  16. import multiprocessing.pool
  17. from functools import partial
  18.  
  19. from .. import backend as K
  20.  
  21. try:
  22.     from PIL import Image as pil_image
  23. except ImportError:
  24.     pil_image = None
  25.  
  26.  
  27.  
  28.  
  29. def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
  30.                     fill_mode='nearest', cval=0.):
  31.     """Performs a random rotation of a Numpy image tensor.
  32.  
  33.    # Arguments
  34.        x: Input tensor. Must be 3D.
  35.        rg: Rotation range, in degrees.
  36.        row_axis: Index of axis for rows in the input tensor.
  37.        col_axis: Index of axis for columns in the input tensor.
  38.        channel_axis: Index of axis for channels in the input tensor.
  39.        fill_mode: Points outside the boundaries of the input
  40.            are filled according to the given mode
  41.            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
  42.        cval: Value used for points outside the boundaries
  43.            of the input if `mode='constant'`.
  44.  
  45.    # Returns
  46.        Rotated Numpy image tensor.
  47.    """
  48.     theta = np.pi / 180 * np.random.uniform(-rg, rg)
  49.     rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
  50.                                 [np.sin(theta), np.cos(theta), 0],
  51.                                 [0, 0, 1]])
  52.  
  53.     h, w = x.shape[row_axis], x.shape[col_axis]
  54.     transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
  55.     x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
  56.     return x
  57.  
  58. def random_rotation_with_boxes(x, boxes, rg, row_axis=1, col_axis=2, channel_axis=0,
  59.                     fill_mode='nearest', cval=0.):
  60.     """Performs a random rotation of a Numpy image tensor, along with the corresponding bounding boxes.
  61.  
  62.    # Arguments
  63.        x: Input tensor. Must be 3D.
  64.        boxes: a list of bounding boxes [xmin, ymin, xmax, ymax], values in [0,1].
  65.        rg: Rotation range, in degrees.
  66.        row_axis: Index of axis for rows in the input tensor.
  67.        col_axis: Index of axis for columns in the input tensor.
  68.        channel_axis: Index of axis for channels in the input tensor.
  69.        fill_mode: Points outside the boundaries of the input
  70.            are filled according to the given mode
  71.            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
  72.        cval: Value used for points outside the boundaries
  73.            of the input if `mode='constant'`.
  74.  
  75.    # Returns
  76.        Rotated Numpy image tensor,
  77.        the rotated bounding boxes, and the rotated box vertices.
  78.    """
  79.  
  80.     # sample parameter for augmentation
  81.     theta = np.pi / 180 * np.random.uniform(-rg, rg)
  82.  
  83.     # apply to image
  84.     rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
  85.                                 [np.sin(theta), np.cos(theta), 0],
  86.                                 [0, 0, 1]])
  87.  
  88.     h, w = x.shape[row_axis], x.shape[col_axis]
  89.     transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
  90.     x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
  91.  
  92.     # apply to vertices
  93.     vertices = boxes_to_vertices(boxes)
  94.     vertices = vertices.reshape((-1, 2))
  95.  
  96.     # apply offset to have pivot point at [0.5, 0.5]
  97.     vertices -= [0.5, 0.5]
  98.  
  99.     # apply rotation, we only need the rotation part of the matrix
  100.     vertices = np.dot(vertices, rotation_matrix[:2, :2])
  101.     vertices += [0.5, 0.5]
  102.  
  103.     boxes = vertices_to_boxes(vertices)
  104.  
  105.     return x, boxes, vertices
  106.  
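# --- Usage sketch (added, not part of the original paste) --------------------
# Rotate a dummy (H, W, C) image together with its normalized boxes. With
# channels_last data the axes of a single image are row_axis=0, col_axis=1,
# channel_axis=2. The returned boxes are the axis-aligned bounding boxes of
# the rotated vertices, so they can grow relative to the originals.
def _demo_random_rotation_with_boxes():
    img = np.zeros((64, 64, 3), dtype='float32')
    boxes = np.array([[0.2, 0.3, 0.6, 0.7]])  # [xmin, ymin, xmax, ymax] in [0, 1]
    img_rot, boxes_rot, vertices = random_rotation_with_boxes(
        img, boxes, rg=15., row_axis=0, col_axis=1, channel_axis=2)
    return img_rot, boxes_rot, vertices
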
  107. def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
  108.                  fill_mode='nearest', cval=0.):
  109.     """Performs a random spatial shift of a Numpy image tensor.
  110.  
  111.    # Arguments
  112.        x: Input tensor. Must be 3D.
  113.        wrg: Width shift range, as a float fraction of the width.
  114.        hrg: Height shift range, as a float fraction of the height.
  115.        row_axis: Index of axis for rows in the input tensor.
  116.        col_axis: Index of axis for columns in the input tensor.
  117.        channel_axis: Index of axis for channels in the input tensor.
  118.        fill_mode: Points outside the boundaries of the input
  119.            are filled according to the given mode
  120.            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
  121.        cval: Value used for points outside the boundaries
  122.            of the input if `mode='constant'`.
  123.  
  124.    # Returns
  125.        Shifted Numpy image tensor.
  126.    """
  127.     h, w = x.shape[row_axis], x.shape[col_axis]
  128.     tx = np.random.uniform(-hrg, hrg) * h
  129.     ty = np.random.uniform(-wrg, wrg) * w
  130.     translation_matrix = np.array([[1, 0, tx],
  131.                                    [0, 1, ty],
  132.                                    [0, 0, 1]])
  133.  
  134.     transform_matrix = translation_matrix  # no need to do offset
  135.     x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
  136.     return x
  137.  
  138. def random_shift_with_boxes(x, boxes, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
  139.                  fill_mode='nearest', cval=0.):
  140.     """Performs a random spatial shift of a Numpy image tensor.
  141.  
  142.    # Arguments
  143.        x: Input tensor. Must be 3D.
  144.        boxes: a list of bounding boxes [xmin, ymin, xmax, ymax], values in [0,1].
  145.        wrg: Width shift range, as a float fraction of the width.
  146.        hrg: Height shift range, as a float fraction of the height.
  147.        row_axis: Index of axis for rows in the input tensor.
  148.        col_axis: Index of axis for columns in the input tensor.
  149.        channel_axis: Index of axis for channels in the input tensor.
  150.        fill_mode: Points outside the boundaries of the input
  151.            are filled according to the given mode
  152.            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
  153.        cval: Value used for points outside the boundaries
  154.            of the input if `mode='constant'`.
  155.  
  156.    # Returns
  157.        Shifted Numpy image tensor
  158.        and the shifted bounding boxes.
  159.    """
  160.  
  161.     # sample parameters for augmentation
  162.     shift_h = np.random.uniform(-hrg, hrg)
  163.     shift_w = np.random.uniform(-wrg, wrg)
  164.  
  165.     # apply to image
  166.     h, w = x.shape[row_axis], x.shape[col_axis]
  167.     tx = shift_h * h
  168.     ty = shift_w * w
  169.     translation_matrix = np.array([[1, 0, tx],
  170.                                    [0, 1, ty],
  171.                                    [0, 0, 1]])
  172.  
  173.     transform_matrix = translation_matrix  # no need to do offset
  174.     x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
  175.  
  176.     # apply to vertices
  177.     vertices = boxes_to_vertices(boxes)
  178.     vertices = vertices.reshape((-1, 2))
  179.     vertices = vertices - [shift_h, shift_w]
  180.  
  181.     boxes = vertices_to_boxes(vertices)
  182.     return x, boxes
  183.  
  184. def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
  185.                  fill_mode='nearest', cval=0.):
  186.     """Performs a random spatial shear of a Numpy image tensor.
  187.  
  188.    # Arguments
  189.        x: Input tensor. Must be 3D.
  190.        intensity: Transformation intensity.
  191.        row_axis: Index of axis for rows in the input tensor.
  192.        col_axis: Index of axis for columns in the input tensor.
  193.        channel_axis: Index of axis for channels in the input tensor.
  194.        fill_mode: Points outside the boundaries of the input
  195.            are filled according to the given mode
  196.            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
  197.        cval: Value used for points outside the boundaries
  198.            of the input if `mode='constant'`.
  199.  
  200.    # Returns
  201.        Sheared Numpy image tensor.
  202.    """
  203.     shear = np.random.uniform(-intensity, intensity)
  204.     shear_matrix = np.array([[1, -np.sin(shear), 0],
  205.                              [0, np.cos(shear), 0],
  206.                              [0, 0, 1]])
  207.  
  208.     h, w = x.shape[row_axis], x.shape[col_axis]
  209.     transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
  210.     x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
  211.     return x
  212.  
  213.  
  214. def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
  215.                 fill_mode='nearest', cval=0.):
  216.     """Performs a random spatial zoom of a Numpy image tensor.
  217.  
  218.    # Arguments
  219.        x: Input tensor. Must be 3D.
  220.        zoom_range: Tuple of floats; zoom range for width and height.
  221.        row_axis: Index of axis for rows in the input tensor.
  222.        col_axis: Index of axis for columns in the input tensor.
  223.        channel_axis: Index of axis for channels in the input tensor.
  224.        fill_mode: Points outside the boundaries of the input
  225.            are filled according to the given mode
  226.            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
  227.        cval: Value used for points outside the boundaries
  228.            of the input if `mode='constant'`.
  229.  
  230.    # Returns
  231.        Zoomed Numpy image tensor.
  232.  
  233.    # Raises
  234.        ValueError: if `zoom_range` isn't a tuple.
  235.    """
  236.     if len(zoom_range) != 2:
  237.         raise ValueError('`zoom_range` should be a tuple or list of two floats. '
  238.                          'Received arg: ', zoom_range)
  239.  
  240.     if zoom_range[0] == 1 and zoom_range[1] == 1:
  241.         zx, zy = 1, 1
  242.     else:
  243.         zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
  244.     zoom_matrix = np.array([[zx, 0, 0],
  245.                             [0, zy, 0],
  246.                             [0, 0, 1]])
  247.  
  248.     h, w = x.shape[row_axis], x.shape[col_axis]
  249.     transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
  250.     x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
  251.     return x
  252.  
  253. def random_zoom_with_boxes(x, boxes, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
  254.                 fill_mode='nearest', cval=0.):
  255.     """Performs a random spatial zoom of a Numpy image tensor.
  256.    Also zooms the corresponding bounding boxes.
  257.  
  258.    # Arguments
  259.        x: Input tensor. Must be 3D.
  260.        boxes: a list of bounding boxes [xmin, ymin, xmax, ymax], values in [0,1].
  261.        zoom_range: Tuple of floats; zoom range for width and height.
  262.        row_axis: Index of axis for rows in the input tensor.
  263.        col_axis: Index of axis for columns in the input tensor.
  264.        channel_axis: Index of axis for channels in the input tensor.
  265.        fill_mode: Points outside the boundaries of the input
  266.            are filled according to the given mode
  267.            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
  268.        cval: Value used for points outside the boundaries
  269.            of the input if `mode='constant'`.
  270.  
  271.    # Returns
  272.        Zoomed Numpy image tensor,
  273.        the zoomed bounding boxes, and the zoomed box vertices.
  274.  
  275.    # Raises
  276.        ValueError: if `zoom_range` isn't a tuple.
  277.    """
  278.     if len(zoom_range) != 2:
  279.         raise ValueError('`zoom_range` should be a tuple or list of two floats. '
  280.                          'Received arg: ', zoom_range)
  281.  
  282.     if zoom_range[0] == 1 and zoom_range[1] == 1:
  283.         zx, zy = 1, 1
  284.     else:
  285.         zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
  286.     zoom_matrix = np.array([[zx, 0, 0],
  287.                             [0, zy, 0],
  288.                             [0, 0, 1]])
  289.  
  290.     h, w = x.shape[row_axis], x.shape[col_axis]
  291.     transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
  292.     x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
  293.  
  294.     # apply to vertices
  295.     vertices = boxes_to_vertices(boxes)
  296.     vertices = vertices.reshape((-1, 2))
  297.  
  298.     # Alternative implementation using homogeneous coordinates,
  299.     # kept here for reference but not executed:
  300.     # homog_vertices = np.ones((vertices.shape[0], 3))
  301.     # homog_vertices[:, :2] = vertices
  302.     #
  303.     # offset_matrix = np.array([[1, 0, -0.5], [0, 1, -0.5], [0, 0, 1]])
  304.     # zoom_matrix = np.array([[zy, 0, 0],
  305.     #                         [0, zx, 0],
  306.     #                         [0, 0, 1]])
  307.     # reset_matrix = np.array([[1, 0, 0.5], [0, 1, 0.5], [0, 0, 1]])
  308.     # v_matrix = np.dot(np.dot(offset_matrix, zoom_matrix), reset_matrix)
  309.     #
  310.     # homog_vertices = np.dot(homog_vertices, v_matrix)
  311.     # homog_boxes = vertices_to_boxes(homog_vertices[:, :2])
  312.     # apply offset to have pivot point at [0.5, 0.5]
  313.     vertices -= [0.5, 0.5]
  314.  
  315.     # apply zoom, we only need the zoom part of the matrix
  316.     vertices = np.dot(vertices, zoom_matrix[:2, :2].T[::-1, ::-1])
  317.     vertices += [0.5, 0.5]
  318.  
  319.     boxes = vertices_to_boxes(vertices)
  320.  
  321.     return x, boxes, vertices
  322.  
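# Added helper sketch (not part of the original paste): after a shift, zoom or
# rotation the transformed boxes can extend past the image borders, so a common
# post-processing step is to clip them back into the unit square and drop boxes
# that collapse to zero area. The helper name below is hypothetical.
def _clip_boxes_to_unit_square(boxes):
    boxes = np.clip(boxes, 0.0, 1.0)
    # keep only boxes that still have positive width and height
    keep = (boxes[:, 2] > boxes[:, 0]) & (boxes[:, 3] > boxes[:, 1])
    return boxes[keep]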
  323.  
  324. def random_channel_shift(x, intensity, channel_axis=0):
  325.     x = np.rollaxis(x, channel_axis, 0)
  326.     min_x, max_x = np.min(x), np.max(x)
  327.     channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
  328.                       for x_channel in x]
  329.     x = np.stack(channel_images, axis=0)
  330.     x = np.rollaxis(x, 0, channel_axis + 1)
  331.     return x
  332.  
  333.  
  334. def transform_matrix_offset_center(matrix, x, y):
  335.     o_x = float(x) / 2 + 0.5
  336.     o_y = float(y) / 2 + 0.5
  337.     offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
  338.     reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
  339.     transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
  340.     return transform_matrix
  341.  
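# Illustration (added, not part of the original paste): transform_matrix_offset_center
# conjugates a 3x3 homogeneous matrix with translations to and from the image
# center, so the transform pivots around the middle of the image rather than
# the top-left corner. A centered 30-degree rotation for a 100x200 image:
def _demo_centered_rotation(h=100, w=200, degrees=30.):
    theta = np.pi / 180 * degrees
    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                [np.sin(theta), np.cos(theta), 0],
                                [0, 0, 1]])
    return transform_matrix_offset_center(rotation_matrix, h, w)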
  342.  
  343. def apply_transform(x,
  344.                     transform_matrix,
  345.                     channel_axis=0,
  346.                     fill_mode='nearest',
  347.                     cval=0.):
  348.     """Apply the image transformation specified by a matrix.
  349.  
  350.    # Arguments
  351.        x: 3D Numpy array, single image (including the channel axis).
  352.        transform_matrix: Numpy array specifying the geometric transformation.
  353.        channel_axis: Index of axis for channels in the input tensor.
  354.        fill_mode: Points outside the boundaries of the input
  355.            are filled according to the given mode
  356.            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
  357.        cval: Value used for points outside the boundaries
  358.            of the input if `mode='constant'`.
  359.  
  360.    # Returns
  361.        The transformed version of the input.
  362.    """
  363.     x = np.rollaxis(x, channel_axis, 0)
  364.     final_affine_matrix = transform_matrix[:2, :2]
  365.     final_offset = transform_matrix[:2, 2]
  366.     channel_images = [ndi.interpolation.affine_transform(
  367.         x_channel,
  368.         final_affine_matrix,
  369.         final_offset,
  370.         order=0,
  371.         mode=fill_mode,
  372.         cval=cval) for x_channel in x]
  373.     x = np.stack(channel_images, axis=0)
  374.     x = np.rollaxis(x, 0, channel_axis + 1)
  375.     return x
  376.  
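# Usage sketch (added, not part of the original paste): apply a centered
# rotation to a dummy channels_last image. apply_transform expects a 3x3
# homogeneous matrix and the index of the channel axis of the input array.
def _demo_apply_transform():
    img = np.random.random((32, 32, 3))
    theta = np.pi / 6
    matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                       [np.sin(theta), np.cos(theta), 0],
                       [0, 0, 1]])
    matrix = transform_matrix_offset_center(matrix, 32, 32)
    return apply_transform(img, matrix, channel_axis=2, fill_mode='nearest')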
  377.  
  378. def flip_axis(x, axis):
  379.     x = np.asarray(x).swapaxes(axis, 0)
  380.     x = x[::-1, ...]
  381.     x = x.swapaxes(0, axis)
  382.     return x
  383.  
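# Added sketch (not part of the original paste): flip_axis only transforms the
# image array. If boxes should follow a horizontal flip, their x-coordinates
# have to be mirrored as well; assuming boxes are [xmin, ymin, xmax, ymax]
# with values in [0, 1], a minimal helper could look like this:
def _flip_boxes_horizontal(boxes):
    flipped = boxes.copy()
    flipped[:, 0] = 1.0 - boxes[:, 2]  # new xmin = 1 - old xmax
    flipped[:, 2] = 1.0 - boxes[:, 0]  # new xmax = 1 - old xmin
    return flipped
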
  384. def boxes_to_vertices(boxes):
  385.     """
  386.    Takes a list of bounding boxes and creates a list of vertices
  387.    The output shape is [number of boxes, 4, 2]
  388.    4 for the 4 vertices,
  389.    2 for x/y
  390.    :param boxes: Input tensor, must be 2D
  391.    :return: output tensor, 3D
  392.    """
  393.  
  394.     assert len(boxes.shape) == 2, "boxes must be a 2D tensor"
  395.     assert boxes.shape[1] == 4, "boxes must be [:, 4] tensor"
  396.  
  397.     min_val = boxes.min()
  398.     max_val = boxes.max()
  399.     assert 0 <= min_val <= 1, "bounding box coordinates must be in [0,1]"
  400.     assert 0 <= max_val <= 1, "bounding box coordinates must be in [0,1]"
  401.  
  402.     num_boxes = boxes.shape[0]
  403.  
  404.     # look at the four vertices of each box
  405.     x_min = boxes[:, 0]
  406.     y_min = boxes[:, 1]
  407.     x_max = boxes[:, 2]
  408.     y_max = boxes[:, 3]
  409.  
  410.     assert np.all(x_min < x_max), "coordinates must be given as [xmin, ymin, xmax, ymax]"
  411.     assert np.all(y_min < y_max), "coordinates must be given as [xmin, ymin, xmax, ymax]"
  412.  
  413.     # create new axis to stack the x,y coordinates
  414.     x_min = np.expand_dims(x_min, axis=-1)
  415.     y_min = np.expand_dims(y_min, axis=-1)
  416.     x_max = np.expand_dims(x_max, axis=-1)
  417.     y_max = np.expand_dims(y_max, axis=-1)
  418.  
  419.     # stack the x,y coordinates to create the vertices
  420.     # the resulting arrays are indexed [idx of box, idx of x or y]
  421.     up_left = np.concatenate([x_min, y_min], axis=-1)
  422.     up_right = np.concatenate([x_min, y_max], axis=-1)
  423.     down_right = np.concatenate([x_max, y_max], axis=-1)
  424.     down_left = np.concatenate([x_max, y_min], axis=-1)
  425.  
  426.     # now stack the vertices, along axis 1
  427.     up_left = np.expand_dims(up_left, axis=1)
  428.     up_right = np.expand_dims(up_right, axis=1)
  429.     down_right = np.expand_dims(down_right, axis=1)
  430.     down_left = np.expand_dims(down_left, axis=1)
  431.  
  432.     # create an array of all vertices, of all boxes
  433.     # the shape is [number of boxes, number of vertices, number of coordinates]
  434.     # ->  shape is [number of boxes, 4, 2]
  435.     vertices = np.concatenate([up_left, up_right, down_right, down_left], axis=1)
  436.  
  437.     return vertices
  438.  
  439. def vertices_to_boxes(vertices):
  440.     """
  441.    Takes a list of vertices and converts them to bounding boxes
  442.    :param vertices: Input tensor, must be 2D
  443.    :return: output tensor, 2D
  444.    """
  445.  
  446.     assert len(vertices.shape)==2, "vertices must be a 2D tensor"
  447.     assert vertices.shape[1]==2, "vertices must be [:, 2] tensor"
  448.  
  449.     vertices = vertices.reshape((-1, 4, 2))
  450.  
  451.     x = vertices[:, :, 0]
  452.     y = vertices[:, :, 1]
  453.  
  454.     x_min = x.min(axis=-1)
  455.     x_max = x.max(axis=-1)
  456.     y_min = y.min(axis=-1)
  457.     y_max = y.max(axis=-1)
  458.  
  459.     x_min = np.expand_dims(x_min, axis=-1)
  460.     x_max = np.expand_dims(x_max, axis=-1)
  461.     y_min = np.expand_dims(y_min, axis=-1)
  462.     y_max = np.expand_dims(y_max, axis=-1)
  463.  
  464.     boxes = np.concatenate([x_min, y_min, x_max, y_max], axis=-1)
  465.  
  466.     return boxes
  467.  
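# Round-trip sketch (added, not part of the original paste): boxes_to_vertices
# turns an (N, 4) box array into an (N, 4, 2) vertex array; flattening it to
# (N*4, 2) and passing it to vertices_to_boxes recovers the original boxes.
def _demo_box_vertex_round_trip():
    boxes = np.array([[0.1, 0.2, 0.4, 0.5],
                      [0.3, 0.3, 0.9, 0.8]])
    vertices = boxes_to_vertices(boxes)                        # shape (2, 4, 2)
    recovered = vertices_to_boxes(vertices.reshape((-1, 2)))
    return np.allclose(boxes, recovered)                       # True
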
  468. def array_to_img(x, data_format=None, scale=True):
  469.     """Converts a 3D Numpy array to a PIL Image instance.
  470.  
  471.    # Arguments
  472.        x: Input Numpy array.
  473.        data_format: Image data format.
  474.        scale: Whether to rescale image values
  475.            to be within [0, 255].
  476.  
  477.    # Returns
  478.        A PIL Image instance.
  479.  
  480.    # Raises
  481.        ImportError: if PIL is not available.
  482.        ValueError: if invalid `x` or `data_format` is passed.
  483.    """
  484.     if pil_image is None:
  485.         raise ImportError('Could not import PIL.Image. '
  486.                           'The use of `array_to_img` requires PIL.')
  487.     x = np.asarray(x, dtype=K.floatx())
  488.     if x.ndim != 3:
  489.         raise ValueError('Expected image array to have rank 3 (single image). '
  490.                          'Got array with shape:', x.shape)
  491.  
  492.     if data_format is None:
  493.         data_format = K.image_data_format()
  494.     if data_format not in {'channels_first', 'channels_last'}:
  495.         raise ValueError('Invalid data_format:', data_format)
  496.  
  497.     # Original Numpy array x has format (height, width, channel)
  498.     # or (channel, height, width)
  499.     # but target PIL image has format (width, height, channel)
  500.     if data_format == 'channels_first':
  501.         x = x.transpose(1, 2, 0)
  502.     if scale:
  503.         x = x + max(-np.min(x), 0)
  504.         x_max = np.max(x)
  505.         if x_max != 0:
  506.             x /= x_max
  507.         x *= 255
  508.     if x.shape[2] == 3:
  509.         # RGB
  510.         return pil_image.fromarray(x.astype('uint8'), 'RGB')
  511.     elif x.shape[2] == 1:
  512.         # grayscale
  513.         return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
  514.     else:
  515.         raise ValueError('Unsupported channel number: ', x.shape[2])
  516.  
  517.  
  518. def img_to_array(img, data_format=None):
  519.     """Converts a PIL Image instance to a Numpy array.
  520.  
  521.    # Arguments
  522.        img: PIL Image instance.
  523.        data_format: Image data format.
  524.  
  525.    # Returns
  526.        A 3D Numpy array.
  527.  
  528.    # Raises
  529.        ValueError: if invalid `img` or `data_format` is passed.
  530.    """
  531.     if data_format is None:
  532.         data_format = K.image_data_format()
  533.     if data_format not in {'channels_first', 'channels_last'}:
  534.         raise ValueError('Unknown data_format: ', data_format)
  535.     # Numpy array x has format (height, width, channel)
  536.     # or (channel, height, width)
  537.     # but original PIL image has format (width, height, channel)
  538.     x = np.asarray(img, dtype=K.floatx())
  539.     if len(x.shape) == 3:
  540.         if data_format == 'channels_first':
  541.             x = x.transpose(2, 0, 1)
  542.     elif len(x.shape) == 2:
  543.         if data_format == 'channels_first':
  544.             x = x.reshape((1, x.shape[0], x.shape[1]))
  545.         else:
  546.             x = x.reshape((x.shape[0], x.shape[1], 1))
  547.     else:
  548.         raise ValueError('Unsupported image shape: ', x.shape)
  549.     return x
  550.  
  551.  
  552. def load_img(path, grayscale=False, target_size=None):
  553.     """Loads an image into PIL format.
  554.  
  555.    # Arguments
  556.        path: Path to image file
  557.        grayscale: Boolean, whether to load the image as grayscale.
  558.        target_size: Either `None` (default to original size)
  559.            or tuple of ints `(img_height, img_width)`.
  560.  
  561.    # Returns
  562.        A PIL Image instance.
  563.  
  564.    # Raises
  565.        ImportError: if PIL is not available.
  566.    """
  567.     if pil_image is None:
  568.         raise ImportError('Could not import PIL.Image. '
  569.                           'The use of `load_img` requires PIL.')
  570.     img = pil_image.open(path)
  571.     if grayscale:
  572.         if img.mode != 'L':
  573.             img = img.convert('L')
  574.     else:
  575.         if img.mode != 'RGB':
  576.             img = img.convert('RGB')
  577.     if target_size:
  578.         hw_tuple = (target_size[1], target_size[0])
  579.         if img.size != hw_tuple:
  580.             img = img.resize(hw_tuple)
  581.     return img
  582.  
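# Usage sketch (added, not part of the original paste): load an image from
# disk, resize it and convert it to a Numpy array in the current data format.
# 'example.jpg' is a placeholder path.
def _demo_load_and_convert(path='example.jpg'):
    img = load_img(path, target_size=(224, 224))
    x = img_to_array(img)       # (224, 224, 3) with channels_last
    return array_to_img(x)      # back to a PIL Image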
  583.  
  584. def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
  585.     return [os.path.join(root, f)
  586.             for root, _, files in os.walk(directory) for f in files
  587.             if re.match(r'([\w]+\.(?:' + ext + '))', f)]
  588.  
  589.  
  590. class ImageDataGenerator(object):
  591.     """Generate minibatches of image data with real-time data augmentation.
  592.  
  593.    # Arguments
  594.        featurewise_center: set input mean to 0 over the dataset.
  595.        samplewise_center: set each sample mean to 0.
  596.        featurewise_std_normalization: divide inputs by std of the dataset.
  597.        samplewise_std_normalization: divide each input by its std.
  598.        zca_whitening: apply ZCA whitening.
  599.        zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
  600.        rotation_range: degrees (0 to 180).
  601.        width_shift_range: fraction of total width.
  602.        height_shift_range: fraction of total height.
  603.        shear_range: shear intensity (shear angle in radians).
  604.        zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
  605.            in the range [1-z, 1+z]. A sequence of two can be passed instead
  606.            to select this range.
  607.        channel_shift_range: shift range for each channel.
  608.        fill_mode: points outside the boundaries are filled according to the
  609.            given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
  610.            is 'nearest'.
  611.        cval: value used for points outside the boundaries when fill_mode is
  612.            'constant'. Default is 0.
  613.        horizontal_flip: whether to randomly flip images horizontally.
  614.        vertical_flip: whether to randomly flip images vertically.
  615.        rescale: rescaling factor. If None or 0, no rescaling is applied,
  616.            otherwise we multiply the data by the value provided. This is
  617.            applied after the `preprocessing_function` (if any provided)
  618.            but before any other transformation.
  619.        preprocessing_function: function that will be applied to each input.
  620.            The function will run before any other modification on it.
  621.            The function should take one argument:
  622.            one image (Numpy tensor with rank 3),
  623.            and should output a Numpy tensor with the same shape.
  624.        data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
  625.            (the depth) is at index 1, in 'channels_last' mode it is at index 3.
  626.            It defaults to the `image_data_format` value found in your
  627.            Keras config file at `~/.keras/keras.json`.
  628.            If you never set it, then it will be "channels_last".
  629.    """
  630.  
  631.     def __init__(self,
  632.                  featurewise_center=False,
  633.                  samplewise_center=False,
  634.                  featurewise_std_normalization=False,
  635.                  samplewise_std_normalization=False,
  636.                  zca_whitening=False,
  637.                  zca_epsilon=1e-6,
  638.                  rotation_range=0.,
  639.                  width_shift_range=0.,
  640.                  height_shift_range=0.,
  641.                  shear_range=0.,
  642.                  zoom_range=0.,
  643.                  channel_shift_range=0.,
  644.                  fill_mode='nearest',
  645.                  cval=0.,
  646.                  horizontal_flip=False,
  647.                  vertical_flip=False,
  648.                  rescale=None,
  649.                  preprocessing_function=None,
  650.                  data_format=None):
  651.         if data_format is None:
  652.             data_format = K.image_data_format()
  653.         self.featurewise_center = featurewise_center
  654.         self.samplewise_center = samplewise_center
  655.         self.featurewise_std_normalization = featurewise_std_normalization
  656.         self.samplewise_std_normalization = samplewise_std_normalization
  657.         self.zca_whitening = zca_whitening
  658.         self.zca_epsilon = zca_epsilon
  659.         self.rotation_range = rotation_range
  660.         self.width_shift_range = width_shift_range
  661.         self.height_shift_range = height_shift_range
  662.         self.shear_range = shear_range
  663.         self.zoom_range = zoom_range
  664.         self.channel_shift_range = channel_shift_range
  665.         self.fill_mode = fill_mode
  666.         self.cval = cval
  667.         self.horizontal_flip = horizontal_flip
  668.         self.vertical_flip = vertical_flip
  669.         self.rescale = rescale
  670.         self.preprocessing_function = preprocessing_function
  671.  
  672.         if data_format not in {'channels_last', 'channels_first'}:
  673.             raise ValueError('`data_format` should be `"channels_last"` (channel after row and '
  674.                              'column) or `"channels_first"` (channel before row and column). '
  675.                              'Received arg: ', data_format)
  676.         self.data_format = data_format
  677.         if data_format == 'channels_first':
  678.             self.channel_axis = 1
  679.             self.row_axis = 2
  680.             self.col_axis = 3
  681.         if data_format == 'channels_last':
  682.             self.channel_axis = 3
  683.             self.row_axis = 1
  684.             self.col_axis = 2
  685.  
  686.         self.mean = None
  687.         self.std = None
  688.         self.principal_components = None
  689.  
  690.         if np.isscalar(zoom_range):
  691.             self.zoom_range = [1 - zoom_range, 1 + zoom_range]
  692.         elif len(zoom_range) == 2:
  693.             self.zoom_range = [zoom_range[0], zoom_range[1]]
  694.         else:
  695.             raise ValueError('`zoom_range` should be a float or '
  696.                              'a tuple or list of two floats. '
  697.                              'Received arg: ', zoom_range)
  698.  
  699.     def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,
  700.              save_to_dir=None, save_prefix='', save_format='png'):
  701.         return NumpyArrayIterator(
  702.             x, y, self,
  703.             batch_size=batch_size,
  704.             shuffle=shuffle,
  705.             seed=seed,
  706.             data_format=self.data_format,
  707.             save_to_dir=save_to_dir,
  708.             save_prefix=save_prefix,
  709.             save_format=save_format)
  710.  
  711.     def flow_from_directory(self, directory,
  712.                             target_size=(256, 256), color_mode='rgb',
  713.                             classes=None, class_mode='categorical',
  714.                             batch_size=32, shuffle=True, seed=None,
  715.                             save_to_dir=None,
  716.                             save_prefix='',
  717.                             save_format='png',
  718.                             follow_links=False):
  719.         return DirectoryIterator(
  720.             directory, self,
  721.             target_size=target_size, color_mode=color_mode,
  722.             classes=classes, class_mode=class_mode,
  723.             data_format=self.data_format,
  724.             batch_size=batch_size, shuffle=shuffle, seed=seed,
  725.             save_to_dir=save_to_dir,
  726.             save_prefix=save_prefix,
  727.             save_format=save_format,
  728.             follow_links=follow_links)
  729.  
  730.     def standardize(self, x):
  731.         """Apply the normalization configuration to a batch of inputs.
  732.  
  733.        # Arguments
  734.            x: batch of inputs to be normalized.
  735.  
  736.        # Returns
  737.            The inputs, normalized.
  738.        """
  739.         if self.preprocessing_function:
  740.             x = self.preprocessing_function(x)
  741.         if self.rescale:
  742.             x *= self.rescale
  743.         # x is a single image, so it doesn't have image number at index 0
  744.         img_channel_axis = self.channel_axis - 1
  745.         if self.samplewise_center:
  746.             x -= np.mean(x, axis=img_channel_axis, keepdims=True)
  747.         if self.samplewise_std_normalization:
  748.             x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)
  749.  
  750.         if self.featurewise_center:
  751.             if self.mean is not None:
  752.                 x -= self.mean
  753.             else:
  754.                 warnings.warn('This ImageDataGenerator specifies '
  755.                               '`featurewise_center`, but it hasn\'t '
  756.                               'been fit on any training data. Fit it '
  757.                               'first by calling `.fit(numpy_data)`.')
  758.         if self.featurewise_std_normalization:
  759.             if self.std is not None:
  760.                 x /= (self.std + 1e-7)
  761.             else:
  762.                 warnings.warn('This ImageDataGenerator specifies '
  763.                               '`featurewise_std_normalization`, but it hasn\'t '
  764.                               'been fit on any training data. Fit it '
  765.                               'first by calling `.fit(numpy_data)`.')
  766.         if self.zca_whitening:
  767.             if self.principal_components is not None:
  768.                 flatx = np.reshape(x, (x.size))
  769.                 whitex = np.dot(flatx, self.principal_components)
  770.                 x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
  771.             else:
  772.                 warnings.warn('This ImageDataGenerator specifies '
  773.                               '`zca_whitening`, but it hasn\'t '
  774.                               'been fit on any training data. Fit it '
  775.                               'first by calling `.fit(numpy_data)`.')
  776.         return x
  777.  
  778.     def random_transform(self, x, seed=None):
  779.         """Randomly augment a single image tensor.
  780.  
  781.        # Arguments
  782.            x: 3D tensor, single image.
  783.            seed: random seed.
  784.  
  785.        # Returns
  786.            A randomly transformed version of the input (same shape).
  787.        """
  788.         # x is a single image, so it doesn't have image number at index 0
  789.         img_row_axis = self.row_axis - 1
  790.         img_col_axis = self.col_axis - 1
  791.         img_channel_axis = self.channel_axis - 1
  792.  
  793.         if seed is not None:
  794.             np.random.seed(seed)
  795.  
  796.         # use composition of homographies
  797.         # to generate final transform that needs to be applied
  798.         if self.rotation_range:
  799.             theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
  800.         else:
  801.             theta = 0
  802.  
  803.         if self.height_shift_range:
  804.             tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]
  805.         else:
  806.             tx = 0
  807.  
  808.         if self.width_shift_range:
  809.             ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]
  810.         else:
  811.             ty = 0
  812.  
  813.         if self.shear_range:
  814.             shear = np.random.uniform(-self.shear_range, self.shear_range)
  815.         else:
  816.             shear = 0
  817.  
  818.         if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
  819.             zx, zy = 1, 1
  820.         else:
  821.             zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
  822.  
  823.         transform_matrix = None
  824.         if theta != 0:
  825.             rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
  826.                                         [np.sin(theta), np.cos(theta), 0],
  827.                                         [0, 0, 1]])
  828.             transform_matrix = rotation_matrix
  829.  
  830.         if tx != 0 or ty != 0:
  831.             shift_matrix = np.array([[1, 0, tx],
  832.                                      [0, 1, ty],
  833.                                      [0, 0, 1]])
  834.             transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
  835.  
  836.         if shear != 0:
  837.             shear_matrix = np.array([[1, -np.sin(shear), 0],
  838.                                     [0, np.cos(shear), 0],
  839.                                     [0, 0, 1]])
  840.             transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
  841.  
  842.         if zx != 1 or zy != 1:
  843.             zoom_matrix = np.array([[zx, 0, 0],
  844.                                     [0, zy, 0],
  845.                                     [0, 0, 1]])
  846.             transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
  847.  
  848.         if transform_matrix is not None:
  849.             h, w = x.shape[img_row_axis], x.shape[img_col_axis]
  850.             transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
  851.             x = apply_transform(x, transform_matrix, img_channel_axis,
  852.                                 fill_mode=self.fill_mode, cval=self.cval)
  853.  
  854.         if self.channel_shift_range != 0:
  855.             x = random_channel_shift(x,
  856.                                      self.channel_shift_range,
  857.                                      img_channel_axis)
  858.         if self.horizontal_flip:
  859.             if np.random.random() < 0.5:
  860.                 x = flip_axis(x, img_col_axis)
  861.  
  862.         if self.vertical_flip:
  863.             if np.random.random() < 0.5:
  864.                 x = flip_axis(x, img_row_axis)
  865.  
  866.         return x
  867.  
  868.     def fit(self, x,
  869.             augment=False,
  870.             rounds=1,
  871.             seed=None):
  872.         """Fits internal statistics to some sample data.
  873.  
  874.        Required for featurewise_center, featurewise_std_normalization
  875.        and zca_whitening.
  876.  
  877.        # Arguments
  878.            x: Numpy array, the data to fit on. Should have rank 4.
  879.                In case of grayscale data,
  880.                the channels axis should have value 1, and in case
  881.                of RGB data, it should have value 3.
  882.            augment: Whether to fit on randomly augmented samples
  883.            rounds: If `augment`,
  884.                how many augmentation passes to do over the data
  885.            seed: random seed.
  886.  
  887.        # Raises
  888.            ValueError: in case of invalid input `x`.
  889.        """
  890.         x = np.asarray(x, dtype=K.floatx())
  891.         if x.ndim != 4:
  892.             raise ValueError('Input to `.fit()` should have rank 4. '
  893.                              'Got array with shape: ' + str(x.shape))
  894.         if x.shape[self.channel_axis] not in {1, 3, 4}:
  895.             warnings.warn(
  896.                 'Expected input to be images (as Numpy array) '
  897.                 'following the data format convention "' + self.data_format + '" '
  898.                 '(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
  899.                 'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
  900.                 'However, it was passed an array with shape ' + str(x.shape) +
  901.                 ' (' + str(x.shape[self.channel_axis]) + ' channels).')
  902.  
  903.         if seed is not None:
  904.             np.random.seed(seed)
  905.  
  906.         x = np.copy(x)
  907.         if augment:
  908.             ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
  909.             for r in range(rounds):
  910.                 for i in range(x.shape[0]):
  911.                     ax[i + r * x.shape[0]] = self.random_transform(x[i])
  912.             x = ax
  913.  
  914.         if self.featurewise_center:
  915.             self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
  916.             broadcast_shape = [1, 1, 1]
  917.             broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
  918.             self.mean = np.reshape(self.mean, broadcast_shape)
  919.             x -= self.mean
  920.  
  921.         if self.featurewise_std_normalization:
  922.             self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
  923.             broadcast_shape = [1, 1, 1]
  924.             broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
  925.             self.std = np.reshape(self.std, broadcast_shape)
  926.             x /= (self.std + K.epsilon())
  927.  
  928.         if self.zca_whitening:
  929.             flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
  930.             sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
  931.             u, s, _ = linalg.svd(sigma)
  932.             self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T)
  933.  
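# Usage sketch (added, not part of the original paste): a typical way to drive
# the generator with an in-memory rank-4 image array and matching labels.
# flow() loops forever, so the loop is broken off after roughly one epoch.
def _demo_flow(x_train, y_train, batch_size=32):
    datagen = ImageDataGenerator(rotation_range=20.,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=True)
    batches = 0
    for batch_x, batch_y in datagen.flow(x_train, y_train, batch_size=batch_size):
        batches += 1
        if batches * batch_size >= len(x_train):
            break
    return batches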
  934.  
  935. class Iterator(object):
  936.     """Abstract base class for image data iterators.
  937.  
  938.    # Arguments
  939.        n: Integer, total number of samples in the dataset to loop over.
  940.        batch_size: Integer, size of a batch.
  941.        shuffle: Boolean, whether to shuffle the data between epochs.
  942.        seed: Random seeding for data shuffling.
  943.    """
  944.  
  945.     def __init__(self, n, batch_size, shuffle, seed):
  946.         self.n = n
  947.         self.batch_size = batch_size
  948.         self.shuffle = shuffle
  949.         self.batch_index = 0
  950.         self.total_batches_seen = 0
  951.         self.lock = threading.Lock()
  952.         self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
  953.  
  954.     def reset(self):
  955.         self.batch_index = 0
  956.  
  957.     def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
  958.         # Ensure self.batch_index is 0.
  959.         self.reset()
  960.         while 1:
  961.             if seed is not None:
  962.                 np.random.seed(seed + self.total_batches_seen)
  963.             if self.batch_index == 0:
  964.                 index_array = np.arange(n)
  965.                 if shuffle:
  966.                     index_array = np.random.permutation(n)
  967.  
  968.             current_index = (self.batch_index * batch_size) % n
  969.             if n > current_index + batch_size:
  970.                 current_batch_size = batch_size
  971.                 self.batch_index += 1
  972.             else:
  973.                 current_batch_size = n - current_index
  974.                 self.batch_index = 0
  975.             self.total_batches_seen += 1
  976.             yield (index_array[current_index: current_index + current_batch_size],
  977.                    current_index, current_batch_size)
  978.  
  979.     def __iter__(self):
  980.         # Needed if we want to do something like:
  981.         # for x, y in data_gen.flow(...):
  982.         return self
  983.  
  984.     def __next__(self, *args, **kwargs):
  985.         return self.next(*args, **kwargs)
  986.  
  987.  
  988. class NumpyArrayIterator(Iterator):
  989.     """Iterator yielding data from a Numpy array.
  990.  
  991.    # Arguments
  992.        x: Numpy array of input data.
  993.        y: Numpy array of targets data.
  994.        image_data_generator: Instance of `ImageDataGenerator`
  995.            to use for random transformations and normalization.
  996.        batch_size: Integer, size of a batch.
  997.        shuffle: Boolean, whether to shuffle the data between epochs.
  998.        seed: Random seed for data shuffling.
  999.        data_format: String, one of `channels_first`, `channels_last`.
  1000.        save_to_dir: Optional directory where to save the pictures
  1001.            being yielded, in a viewable format. This is useful
  1002.            for visualizing the random transformations being
  1003.            applied, for debugging purposes.
  1004.        save_prefix: String prefix to use for saving sample
  1005.            images (if `save_to_dir` is set).
  1006.        save_format: Format to use for saving sample images
  1007.            (if `save_to_dir` is set).
  1008.    """
  1009.  
  1010.     def __init__(self, x, y, image_data_generator,
  1011.                  batch_size=32, shuffle=False, seed=None,
  1012.                  data_format=None,
  1013.                  save_to_dir=None, save_prefix='', save_format='png'):
  1014.         if y is not None and len(x) != len(y):
  1015.             raise ValueError('X (images tensor) and y (labels) '
  1016.                              'should have the same length. '
  1017.                              'Found: X.shape = %s, y.shape = %s' %
  1018.                              (np.asarray(x).shape, np.asarray(y).shape))
  1019.  
  1020.         if data_format is None:
  1021.             data_format = K.image_data_format()
  1022.         self.x = np.asarray(x, dtype=K.floatx())
  1023.  
  1024.         if self.x.ndim != 4:
  1025.             raise ValueError('Input data in `NumpyArrayIterator` '
  1026.                              'should have rank 4. You passed an array '
  1027.                              'with shape', self.x.shape)
  1028.         channels_axis = 3 if data_format == 'channels_last' else 1
  1029.         if self.x.shape[channels_axis] not in {1, 3, 4}:
  1030.             raise ValueError('NumpyArrayIterator is set to use the '
  1031.                              'data format convention "' + data_format + '" '
  1032.                              '(channels on axis ' + str(channels_axis) + '), i.e. expected '
  1033.                              'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
  1034.                              'However, it was passed an array with shape ' + str(self.x.shape) +
  1035.                              ' (' + str(self.x.shape[channels_axis]) + ' channels).')
  1036.         if y is not None:
  1037.             self.y = np.asarray(y)
  1038.         else:
  1039.             self.y = None
  1040.         self.image_data_generator = image_data_generator
  1041.         self.data_format = data_format
  1042.         self.save_to_dir = save_to_dir
  1043.         self.save_prefix = save_prefix
  1044.         self.save_format = save_format
  1045.         super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)
  1046.  
  1047.     def next(self):
  1048.         """For python 2.x.
  1049.  
  1050.        # Returns
  1051.            The next batch.
  1052.        """
  1053.         # Keeps under lock only the mechanism which advances
  1054.         # the indexing of each batch.
  1055.         with self.lock:
  1056.             index_array, current_index, current_batch_size = next(self.index_generator)
  1057.         # The transformation of images is not under thread lock
  1058.         # so it can be done in parallel
  1059.         batch_x = np.zeros(tuple([current_batch_size] + list(self.x.shape)[1:]), dtype=K.floatx())
  1060.         for i, j in enumerate(index_array):
  1061.             x = self.x[j]
  1062.             x = self.image_data_generator.random_transform(x.astype(K.floatx()))
  1063.             x = self.image_data_generator.standardize(x)
  1064.             batch_x[i] = x
  1065.         if self.save_to_dir:
  1066.             for i in range(current_batch_size):
  1067.                 img = array_to_img(batch_x[i], self.data_format, scale=True)
  1068.                 fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
  1069.                                                                   index=current_index + i,
  1070.                                                                   hash=np.random.randint(1e4),
  1071.                                                                   format=self.save_format)
  1072.                 img.save(os.path.join(self.save_to_dir, fname))
  1073.         if self.y is None:
  1074.             return batch_x
  1075.         batch_y = self.y[index_array]
  1076.         return batch_x, batch_y
  1077.  
  1078.  
  1079. def _count_valid_files_in_directory(directory, white_list_formats, follow_links):
  1080.     """Count files with extension in `white_list_formats` contained in a directory.
  1081.  
  1082.    # Arguments
  1083.        directory: absolute path to the directory containing files to be counted
  1084.        white_list_formats: set of strings containing allowed extensions for
  1085.            the files to be counted.
  1086.  
  1087.    # Returns
  1088.        the count of files with extension in `white_list_formats` contained in
  1089.        the directory.
  1090.    """
  1091.     def _recursive_list(subpath):
  1092.         return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
  1093.  
  1094.     samples = 0
  1095.     for root, _, files in _recursive_list(directory):
  1096.         for fname in files:
  1097.             is_valid = False
  1098.             for extension in white_list_formats:
  1099.                 if fname.lower().endswith('.' + extension):
  1100.                     is_valid = True
  1101.                     break
  1102.             if is_valid:
  1103.                 samples += 1
  1104.     return samples
  1105.  
  1106.  
  1107. def _list_valid_filenames_in_directory(directory, white_list_formats,
  1108.                                        class_indices, follow_links):
  1109.     """List paths of files in `subdir` relative from `directory` whose extensions are in `white_list_formats`.
  1110.  
  1111.    # Arguments
  1112.        directory: absolute path to a directory containing the files to list.
  1113.            The directory name is used as class label and must be a key of `class_indices`.
  1114.        white_list_formats: set of strings containing allowed extensions for
  1115.            the files to be counted.
  1116.        class_indices: dictionary mapping a class name to its index.
  1117.  
  1118.    # Returns
  1119.        classes: a list of class indices
  1120.        filenames: the path of valid files in `directory`, relative from
  1121.            `directory`'s parent (e.g., if `directory` is "dataset/class1",
  1122.            the filenames will be ["class1/file1.jpg", "class1/file2.jpg", ...]).
  1123.    """
  1124.     def _recursive_list(subpath):
  1125.         return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
  1126.  
  1127.     classes = []
  1128.     filenames = []
  1129.     subdir = os.path.basename(directory)
  1130.     basedir = os.path.dirname(directory)
  1131.     for root, _, files in _recursive_list(directory):
  1132.         for fname in files:
  1133.             is_valid = False
  1134.             for extension in white_list_formats:
  1135.                 if fname.lower().endswith('.' + extension):
  1136.                     is_valid = True
  1137.                     break
  1138.             if is_valid:
  1139.                 classes.append(class_indices[subdir])
  1140.                 # add filename relative to directory
  1141.                 absolute_path = os.path.join(root, fname)
  1142.                 filenames.append(os.path.relpath(absolute_path, basedir))
  1143.     return classes, filenames
  1144.  
  1145.  
  1146. class DirectoryIterator(Iterator):
  1147.     """Iterator capable of reading images from a directory on disk.
  1148.  
  1149.    # Arguments
  1150.        directory: Path to the directory to read images from.
  1151.            Each subdirectory in this directory will be
  1152.            considered to contain images from one class,
  1153.            or alternatively you could specify class subdirectories
  1154.            via the `classes` argument.
  1155.        image_data_generator: Instance of `ImageDataGenerator`
  1156.            to use for random transformations and normalization.
  1157.        target_size: tuple of integers, dimensions to resize input images to.
  1158.        color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
  1159.        classes: Optional list of strings, names of subdirectories
  1160.            containing images from each class (e.g. `["dogs", "cats"]`).
  1161.            It will be computed automatically if not set.
  1162.        class_mode: Mode for yielding the targets:
  1163.            `"binary"`: binary targets (if there are only two classes),
  1164.            `"categorical"`: categorical targets,
  1165.            `"sparse"`: integer targets,
  1166.            `"input"`: targets are images identical to input images (mainly
  1167.                used to work with autoencoders),
  1168.            `None`: no targets get yielded (only input images are yielded).
  1169.        batch_size: Integer, size of a batch.
  1170.        shuffle: Boolean, whether to shuffle the data between epochs.
  1171.        seed: Random seed for data shuffling.
  1172.        data_format: String, one of `channels_first`, `channels_last`.
  1173.        save_to_dir: Optional directory where to save the pictures
  1174.            being yielded, in a viewable format. This is useful
  1175.            for visualizing the random transformations being
  1176.            applied, for debugging purposes.
  1177.        save_prefix: String prefix to use for saving sample
  1178.            images (if `save_to_dir` is set).
  1179.        save_format: Format to use for saving sample images
  1180.            (if `save_to_dir` is set).
  1181.    """
  1182.  
  1183.     def __init__(self, directory, image_data_generator,
  1184.                  target_size=(256, 256), color_mode='rgb',
  1185.                  classes=None, class_mode='categorical',
  1186.                  batch_size=32, shuffle=True, seed=None,
  1187.                  data_format=None,
  1188.                  save_to_dir=None, save_prefix='', save_format='png',
  1189.                  follow_links=False):
  1190.         if data_format is None:
  1191.             data_format = K.image_data_format()
  1192.         self.directory = directory
  1193.         self.image_data_generator = image_data_generator
  1194.         self.target_size = tuple(target_size)
  1195.         if color_mode not in {'rgb', 'grayscale'}:
  1196.             raise ValueError('Invalid color mode:', color_mode,
  1197.                              '; expected "rgb" or "grayscale".')
  1198.         self.color_mode = color_mode
  1199.         self.data_format = data_format
  1200.         if self.color_mode == 'rgb':
  1201.             if self.data_format == 'channels_last':
  1202.                 self.image_shape = self.target_size + (3,)
  1203.             else:
  1204.                 self.image_shape = (3,) + self.target_size
  1205.         else:
  1206.             if self.data_format == 'channels_last':
  1207.                 self.image_shape = self.target_size + (1,)
  1208.             else:
  1209.                 self.image_shape = (1,) + self.target_size
  1210.         self.classes = classes
  1211.         if class_mode not in {'categorical', 'binary', 'sparse',
  1212.                               'input', None}:
  1213.             raise ValueError('Invalid class_mode:', class_mode,
  1214.                              '; expected one of "categorical", '
  1215.                              '"binary", "sparse", "input"'
  1216.                              ' or None.')
  1217.         self.class_mode = class_mode
  1218.         self.save_to_dir = save_to_dir
  1219.         self.save_prefix = save_prefix
  1220.         self.save_format = save_format
  1221.  
  1222.         white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}
  1223.  
  1224.         # first, count the number of samples and classes
  1225.         self.samples = 0
  1226.  
  1227.         if not classes:
  1228.             classes = []
  1229.             for subdir in sorted(os.listdir(directory)):
  1230.                 if os.path.isdir(os.path.join(directory, subdir)):
  1231.                     classes.append(subdir)
  1232.         self.num_class = len(classes)
  1233.         self.class_indices = dict(zip(classes, range(len(classes))))
  1234.  
  1235.         def _recursive_list(subpath):
  1236.             return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
  1237.  
  1238.         pool = multiprocessing.pool.ThreadPool()
  1239.         function_partial = partial(_count_valid_files_in_directory,
  1240.                                    white_list_formats=white_list_formats,
  1241.                                    follow_links=follow_links)
  1242.         self.samples = sum(pool.map(function_partial,
  1243.                                     (os.path.join(directory, subdir)
  1244.                                      for subdir in classes)))
  1245.  
  1246.         print('Found %d images belonging to %d classes.' % (self.samples, self.num_class))
  1247.  
  1248.         # second, build an index of the images in the different class subfolders
  1249.         results = []
  1250.  
  1251.         self.filenames = []
  1252.         self.classes = np.zeros((self.samples,), dtype='int32')
  1253.         i = 0
  1254.         for dirpath in (os.path.join(directory, subdir) for subdir in classes):
  1255.             results.append(pool.apply_async(_list_valid_filenames_in_directory,
  1256.                                             (dirpath, white_list_formats,
  1257.                                              self.class_indices, follow_links)))
  1258.         for res in results:
  1259.             classes, filenames = res.get()
  1260.             self.classes[i:i + len(classes)] = classes
  1261.             self.filenames += filenames
  1262.             i += len(classes)
  1263.         pool.close()
  1264.         pool.join()
  1265.         super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle, seed)
  1266.  
  1267.     def next(self):
  1268.         """For python 2.x.
  1269.  
  1270.        # Returns
  1271.            The next batch.
  1272.        """
  1273.         with self.lock:
  1274.             index_array, current_index, current_batch_size = next(self.index_generator)
  1275.         # The transformation of images is not under thread lock
  1276.         # so it can be done in parallel
  1277.         batch_x = np.zeros((current_batch_size,) + self.image_shape, dtype=K.floatx())
  1278.         grayscale = self.color_mode == 'grayscale'
  1279.         # build batch of image data
  1280.         for i, j in enumerate(index_array):
  1281.             fname = self.filenames[j]
  1282.             img = load_img(os.path.join(self.directory, fname),
  1283.                            grayscale=grayscale,
  1284.                            target_size=self.target_size)
  1285.             x = img_to_array(img, data_format=self.data_format)
  1286.             x = self.image_data_generator.random_transform(x)
  1287.             x = self.image_data_generator.standardize(x)
  1288.             batch_x[i] = x
  1289.         # optionally save augmented images to disk for debugging purposes
  1290.         if self.save_to_dir:
  1291.             for i in range(current_batch_size):
  1292.                 img = array_to_img(batch_x[i], self.data_format, scale=True)
  1293.                 fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
  1294.                                                                   index=current_index + i,
  1295.                                                                   hash=np.random.randint(1e4),
  1296.                                                                   format=self.save_format)
  1297.                 img.save(os.path.join(self.save_to_dir, fname))
  1298.         # build batch of labels
  1299.         if self.class_mode == 'input':
  1300.             batch_y = batch_x.copy()
  1301.         elif self.class_mode == 'sparse':
  1302.             batch_y = self.classes[index_array]
  1303.         elif self.class_mode == 'binary':
  1304.             batch_y = self.classes[index_array].astype(K.floatx())
  1305.         elif self.class_mode == 'categorical':
  1306.             batch_y = np.zeros((len(batch_x), self.num_class), dtype=K.floatx())
  1307.             for i, label in enumerate(self.classes[index_array]):
  1308.                 batch_y[i, label] = 1.
  1309.         else:
  1310.             return batch_x
  1311.         return batch_x, batch_y
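
# Usage sketch (added, not part of the original paste): reading images that are
# organized as one sub-directory per class, e.g. data/train/cats/*.jpg and
# data/train/dogs/*.jpg. 'data/train' is a placeholder path.
def _demo_flow_from_directory(directory='data/train'):
    datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)
    generator = datagen.flow_from_directory(directory,
                                            target_size=(150, 150),
                                            batch_size=32,
                                            class_mode='categorical')
    batch_x, batch_y = next(generator)
    return batch_x.shape, batch_y.shape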