Advertisement
Guest User

perspective_control.py

a guest
Apr 18th, 2015
286
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 22.73 KB | None | 0 0
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3.  
  4. """This program demonstrates how perspective control could work in Lensfun.  It
  5. must be converted to Lensfun C++ code of course.  However, its structure is
  6. already very similar to how Lensfun is structured.
  7.  
  8. Usage:
  9.    python3 perspective_control.py <image files>
  10.  
  11. Every image file must have a sidecar JSON file with the extension .json with
  12. the following content:
  13.  
  14. [
  15.    18,
  16.    1.534,
  17.    6,
  18.    [   8,   59,  289,  229,   8,  289],
  19.    [ 188,  154,  187,  154, 188,  187]
  20. ]
  21.  
  22. Here, 18 is the focal length in mm, 1.534 is the crop factor, 6 is a scaling
  23. parameter, the first array are the x values of the control points, and the
  24. second array are the y values.  The (x, y) values must be image pixel
  25. coordinates.  There are 4, 6, or 8 points allowed, see
  26. `initialize_perspective_correction` below.
  27.  
  28. """
  29.  
  30. import sys, subprocess, os, array, json, multiprocessing, tempfile
  31. from math import sin, cos, tan, atan, sqrt, copysign, acos, log
  32. from math import pi as π
  33.  
  34. def read_ppm(input_file, read_data=True):
  35.     def read_token(skip_trailing_whitespace=True):
  36.         nonlocal current_character
  37.         token = b""
  38.         while current_character:
  39.             if current_character == b"#":
  40.                 while current_character and current_character not in b"\r\n":
  41.                     current_character = input_file.read(1)
  42.             elif current_character.isspace():
  43.                 if skip_trailing_whitespace:
  44.                     while current_character.isspace():
  45.                         current_character = input_file.read(1)
  46.                 return token
  47.             else:
  48.                 token += current_character
  49.             current_character = input_file.read(1)
  50.     state = "start"
  51.     current_character = input_file.read(1)
  52.     magic_number = read_token()
  53.     assert magic_number == b"P6"
  54.     width = int(read_token())
  55.     height = int(read_token())
  56.     max_value = int(read_token(skip_trailing_whitespace=False))
  57.     assert max_value == 255
  58.     if read_data:
  59.         image_data = array.array("B")
  60.         image_data.fromfile(input_file, width * height * 3)
  61.         image_data.byteswap()
  62.         return image_data, width, height
  63.     else:
  64.         return width, height
  65.  
  66. def read_image_file(filepath):
  67.     convert_process = subprocess.Popen(
  68.         ["convert", filepath, "-set", "colorspace", "RGB", "ppm:-"], stderr=open(os.devnull, "w"), stdout=subprocess.PIPE)
  69.     return read_ppm(convert_process.stdout)
  70.  
  71. def write_image_file(image_data, width, height, filepath):
  72.     image_data.byteswap()
  73.     with tempfile.NamedTemporaryFile(delete=False) as outfile:
  74.         outfile.write("P6\n{} {}\n255\n".format(width, height).encode("ascii") + image_data.tobytes())
  75.     subprocess.call(["convert", outfile.name, "-set", "colorspace", "RGB", filepath])
  76.     image_data.byteswap()
  77.  
  78.  
  79. def intersection(x1, y1, x2, y2, x3, y3, x4, y4):
  80.     A = x1 * y2 - y1 * x2
  81.     B = x3 * y4 - y3 * x4
  82.     C = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
  83.  
  84.     numerator_x = (A * (x3 - x4) - B * (x1 - x2))
  85.     numerator_y = (A * (y3 - y4) - B * (y1 - y2))
  86.     try:
  87.         return numerator_x / C, numerator_y / C
  88.     except ZeroDivisionError:
  89.         return math.copysign(float("inf"), numerator_x), math.copysign(float("inf"), numerator_y)
  90.  
  91.  
  92. # In the following, I refer to these two rotation matrices: (See
  93. # <http://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions>.)
  94. #
  95. #         ⎛ 1     0         0   ⎞
  96. # Rₓ(ϑ) = ⎜ 0   cos ϑ   - sin ϑ ⎟
  97. #         ⎝ 0   sin ϑ     cos ϑ ⎠
  98. #
  99. #          ⎛  cos ϑ   0   sin ϑ ⎞
  100. # R_y(ϑ) = ⎜   0      1    0    ⎟
  101. #          ⎝- sin ϑ   0   cos ϑ ⎠
  102. #
  103. #          ⎛ cos ϑ   - sin ϑ  0 ⎞
  104. # R_z(ϑ) = ⎜ sin ϑ     cos ϑ  0 ⎟
  105. #          ⎝   0         0    1 ⎠
  106.  
  107.  
  108. def rotate_ρ_δ(ρ, δ, x, y, z):
  109.     # This matrix is: Rₓ(δ) · R_y(ρ)
  110.     A11, A12, A13 = cos(ρ), 0, sin(ρ)
  111.     A21, A22, A23 = sin(ρ) * sin(δ), cos(δ), - cos(ρ) * sin(δ)
  112.     A31, A32, A33 = - sin(ρ) * cos(δ), sin(δ), cos(ρ) * cos(δ)
  113.     return \
  114.         A11 * x + A12 * y + A13 * z, \
  115.         A21 * x + A22 * y + A23 * z, \
  116.         A31 * x + A32 * y + A33 * z
  117.  
  118. def rotate_ρ_δ_ρh(ρ, δ, ρ_h, x, y, z):
  119.     # This matrix is: R_y(ρₕ) · Rₓ(δ) · R_y(ρ)
  120.     A11, A12, A13 = cos(ρ) * cos(ρ_h) - sin(ρ) * cos(δ) * sin(ρ_h), sin(δ) * sin(ρ_h), sin(ρ) * cos(ρ_h) + cos(ρ) * cos(δ) * sin(ρ_h)
  121.     A21, A22, A23 = sin(ρ) * sin(δ), cos(δ), - cos(ρ) * sin(δ)
  122.     A31, A32, A33 = - cos(ρ) * sin(ρ_h) -  sin(ρ) * cos(δ) * cos(ρ_h), \
  123.                     sin(δ) * cos(ρ_h), \
  124.                     - sin(ρ) * sin(ρ_h) + cos(ρ) * cos(δ) * cos(ρ_h)
  125.     return \
  126.         A11 * x + A12 * y + A13 * z, \
  127.         A21 * x + A22 * y + A23 * z, \
  128.         A31 * x + A32 * y + A33 * z
  129.  
def calculate_angles(x, y, f, normalized_in_millimeters):
    """Derive the rotation angles from the control points.

    `x` and `y` are the coordinates of the 4, 6, or 8 control points in
    normalized image coordinates; `f` is the focal length in millimeters and
    `normalized_in_millimeters` the length (in mm) of one normalized unit.
    Returns the tuple
    (ρ, δ, ρ_h, f_normalized, final_rotation, center_x, center_y).
    """
    # Calculate the center of gravity of the control points
    if len(x) == 8:
        center_x = sum(x) / 8
        center_y = sum(y) / 8
    else:
        center_x = sum(x[:4]) / 4
        center_y = sum(y[:4]) / 4

    # Vertical vanishing point: intersection of the two lines given by the
    # first four control points.
    x_v, y_v = intersection(x[0], y[0], x[1], y[1], x[2], y[2], x[3], y[3])
    if len(x) == 8:
        # The problem is over-determined.  I prefer the fourth line over the
        # focal length.  Maybe this is useful in cases where the focal length
        # is not known.
        x_h, y_h = intersection(x[4], y[4], x[5], y[5], x[6], y[6], x[7], y[7])
        try:
            f_normalized = sqrt(- x_h * x_v - y_h * y_v)
        except ValueError:
            # FixMe: Is really always an f given (even if it is inaccurate)?
            f_normalized = f / normalized_in_millimeters
        else:
            print(f_normalized * normalized_in_millimeters, "mm")
    else:
        f_normalized = f / normalized_in_millimeters

    # Calculate vertex in polar coordinates
    ρ = atan(- x_v / f_normalized)
    δ = π / 2 - atan(- y_v / sqrt(x_v**2 + f_normalized**2))
    if rotate_ρ_δ(ρ, δ, center_x, center_y, f_normalized)[2] < 0:
        # We have to move the vertex into the nadir instead of the zenith.
        δ -= π
    # Unit vectors from the vanishing point towards the first point of each
    # of the two lines; their sum decides whether the lines are predominantly
    # horizontal or vertical in the image.
    a = normalize(x_v - x[0], y_v - y[0])
    b = normalize(x_v - x[2], y_v - y[2])
    c = (a[0] + b[0], a[1] + b[1])
    if abs(c[0]) > abs(c[1]):
        # The first two lines denote *horizontal* lines, the last two
        # *verticals*.  The final rotation is by final_rotation·π/2.
        final_rotation = copysign(1, ρ)
    else:
        final_rotation = 0

    # Calculate angle of intersection of horizontal great circle with equator,
    # after the vertex was moved into the zenith
    if len(x) == 4:
        # This results from assuming that at y = 0, there is a horizontal in
        # the picture.  y = 0 is arbitrary, but then, so is every other value.
        ρ_h = 0
    else:
        z4 = z5 = f_normalized
        x4_, y4_, z4_ = rotate_ρ_δ(ρ, δ, x[4], y[4], z4)
        x5_, y5_, z5_ = rotate_ρ_δ(ρ, δ, x[5], y[5], z5)
        if y4_ == y5_:
            # The horizontal vanishing point is perfectly to the left/right, so
            # no rotation necessary.  ρ_h is undefined if also y4_ == 0
            # (horizontal great circle is on the equator), but we simply set
            # ρ_h = 0 in this case, too.
            ρ_h = 0
        else:
            # Linear interpolation to the point where the rotated horizontal
            # line crosses the x-z plane (y = 0).
            λ = y4_ / (y4_ - y5_)
            x_h = x4_ + λ * (x5_ - x4_)
            z_h = z4_ + λ * (z5_ - z4_)
            if z_h == 0:
                ρ_h = 0 if x_h > 0 else π
            else:
                ρ_h = π / 2 - atan(x_h / z_h)
            if rotate_ρ_δ_ρh(ρ, δ, ρ_h, center_x, center_y, f_normalized)[2] < 0:
                # We have to move the vertex to the left instead of right
                ρ_h -= π
    return ρ, δ, ρ_h, f_normalized, final_rotation, center_x, center_y
  199.  
  200. def generate_rotation_matrix(ρ1, δ, ρ2, d):
  201.     """Returns a rotation matrix which combines three rotations.  First, around the
  202.    y axis by ρ₁, then, around the x axis by δ, and finally, around the y axis
  203.    again by ρ₂.
  204.    """
  205.     # We calculate the quaternion by multiplying the three quaternions for the
  206.     # three rotations (in reverse order).  We use quaternions here to be able
  207.     # to apply the d parameter in a reasonable way.
  208.     s_ρ2, c_ρ2 = sin(ρ2/2), cos(ρ2/2)
  209.     s_δ, c_δ = sin(δ/2), cos(δ/2)
  210.     s_ρ1, c_ρ1 = sin(ρ1/2), cos(ρ1/2)
  211.     w = c_ρ2 * c_δ * c_ρ1 - s_ρ2 * c_δ * s_ρ1
  212.     x = c_ρ2 * s_δ * c_ρ1 + s_ρ2 * s_δ * s_ρ1
  213.     y = c_ρ2 * c_δ * s_ρ1 + s_ρ2 * c_δ * c_ρ1
  214.     z = c_ρ2 * s_δ * s_ρ1 - s_ρ2 * s_δ * c_ρ1
  215.     # Now, decompose the quaternion into θ and the axis unit vector.
  216.     θ = 2 * acos(w)
  217.     if θ > π:
  218.         θ -= 2
  219.     s_θ = sin(θ/2)
  220.     x, y, z = x / s_θ, y / s_θ, z / s_θ
  221.     # Apply d
  222.     compression = 10
  223.     θ *= d + 1 if d <= 0 else 1 + 1 / compression * log(compression * d + 1)
  224.     if θ > 0.9 * π:
  225.         θ = 0.9 * π
  226.     elif θ < - 0.9 * π:
  227.         θ = - 0.9 * π
  228.     # Compose the quaternion again.
  229.     w = cos(θ/2)
  230.     s_θ = sin(θ/2)
  231.     x, y, z = x * s_θ, y * s_θ, z * s_θ
  232.     # Convert the quaternion to a rotation matrix, see e.g.
  233.     # <https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion>.  This matrix
  234.     # is (if d=0): R_y(ρ2) · Rₓ(δ) · R_y(ρ1)
  235.     A11, A12, A13 = 1 - 2 * y**2 - 2 * z**2,  2 * x * y - 2 * z * w,    2 * x * z + 2 * y * w
  236.     A21, A22, A23 = 2 * x * y + 2 * z * w,    1 - 2 * x**2 - 2 * z**2,  2 * y * z - 2 * x * w
  237.     A31, A32, A33 = 2 * x * z - 2 * y * w,    2 * y * z + 2 * x * w,    1 - 2 * x**2 - 2 * y**2
  238.     return A11, A12, A13, \
  239.            A21, A22, A23, \
  240.            A31, A32, A33
  241.  
  242. def normalize(x, y):
  243.     norm = sqrt(x**2 + y**2)
  244.     try:
  245.         return x / norm, y / norm
  246.     except ZeroDivisionError:
  247.         return math.copysign(float("inf"), x), math.copysign(float("inf"), y)
  248.  
class Modifier:

    """First, the coordinate system.  It is right-handed.  The centre of the image
    is the origin of the x-y plane.  x is to the right, y to the bottom, and z
    into the back.  All rotations rotate points in mathematical positive
    direction around the axes.  The axes keep their position (extrinsic
    rotations).

    The general algorithm is as follows: The original image (note that the
    sensor sees everything mirror-inverted, but this is annihilated by the
    back-projection later) is positioned parallel to the x-y plane at z = -
    focal length.  Then, it is central-projected to the full sphere (radius
    equals focal length), so it gets positive z coordinates and can be viewed
    properly from the origin.

    This way, also the vertical vanishing point is on the sphere at (- ρ, 90° -
    δ).  Then, the sphere with the image is rotated around the y axis by ρ so
    that the vanishing point is on the y-z plane with z > 0.  Then, the sphere
    is rotated around the x axis by δ so that the vanishing point is in the
    zenith – where it belongs.

    Next, the intersection of the horizontal control line with the x-z plane is
    determined.  Its rectascension is called 90° - ρₕ.  So, the sphere is
    rotated around the y axis a second time, this time by ρₕ.  Now, the right
    vanishing point truly is to the right.

    The rotated image plane is central-projected back into the sensor plane at
    z = - focal length.  In general, it will be way off the center of the
    image, so that the image must be shifted.  We shift so that the image
    center is a fix point.  If however the image center is not visible in the
    result, or too close to the pole (i.e. magnification > 10), we use the
    center of gravity of the control points instead.

    Finally, the scaling in the center of the image is kept constant.  One can
    finetune with an additional scaling, but this way, one has a good starting
    point.

    Note that in reality, this whole process is performed backwards because we
    make a lookup in the original picture.  In particular, the rotation by ρ,
    δ, ρₕ becomes - ρₕ, - δ, - ρ.  We still need the forward direction in some
    places.

    And then there is the d parameter which allows finetuning of the
    correction.  It is between -1 and 1, with 0 meaning full correction, -1 no
    correction, and 1 increased rotation by factor 1.25.  For example, if you
    have tilted the camera by 40°, d = -1 means the original tilt of 40°, d = 0
    means no tilt (perfect correction), and d = 1 means a tilt of 10° in the
    opposite direction (over-correction).  This way, one can finetune the slope
    of the lines as well as the aspect ratio.

    """

    def __init__(self, crop_factor, width, height):
        """`crop_factor` is the sensor crop factor, `width` and `height` the
        image dimensions in pixels.  Only landscape orientation is supported
        by this prototype (see the assertion)."""
        assert width > height
        self.crop_factor, self.width, self.height = crop_factor, width, height
        aspect_ratio = width / height
        aspect_ratio_correction = sqrt(1 + aspect_ratio**2)
        # Millimeters per normalized-coordinate unit: half the sensor
        # diagonal, derived from the 36 mm × 24 mm full-frame diagonal and
        # the crop factor.
        self.normalized_in_millimeters = sqrt(36**2 + 24**2) / 2 / aspect_ratio_correction / self.crop_factor
        # Conversion factors between pixel coordinates and normalized
        # coordinates (the image height spans 2 normalized units), plus the
        # image center in normalized coordinates.
        self.norm_scale = 2 / (height - 1)
        self.norm_unscale = (height - 1) / 2
        self.center_x = width / height
        self.center_y = 1

    def initialize(self, focal_length):
        """Set the focal length (in millimeters) used by the correction."""
        self.focal_length = focal_length

    def initialize_perspective_correction(self, x, y, d):
        """Depending on the number of control points given, there are three possible
        modes:

        4 control points: c0 and c1 define one vertical line, c2 and c3 the other.
        The focal length is used to get the proper aspect ratio.

        6 control points: c0 to c3 like above.  c4 and c5 define a horizontal line.
        The focal length is used to get the proper aspect ratio.

        8 control points: c0 to c5 like above.  c6 and c7 define a second
        horizontal line.  The focal length given is discarded and calculated from
        the control points.

        If the lines constructed from the first four control points are more
        horizontal than vertical, in all of the above, "horizontal" and
        "vertical" need to be swapped.

        All control points must be given as pixel coordinates in the original
        image.  In particular, cropping, rotating, shifting, or scaling must be
        switched off or removed by mapping.  For best precision,
        anti-distortion should have been applied already.  Moreover, fisheye
        images must have been transformed into rectilinear of the same focal
        length.  The control points can be written to a sidecar file because
        they describe the perspective correction universally,
        i.e. independently of all corrections that can be activated (scaling,
        anti-distortion, projection transform, etc).

        FixMe: Maybe a static routine should be offered which takes a modifier,
        and an (x, y) pair, and maps them on x and y coordinates as if only the
        anti-distortion and the geometry transformation to rectilinear was
        active.

        The d parameter is supposed to be offered to the user as a slider.  It
        can take values from -1 to +1.  0 denotes the perfect correction.  -1
        is the unchanged image.  +1 is an increase of the tilting angle by 25%.

        Returns True if the correction was set up (self.callback_data is then
        valid), False if the input is unusable.
        """
        if self.focal_length <= 0 or len(x) not in [4, 6, 8] or len(x) != len(y):
            # Don't add any callback
            return False
        # Clamp d into its documented range [-1, 1].
        if d <= - 1:
            d = - 1
        elif d >= 1:
            d = 1
        # x, y = x[:4], y[:4]  # For testing only four points
        # x[0:6] = x[0], x[2], x[1], x[3], x[0], x[1]  # For testing horizontal mode
        # y[0:6] = y[0], y[2], y[1], y[3], y[0], y[1]  # For testing horizontal mode
        # x[6:8] = [x[1], x[3]]  # for faking an explicit second horizontal line
        # y[6:8] = [y[1], y[3]]  # for faking an explicit second horizontal line
        # Convert the control points from pixel to normalized coordinates.
        x = [value * self.norm_scale - self.center_x for value in x]
        y = [value * self.norm_scale - self.center_y for value in y]

        ρ, δ, ρ_h, f_normalized, final_rotation, center_of_control_points_x, center_of_control_points_y = \
                calculate_angles(x, y, self.focal_length, self.normalized_in_millimeters)

        # Transform center point to get shift
        z = rotate_ρ_δ_ρh(ρ, δ, ρ_h, 0, 0, f_normalized)[2]
        # If the image centre is too much outside, or even at infinity, take the
        # center of gravity of the control points instead.
        new_image_center = "control points center" if z <= 0 or f_normalized / z > 10 else "old image center"

        # Generate a rotation matrix in forward direction, for getting the
        # proper shift of the image center.
        A11, A12, A13, \
        A21, A22, A23, \
        A31, A32, A33 = generate_rotation_matrix(ρ, δ, ρ_h, d)

        if new_image_center == "old image center":
            x, y, z = A13 * f_normalized, A23 * f_normalized, A33 * f_normalized
        elif new_image_center == "control points center":
            x, y, z = A11 * center_of_control_points_x + A12 * center_of_control_points_y + A13 * f_normalized, \
                      A21 * center_of_control_points_x + A22 * center_of_control_points_y + A23 * f_normalized, \
                      A31 * center_of_control_points_x + A32 * center_of_control_points_y + A33 * f_normalized
        if z <= 0:
            return False
        # This is the mapping scale in the image center
        mapping_scale = f_normalized / z
        # Central projection through the origin
        Δa = - x * mapping_scale
        Δb = - y * mapping_scale

#        print(ρ * 180 / π, δ * 180 / π, ρ_h * 180 / π, final_rotation * 90)

        # Finally, generate a rotation matrix in backward (lookup) direction
        A11, A12, A13, \
        A21, A22, A23, \
        A31, A32, A33 = generate_rotation_matrix(- ρ_h, - δ, - ρ, d)

        if final_rotation:
            # This matrix is: R_y(- ρ) · Rₓ(- δ) · R_y(- ρₕ) ·
            # R_z(final_rotation·π/2).  final_rotation is either -1 or +1.
            A11, A12, A13 = final_rotation * A12, - final_rotation * A11, A13
            A21, A22, A23 = final_rotation * A22, - final_rotation * A21, A23
            A31, A32, A33 = final_rotation * A32, - final_rotation * A31, A33
            Δa, Δb = final_rotation * Δb, - final_rotation * Δa

        # The occurrences of mapping_scale here avoid an additional
        # multiplication in the inner loop of perspective_correction_callback.
        self.callback_data = A11 * mapping_scale, A12 * mapping_scale, A13, \
                             A21 * mapping_scale, A22 * mapping_scale, A23, \
                             A31 * mapping_scale, A32 * mapping_scale, A33, \
                             f_normalized, Δa / mapping_scale, Δb / mapping_scale
        return True

    @staticmethod
    def perspective_correction_callback(data, iocoord, offset, count):
        """Map `count` normalized output coordinates in `iocoord` (interleaved
        x/y pairs starting at `offset`) in place to the coordinates where the
        source pixel is found.  `data` is the tuple built in
        initialize_perspective_correction.  Pixels that end up behind the
        camera (z ≤ 0) are marked with NaN."""
        A11, A12, A13, \
        A21, A22, A23, \
        A31, A32, A33 = data[:9]
        f_normalized = data[9]
        Δa, Δb = data[10:12]
        for i in range(count):
            x = iocoord[offset + i * 2] - Δa
            y = iocoord[offset + i * 2 + 1] - Δb
            z_ = A31 * x + A32 * y + A33 * f_normalized
            if z_ > 0:
                x_ = A11 * x + A12 * y + A13 * f_normalized
                y_ = A21 * x + A22 * y + A23 * f_normalized
                # Central projection through the origin
                stretch_factor = f_normalized / z_
                iocoord[offset + i * 2] = x_ * stretch_factor
                iocoord[offset + i * 2 + 1] = y_ * stretch_factor
            else:
                iocoord[offset + i * 2] = iocoord[offset + i * 2 + 1] = float("nan")

    @staticmethod
    def scaling_callback(data, iocoord, offset, count):
        """Scale `count` interleaved (x, y) pairs in `iocoord`, starting at
        `offset`, in place by the factor data[0]."""
        scaling = data[0]
        for i in range(count):
            iocoord[offset + i * 2] *= scaling
            iocoord[offset + i * 2 + 1] *= scaling

    def apply_perspective_correction(self, xu, yu, width, height, res):
        """Fill `res` with interleaved source pixel coordinates for every
        pixel of the width×height output image whose top-left pixel is
        (xu, yu).  `res` must hold width * height * 2 floats."""
        y = yu * self.norm_scale - self.center_y
        offset = 0   # In the C code, res itself is moved forward
        for j in range(height):
            x = xu * self.norm_scale - self.center_x
            for i in range(width):
                res[offset + i * 2] = x
                res[offset + i * 2 + 1] = y
                x += self.norm_scale
            #self.scaling_callback([self.scaling_factor], res, offset, width)
            self.perspective_correction_callback(self.callback_data, res, offset, width)
            # Convert the looked-up normalized coordinates back to pixels.
            for i in range(width):
                res[offset] = (res[offset] + self.center_x) * self.norm_unscale
                res[offset + 1] = (res[offset + 1] + self.center_y) * self.norm_unscale
                offset += 2
            y += self.norm_scale
  464.  
  465. def process_image(filepath, d, index):
  466.     image_data, width, height = read_image_file(filepath)
  467.     f, crop_factor, shrinking, x, y = json.load(open(os.path.splitext(filepath)[0] + ".json"))
  468.     modifier = Modifier(crop_factor, width, height)
  469.     modifier.initialize(f)
  470.     if modifier.initialize_perspective_correction(x, y, d):
  471.         res = array.array("f", width * height * 2 * [0])
  472.         modifier.scaling_factor = shrinking
  473.         modifier.apply_perspective_correction(0, 0, width, height, res)
  474.         destination_image_data = array.array("B", width * height * 3 * [0])
  475.         for y in range(height):
  476.             for x in range(width):
  477.                 offset = (width * y + x) * 2
  478.                 try:
  479.                     a = int(round(res[offset]))
  480.                     b = int(round(res[offset + 1]))
  481.                 except ValueError:
  482.                     # NaN
  483.                     continue
  484.                 if 0 <= a < width and 0 <= b < height:
  485.                     destination_offset = (width * y + x) * 3
  486.                     image_offset = (width * b + a) * 3
  487.                     destination_image_data[destination_offset] = image_data[image_offset]
  488.                     destination_image_data[destination_offset + 1] = image_data[image_offset + 1]
  489.                     destination_image_data[destination_offset + 2] = image_data[image_offset + 2]
  490.     else:
  491.         destination_image_data = image_data
  492.     basename, extension = os.path.splitext(filepath)
  493.     write_image_file(destination_image_data, width, height, "{}_{:03}_{}{}".format(basename, index, d, extension))
  494.  
  495. pool = multiprocessing.Pool()
  496. results = set()
  497. for filename in sys.argv[1:]:
  498.     results.add(pool.apply_async(process_image, (filename, 0, 0)))
  499.     # for i, d in enumerate(numpy.arange(-1, 1.02, 0.02)):
  500.     #     results.add(pool.apply_async(process_image, (filename, d, i)))
  501. pool.close()
  502. pool.join()
  503. for result in results:
  504.     # Just to get tracebacks if necessary
  505.     result.get()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement