import math

import libjevois as jevois
import cv2
import numpy as np

# HSV lower/upper bounds used to isolate the green vision-target color
lower_green = np.array([60, 170, 70])
upper_green = np.array([180, 255, 255])
min_area = 150  # minimum target area, in pixels
field_of_view = 60  # horizontal field of view of the camera, in degrees


def preprocess_img(frame, color=cv2.COLOR_BGR2HSV, lower=lower_green, upper=upper_green):
    # Convert to HSV, blur to cut noise, threshold to the target color, then
    # erode to remove small speckles from the mask.
    hsv = cv2.cvtColor(frame, color)
    blur = cv2.blur(hsv, (5, 5))
    mask = cv2.inRange(blur, lower, upper)
    # The third positional argument of cv2.erode is dst, so the iteration count
    # must be passed by keyword; the eroded mask is the one returned.
    eroded = cv2.erode(mask, np.ones((5, 5), np.uint8), iterations=3)
    return eroded


def normalize_rotated_rect_angle(angle, width, height):
    return 90 - angle if (width < height) else -angle
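# Note on normalize_rotated_rect_angle: cv2.minAreaRect() reports its angle in
# the range [-90, 0), and which side it calls "width" depends on the box's
# orientation rather than on which side is longer. Folding the width/height
# comparison into the angle, as above, turns that ambiguous value into a single
# angle that separates a strip leaning one way from a strip leaning the other,
# which is what check_left_angle and check_right_angle below rely on
# (presumably to tell apart the two tilted tape strips of the target).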


def check_aspect_ratio(width, height):
    # Reject near-square boxes (ratio 0.8-1.2) while still requiring the ratio
    # to fall inside the 0.3-2.7 window.
    aspect_ratio = width / height
    return not (0.8 < aspect_ratio < 1.2) and (0.3 < aspect_ratio < 2.7)


def check_area(width, height):
    area = width * height
    return area > min_area


def check_left_angle(angle):
    print("Left angle: " + str(angle))
    return 50 < angle <= 90


def check_right_angle(angle):
    print("Right angle: " + str(angle))
    return 0 < angle < 30 or 80 < angle < 120


def filter_contour(contour):
    return filter_bounding_rect(cv2.boundingRect(contour))


def filter_bounding_rect(bounding_rect):
    x, y, width, height = bounding_rect
    perimeter = ((2 * width) + (2 * height))
    return check_area(width, height) and check_aspect_ratio(width, height) and (perimeter > 10)
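# Quick worked example for the filters above (illustrative numbers): a bounding
# rect 12 wide by 30 tall has area 360 (> min_area of 150), aspect ratio 0.4
# (not near-square, and inside the 0.3-2.7 window) and perimeter 84 (> 10), so
# it is kept; a 20 x 20 rect has aspect ratio 1.0 and is rejected by
# check_aspect_ratio even though its area of 400 passes check_area.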


def angle_to_target(frame, center_x, center_y):
    # 2017 formula
    # height, width, channel = frame.shape
    # pixel_offset = width / 2 - center_x
    # return 73 * pixel_offset / width

    # 2018/19 formula
    _, width, _ = frame.shape
    return ((center_x / width) * field_of_view) - field_of_view / 2
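# Worked example for angle_to_target (illustrative numbers): with a 320-pixel
# wide frame and field_of_view = 60, a target centered at x = 160 maps to
# (160 / 320) * 60 - 30 = 0 degrees (dead ahead), x = 240 maps to +15 degrees
# and x = 80 maps to -15 degrees.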


def find_filter_sort_contour(mask):
    # [-2] keeps this working across OpenCV versions whose findContours returns
    # either (contours, hierarchy) or (image, contours, hierarchy).
    contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Sort the contours left to right by the x of their bounding boxes.
    contours = sorted(contours, key=lambda ctr: cv2.boundingRect(ctr)[0])

    for index, contour in enumerate(contours):
        x, y, width, height = cv2.boundingRect(contour)
        cv2.putText(mask, str(index), (x, y + height + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1,
                    cv2.LINE_AA)

    contours = list(filter(lambda ctr: filter_contour(ctr), contours))
    return contours


def pair_contours(mask, contours):
    # Walk the contours left to right; whenever a left-leaning strip is found,
    # scan the contours to its right for the first right-leaning strip and pair
    # the two bounding rects.
    pairs = []

    for index, cnt in enumerate(contours):
        rect = cv2.minAreaRect(cnt)
        (x, y), (width, height), rect_angle = rect

        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(mask, [box], 0, (0, 0, 255), 2)

        angle = normalize_rotated_rect_angle(rect_angle, width, height)

        end = len(contours) - 1

        if index < end:
            if check_left_angle(angle):
                current_index_check = index + 1

                while current_index_check <= end:
                    next_rect = cv2.minAreaRect(contours[current_index_check])
                    (next_x, next_y), (next_width, next_height), next_rect_angle = next_rect
                    angle = normalize_rotated_rect_angle(next_rect_angle, next_width, next_height)

                    print("Angle " + str(angle))
                    print("Aspect Ratio " + str(next_width/next_height))

                    if check_area(width, height) and check_right_angle(angle): #and check_aspect_ratio(next_width, next_height):
                        # Pair with the contour that actually passed the check.
                        pairs.append((cv2.boundingRect(cnt), cv2.boundingRect(contours[current_index_check])))
                        break
                    else:
                        current_index_check += 1

    return pairs


# Focal lengths in pixels (presumably calibrated for this camera mode)
focal_length_width = 601.6607142857143
focal_length_height = 371.4004329004329

# Physical size of a single tape strip (presumably inches: 2 x 5.5)
target_width = 2
target_height = 5.5


def find_distance(pair):
    # Pinhole-model estimate: distance = real_size * focal_length / pixel_size,
    # averaged over the width- and height-based estimates of the two strips.
    left_rect, right_rect = pair

    left_x, left_y, left_width, left_height = left_rect
    right_x, right_y, right_width, right_height = right_rect

    distance_from_width = (target_width * focal_length_width) / ((left_width + right_width) / 2)
    distance_from_height = (target_height * focal_length_height) / ((left_height + right_height) / 2)

    return (distance_from_height + distance_from_width) / 2
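# Worked example for find_distance (illustrative numbers): if each strip's
# bounding box is roughly 40 px wide and 68 px tall, then
#   distance_from_width  = 2   * 601.66 / 40 ~= 30.1
#   distance_from_height = 5.5 * 371.40 / 68 ~= 30.0
# so the returned estimate is about 30, in the same units as target_width and
# target_height.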


def find_target_info(frame, mask, pairs):
    target_info = []

    for pair in pairs:
        left_rect, right_rect = pair

        left_x, left_y, left_width, left_height = left_rect
        right_x, right_y, right_width, right_height = right_rect

        # Center of the full target, from the left strip's left edge to the
        # right strip's right edge (and top to bottom).
        middle_of_rect_x = (left_x + (right_x + right_width)) / 2
        middle_of_rect_y = (left_y + (right_y + right_height)) / 2

        angle = angle_to_target(frame, middle_of_rect_x, middle_of_rect_y)
        distance = find_distance(pair)

        target_info.append((distance, angle))

        # cv2.rectangle(frame, (int(left_x), int(left_y)), (int(right_x + right_width), int(right_y + right_height)), (0, 255, 0))
        cv2.putText(frame, "Angle: " + str(angle), (left_x, left_y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1,
                    cv2.LINE_AA)

        cv2.putText(frame, "Distance: " + str(distance), (left_x, left_y - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (255, 255, 255), 1,
                    cv2.LINE_AA)

    return target_info


def quad_fit(contour, approx_dp_error):
    """Simple polygon fit to contour with error related to perimeter"""
    peri = cv2.arcLength(contour, True)
    return cv2.approxPolyDP(contour, approx_dp_error * peri, True)


def sort_corners(cnrlist):
    """Sort a list of 4 corners so that it goes in a known order. Does it in place!!"""
    cnrlist.sort()
    # now, swap the pairs to make sure in proper Y order
    if cnrlist[0][1] > cnrlist[1][1]:
        cnrlist[0], cnrlist[1] = cnrlist[1], cnrlist[0]
    if cnrlist[2][1] < cnrlist[3][1]:
        cnrlist[2], cnrlist[3] = cnrlist[3], cnrlist[2]
    return
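# Worked example for sort_corners (illustrative coordinates): starting from
# [(10, 50), (10, 10), (60, 12), (60, 48)], the in-place sort and swaps leave
# [(10, 10), (10, 50), (60, 48), (60, 12)], i.e. top-left, bottom-left,
# bottom-right, top-right in image coordinates (y grows downward).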


def pipeline(inimg):
    mask = preprocess_img(inimg)
    contours = find_filter_sort_contour(mask)
    pairs = pair_contours(inimg, contours)
    target_infos = find_target_info(inimg, mask, pairs)

    return mask, pairs, target_infos


class TargetDetector:
    # Constructor
    def __init__(self):
        # USB send frame decimation
        # Reduces send rate by this factor to limit USB bandwidth at high process rates
        self.frame_dec_factor = 3  # At 30FPS, this still delivers 10FPS to the driver

        # Target information
        self.target_angle = 0.0
        self.target_distance = 0.0
        self.target_available = False

        # Real-world dimensions of the vision target
        # These are the full dimensions around both strips
        self.TARGET_WIDTH = 14.627  # inches
        self.TARGET_HEIGHT = 5.826  # inches
        self.TARGET_STRIP_WIDTH = 2.0  # inches

        # Counterclockwise starting from top right?
        # Clockwise from bottom right???????
        #self.target_coords = np.array(
        #    [[-self.TARGET_WIDTH / 2.0, self.TARGET_HEIGHT / 2.0, 0.0],
        #     [-self.TARGET_WIDTH / 2.0, -self.TARGET_HEIGHT / 2.0, 0.0],
        #     [self.TARGET_WIDTH / 2.0, -self.TARGET_HEIGHT / 2.0, 0.0],
        #     [self.TARGET_WIDTH / 2.0, self.TARGET_HEIGHT / 2.0, 0.0]]
        #)

        self.target_coords = np.array(
            [[-self.TARGET_WIDTH / 2.0, -self.TARGET_HEIGHT / 2.0, 0.0],
             [-self.TARGET_WIDTH / 2.0, self.TARGET_HEIGHT / 2.0, 0.0],
             [self.TARGET_WIDTH / 2.0, self.TARGET_HEIGHT / 2.0, 0.0],
             [self.TARGET_WIDTH / 2.0, -self.TARGET_HEIGHT / 2.0, 0.0]]
        )
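        # Note: the row order of target_coords above should correspond, point
        # for point, to the image_corners array built in process() below
        # (top-left, bottom-left, bottom-right, top-right of the full target in
        # image coordinates); solvePnP only requires the two orderings to match.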

    # ###################################################################################################
    ## Load camera calibration from JeVois share directory
    def loadCameraCalibration(self, w, h):
        cpf = "/jevois/share/camera/calibration{}x{}.yaml".format(w, h)
        fs = cv2.FileStorage(cpf, cv2.FILE_STORAGE_READ)

        if fs.isOpened():
            self.camMatrix = fs.getNode("camera_matrix").mat()
            self.distCoeffs = fs.getNode("distortion_coefficients").mat()
            jevois.LINFO("Loaded camera calibration from {}".format(cpf))
        else:
            jevois.LFATAL("Failed to read camera parameters from file [{}]".format(cpf))

    # ###################################################################################################
    ## Send serial messages, one per object
    def sendAllSerial(self, w, h, hlist, rvecs, tvecs):
        idx = 0
        for c in hlist:
            # Compute quaternion: FIXME need to check!
            tv = tvecs[idx]
            # rvec is a Rodrigues rotation vector: its direction is the rotation
            # axis and its magnitude is the rotation angle in radians.
            axis = rvecs[idx]
            angle = (axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2]) ** 0.5

            # This code lifted from pyquaternion from_axis_angle:
            # q = (cos(theta/2), sin(theta/2) * unit_axis)
            mag_sq = axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2]
            if abs(1.0 - mag_sq) > 1e-12: axis = axis / (mag_sq ** 0.5)
            theta = angle / 2.0
            r = math.cos(theta)
            i = axis * math.sin(theta)
            q = (r, i[0], i[1], i[2])

            jevois.sendSerial("D3 {} {} {} {} {} {} {} {} {} {} OBJ6D".
                              format(np.asscalar(tv[0]), np.asscalar(tv[1]), np.asscalar(tv[2]),  # position
                                     self.TARGET_WIDTH, self.TARGET_HEIGHT, 1.0,  # size
                                     r, np.asscalar(i[0]), np.asscalar(i[1]), np.asscalar(i[2])))  # pose
            idx += 1

    def draw(self, img, corners, imgpts):
        # Draw three colored lines from the first corner to the first three
        # projected points (BGR: blue, green, red) and return the image.
        corner = tuple(corners[0].ravel())

        corner = (int(corner[0]), int(corner[1]))

        point1 = tuple(imgpts[0].ravel())
        point2 = tuple(imgpts[1].ravel())
        point3 = tuple(imgpts[2].ravel())

        point1 = (int(point1[0]), int(point1[1]))
        point2 = (int(point2[0]), int(point2[1]))
        point3 = (int(point3[0]), int(point3[1]))

        img = cv2.line(img, corner, point1, (255, 0, 0), 5)
        img = cv2.line(img, corner, point2, (0, 255, 0), 5)
        img = cv2.line(img, corner, point3, (0, 0, 255), 5)

        return img

    def drawDetections(self, outimg, hlist, rvecs=None, tvecs=None):
        # Show trihedron and parallelepiped centered on object:
        hw = self.TARGET_WIDTH * 0.5
        hh = self.TARGET_HEIGHT * 0.5
        dd = -max(hw, hh)
        i = 0
        empty = np.array([0.0, 0.0, 0.0])

        # NOTE: this code is similar to FirstVision, but in the present module we only have at most one object in the list
        # (the window, if detected):
        for obj in hlist:
            # skip those for which solvePnP failed:
            if np.array_equal(rvecs[i], empty):
                i += 1
                continue
            # This could throw some overflow errors as we convert the coordinates to int, if the projection gets
            # singular because of noisy detection:
            try:
                # Project axis points:
                axisPoints = np.array([(0.0, 0.0, 0.0), (hw, 0.0, 0.0), (0.0, hh, 0.0), (0.0, 0.0, dd)])
                imagePoints, jac = cv2.projectPoints(axisPoints, rvecs[i], tvecs[i], self.camMatrix, self.distCoeffs)

                # Draw axis lines:
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5), int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[1][0, 0] + 0.5), int(imagePoints[1][0, 1] + 0.5),
                                2, jevois.YUYV.MedPurple)
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5), int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[2][0, 0] + 0.5), int(imagePoints[2][0, 1] + 0.5),
                                2, jevois.YUYV.MedGreen)
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5), int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[3][0, 0] + 0.5), int(imagePoints[3][0, 1] + 0.5),
                                2, jevois.YUYV.MedGrey)

                # Also draw a parallelepiped: NOTE: contrary to FirstVision, here we draw it going into the object, as
                # opposed to sticking out of it (we just negate Z for that):
                cubePoints = np.array([(-hw, -hh, 0.0), (hw, -hh, 0.0), (hw, hh, 0.0), (-hw, hh, 0.0),
                                       (-hw, -hh, -dd), (hw, -hh, -dd), (hw, hh, -dd), (-hw, hh, -dd)])
                cu, jac2 = cv2.projectPoints(cubePoints, rvecs[i], tvecs[i], self.camMatrix, self.distCoeffs)

                # Round all the coordinates and cast to int for drawing:
                cu = np.rint(cu)

                # Draw parallelepiped lines:
                jevois.drawLine(outimg, int(cu[0][0, 0]), int(cu[0][0, 1]), int(cu[1][0, 0]), int(cu[1][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[1][0, 0]), int(cu[1][0, 1]), int(cu[2][0, 0]), int(cu[2][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[2][0, 0]), int(cu[2][0, 1]), int(cu[3][0, 0]), int(cu[3][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[3][0, 0]), int(cu[3][0, 1]), int(cu[0][0, 0]), int(cu[0][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[4][0, 0]), int(cu[4][0, 1]), int(cu[5][0, 0]), int(cu[5][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[5][0, 0]), int(cu[5][0, 1]), int(cu[6][0, 0]), int(cu[6][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[6][0, 0]), int(cu[6][0, 1]), int(cu[7][0, 0]), int(cu[7][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[7][0, 0]), int(cu[7][0, 1]), int(cu[4][0, 0]), int(cu[4][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[0][0, 0]), int(cu[0][0, 1]), int(cu[4][0, 0]), int(cu[4][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[1][0, 0]), int(cu[1][0, 1]), int(cu[5][0, 0]), int(cu[5][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[2][0, 0]), int(cu[2][0, 1]), int(cu[6][0, 0]), int(cu[6][0, 1]),
                                1, jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[3][0, 0]), int(cu[3][0, 1]), int(cu[7][0, 0]), int(cu[7][0, 1]),
                                1, jevois.YUYV.LightGreen)
            except:
                pass

            i += 1

    # Process function with no USB output
    def processNoUSB(self, inframe):
        self.target_available = False

        inimg = inframe.getCvBGR()
        h, w, _ = inimg.shape

        if not hasattr(self, 'camMatrix'):
            self.loadCameraCalibration(w, h)

        mask, pairs, target_infos = pipeline(inimg)

        if len(target_infos) > 0:
            self.target_available = True

            closest_pair = min(target_infos, key=lambda target_info: target_info[1])
            self.target_distance = closest_pair[0]
            self.target_angle = closest_pair[1]

    # Process function with USB output
    def process(self, inframe, outframe):
        self.target_available = False

        inimg = inframe.getCvBGR()
        h, w, _ = inimg.shape

        if not hasattr(self, 'camMatrix'):
            self.loadCameraCalibration(w, h)

        mask, pairs, target_infos = pipeline(inimg)

        if len(target_infos) > 0:
            self.target_available = True

            closest_pair = min(target_infos, key=lambda target_info: target_info[1])
            self.target_distance = closest_pair[0]
            self.target_angle = closest_pair[1]

        rvecs = []
        tvecs = []
        corners_list = []  # image corners of each pair, kept for drawing below

        for pair in pairs:
            left_rect, right_rect = pair

            left_x, left_y, left_width, left_height = left_rect
            right_x, right_y, right_width, right_height = right_rect

            # Outer corners of the full target, ordered to match self.target_coords:
            # top-left, bottom-left, bottom-right, top-right.
            image_corners = np.array([[left_x, left_y],
                                      [left_x, left_y + left_height],
                                      [right_x + right_width, right_y + right_height],
                                      [right_x + right_width, right_y]
                                      ], dtype=np.float)
            corners_list.append(image_corners)

            retval, rvec, tvec = cv2.solvePnP(self.target_coords, image_corners, self.camMatrix, self.distCoeffs)

            if retval:
                rvecs.append(rvec)
                tvecs.append(tvec)
            else:
                rvecs.append(np.array([0.0, 0.0, 0.0]))
                tvecs.append(np.array([0.0, 0.0, 0.0]))

        #self.drawDetections(inimg, pairs, rvecs, tvecs)
        #self.sendAllSerial(w, h, pairs, rvecs, tvecs)

        empty = np.array([0.0, 0.0, 0.0])
        axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, 3]]).reshape(-1, 3)

        for index, obj in enumerate(pairs):
            if np.array_equal(rvecs[index], empty):
                continue

            # Project the 3-D axis end points (the axis array defined above) for
            # this pair's pose.
            imgpts, jac = cv2.projectPoints(axis, rvecs[index], tvecs[index], self.camMatrix, self.distCoeffs)

            # Draw using this pair's own corners rather than whatever
            # image_corners was last set to in the loop above.
            inimg = self.draw(inimg, corners_list[index], imgpts)

        # jevois.sendSerial(self.target())

        # Convert our output image to video output format and send to host over USB:
        outframe.sendCv(inimg)

    # Parse a serial command forwarded to us by the JeVois Engine, return a string
    def parseSerial(self, command):
        if command.strip() == "":
            # For some reason, the jevois engine sometimes sends empty strings.
            # Just do nothing in this case.
            return ""

        if command == "target":
            return self.target()
        return "ERR: Unsupported command."

    # Return a string that describes the custom commands we support, for the JeVois help message
    def supportedCommands(self):
        # use \n separator if your module supports several commands
        return "target - print target information"

    # Internal method that gets invoked as a custom command
    def target(self):
        return "{{{},{},{}}}\n".format(("T" if self.target_available else "F"), self.target_distance, self.target_angle)
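    # Example (illustrative values): with target_available=True,
    # target_distance=30.1 and target_angle=-4.5, target() returns the string
    # "{T,30.1,-4.5}\n"; before any target has been seen it returns
    # "{F,0.0,0.0}\n".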