Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import math
- import libjevois as jevois
- import cv2
- import numpy as np
# HSV threshold bounds for the retro-reflective tape lit by the green LED ring.
lower_green = np.array([60, 170, 70])
upper_green = np.array([180, 255, 255])
# Minimum bounding-rect area (px^2) for a contour to count as a target strip.
min_area = 150
# Camera horizontal field of view, in degrees — assumed 60; TODO confirm for this sensor.
field_of_view = 60
def preprocess_img(frame, color=cv2.COLOR_BGR2HSV, lower=lower_green, upper=upper_green):
    """Threshold a BGR frame into a binary mask of in-range (green) pixels.

    Converts to HSV, blurs to suppress noise, then applies cv2.inRange with
    the given bounds. Returns the binary mask.
    """
    hsv = cv2.cvtColor(frame, color)
    blur = cv2.blur(hsv, (5, 5))
    mask = cv2.inRange(blur, lower, upper)
    # BUGFIX: the original computed cv2.erode(mask, np.ones((5, 5)), 3) into a
    # local and then returned the un-eroded mask, so the erosion was dead work.
    # (The literal 3 also filled cv2.erode's `dst` slot, not `iterations`.)
    # Removed rather than enabled, to preserve the observed behavior downstream;
    # NOTE(review): if erosion was intended, return
    # cv2.erode(mask, np.ones((5, 5), np.uint8), iterations=3) instead.
    return mask
def normalize_rotated_rect_angle(angle, width, height):
    """Convert a cv2.minAreaRect angle to a tilt angle independent of box orientation.

    OpenCV reports the angle relative to whichever side it calls "width";
    when the box is taller than it is wide, the reading is rotated by 90 degrees.
    """
    if width < height:
        return 90 - angle
    return -angle
def check_aspect_ratio(width, height):
    """True when width/height looks like a tilted strip: plausible but not near-square."""
    ratio = width / height
    near_square = 0.8 < ratio < 1.2
    plausible = 0.3 < ratio < 2.7
    return plausible and not near_square
def check_area(width, height):
    """True when the bounding box covers more than min_area pixels."""
    return (width * height) > min_area
def check_left_angle(angle):
    """True when the tilt angle matches a left-hand target strip: (50, 90].

    Logs the angle to stdout as a debugging aid.
    """
    print("Left angle: " + str(angle))
    if angle <= 50 or angle > 90:
        return False
    return True
def check_right_angle(angle):
    """True when the tilt angle matches a right-hand target strip.

    Accepts either band (0, 30) or (80, 120); logs the angle to stdout.
    """
    print("Right angle: " + str(angle))
    in_low_band = 0 < angle < 30
    in_high_band = 80 < angle < 120
    return in_low_band or in_high_band
def filter_contour(contour):
    """Apply the bounding-rect filters to a raw contour."""
    rect = cv2.boundingRect(contour)
    return filter_bounding_rect(rect)
def filter_bounding_rect(bounding_rect):
    """True when an (x, y, w, h) rect passes the area, aspect-ratio and size filters."""
    _, _, width, height = bounding_rect
    perimeter = 2 * (width + height)
    if perimeter <= 10:
        return False
    return check_area(width, height) and check_aspect_ratio(width, height)
def angle_to_target(frame, center_x, center_y):
    """Horizontal angle in degrees from the camera axis to pixel column center_x.

    Maps the pixel position linearly across field_of_view; center_y is accepted
    but unused by the current (2018/19) formula.
    """
    # 2017 formula, kept for reference:
    # height, width, channel = frame.shape
    # pixel_offset = width / 2 - center_x
    # return 73 * pixel_offset / width
    frame_width = frame.shape[1]
    normalized = center_x / frame_width
    return normalized * field_of_view - field_of_view / 2
def find_filter_sort_contour(mask):
    """Find contours in mask, sort them left-to-right, and keep plausible strips.

    Also draws each contour's index onto the mask as a debug overlay.
    """
    # [-2] keeps compatibility across OpenCV versions whose findContours
    # returns a 2-tuple or a 3-tuple.
    found = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    ordered = sorted(found, key=lambda c: cv2.boundingRect(c)[0])
    for idx, cnt in enumerate(ordered):
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.putText(mask, str(idx), (x, y + h + 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1,
                    cv2.LINE_AA)
    return [cnt for cnt in ordered if filter_contour(cnt)]
def pair_contours(mask, contours):
    """Pair each left-tilted strip with the nearest right-tilted strip after it.

    contours must already be sorted left-to-right (see find_filter_sort_contour).
    Returns a list of (left_bounding_rect, right_bounding_rect) tuples, and draws
    every candidate's rotated rect on mask as a debug overlay.
    """
    pairs = []
    last = len(contours) - 1
    for index, cnt in enumerate(contours):
        rect = cv2.minAreaRect(cnt)
        (x, y), (width, height), rect_angle = rect
        # Debug overlay: rotated bounding box of every candidate strip.
        box = np.int0(cv2.boxPoints(rect))
        cv2.drawContours(mask, [box], 0, (0, 0, 255), 2)
        angle = normalize_rotated_rect_angle(rect_angle, width, height)
        if index < last and check_left_angle(angle):
            # Scan rightwards for the first strip tilted the opposite way.
            candidate = index + 1
            while candidate <= last:
                next_rect = cv2.minAreaRect(contours[candidate])
                (_, _), (next_width, next_height), next_rect_angle = next_rect
                next_angle = normalize_rotated_rect_angle(next_rect_angle, next_width, next_height)
                print("Angle " + str(next_angle))
                print("Aspect Ratio " + str(next_width / next_height))
                # BUGFIX: vet the candidate's own size (original passed the left
                # strip's width/height), and pair with the candidate that matched
                # (original always paired with contours[index + 1]).
                if check_area(next_width, next_height) and check_right_angle(next_angle):
                    pairs.append((cv2.boundingRect(cnt), cv2.boundingRect(contours[candidate])))
                    break
                candidate += 1
    return pairs
# Camera focal lengths in pixels — presumably from calibration at the working
# resolution; TODO confirm they match the resolution used at runtime.
focal_length_width = 601.6607142857143
focal_length_height = 371.4004329004329
# Physical size of a single target strip, in inches.
target_width = 2
target_height = 5.5
def find_distance(pair):
    """Estimate the distance to a target from a (left_rect, right_rect) pair.

    Uses the pinhole model on the average strip width and the average strip
    height, then averages the two estimates.
    """
    left_rect, right_rect = pair
    _, _, left_w, left_h = left_rect
    _, _, right_w, right_h = right_rect
    mean_width = (left_w + right_w) / 2
    mean_height = (left_h + right_h) / 2
    from_width = target_width * focal_length_width / mean_width
    from_height = target_height * focal_length_height / mean_height
    return (from_width + from_height) / 2
def find_target_info(frame, mask, pairs):
    """Compute (distance, angle) for each rect pair and annotate the frame.

    Returns a list of (distance, angle) tuples, one per pair. mask is accepted
    for interface compatibility but unused here.
    """
    target_info = []
    for pair in pairs:
        left_rect, right_rect = pair
        left_x, left_y, left_width, left_height = left_rect
        right_x, right_y, right_width, right_height = right_rect
        # Center of the full target: left edge of the left strip to the right
        # edge of the right strip.
        middle_of_rect_x = (left_x + (right_x + right_width)) / 2
        # BUGFIX: original used right_width here (copy/paste); the vertical
        # midpoint needs the rect's height.
        middle_of_rect_y = (left_y + (right_y + right_height)) / 2
        angle = angle_to_target(frame, middle_of_rect_x, middle_of_rect_y)
        distance = find_distance(pair)
        target_info.append((distance, angle))
        # Debug overlay above the left strip.
        cv2.putText(frame, "Angle: " + str(angle), (left_x, left_y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1,
                    cv2.LINE_AA)
        cv2.putText(frame, "Distance: " + str(distance), (left_x, left_y - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1,
                    cv2.LINE_AA)
    return target_info
def quad_fit(contour, approx_dp_error):
    """Polygon-approximate a closed contour, with error scaled by its perimeter."""
    perimeter = cv2.arcLength(contour, True)
    epsilon = approx_dp_error * perimeter
    return cv2.approxPolyDP(contour, epsilon, True)
def sort_corners(cnrlist):
    """Order four corners in place into a known winding; returns None.

    After sorting by x (then y), the left-hand pair is forced into increasing-Y
    order and the right-hand pair into decreasing-Y order.
    """
    cnrlist.sort()
    left_a, left_b, right_a, right_b = cnrlist
    if left_a[1] > left_b[1]:
        cnrlist[0], cnrlist[1] = left_b, left_a
    if right_a[1] < right_b[1]:
        cnrlist[2], cnrlist[3] = right_b, right_a
    return
def pipeline(inimg):
    """Run the full detection pipeline: threshold, contour, pair, measure.

    Returns (mask, pairs, target_infos).
    """
    mask = preprocess_img(inimg)
    candidates = find_filter_sort_contour(mask)
    pairs = pair_contours(inimg, candidates)
    infos = find_target_info(inimg, mask, pairs)
    return mask, pairs, infos
class TargetDetector:
    """JeVois vision module that detects paired retro-reflective target strips.

    Tracks the most recent target's distance and angle and reports them over
    serial via the custom "target" command.
    """

    def __init__(self):
        # USB send frame decimation.
        # Reduces send rate by this factor to limit USB bandwidth at high process rates.
        self.frame_dec_factor = 3  # At 30FPS, this still delivers 10FPS to the driver
        # Most recent target measurement.
        self.target_angle = 0.0
        self.target_distance = 0.0
        self.target_available = False
        # Real-world dimensions of the switch target.
        # These are the full dimensions around both strips.
        self.TARGET_WIDTH = 14.627  # inches
        self.TARGET_HEIGHT = 5.826  # inches
        self.TARGET_STRIP_WIDTH = 2.0  # inches
        # Object-space corners, ordered to match the image corners built in
        # process(): top-left, bottom-left, bottom-right, top-right.
        self.target_coords = np.array(
            [[-self.TARGET_WIDTH / 2.0, -self.TARGET_HEIGHT / 2.0, 0.0],
             [-self.TARGET_WIDTH / 2.0, self.TARGET_HEIGHT / 2.0, 0.0],
             [self.TARGET_WIDTH / 2.0, self.TARGET_HEIGHT / 2.0, 0.0],
             [self.TARGET_WIDTH / 2.0, -self.TARGET_HEIGHT / 2.0, 0.0]]
        )

    # ###################################################################################################
    ## Load camera calibration from JeVois share directory
    def loadCameraCalibration(self, w, h):
        """Load camMatrix/distCoeffs for a w x h frame; LFATAL on failure."""
        cpf = "/jevois/share/camera/calibration{}x{}.yaml".format(w, h)
        fs = cv2.FileStorage(cpf, cv2.FILE_STORAGE_READ)
        if fs.isOpened():
            self.camMatrix = fs.getNode("camera_matrix").mat()
            self.distCoeffs = fs.getNode("distortion_coefficients").mat()
            jevois.LINFO("Loaded camera calibration from {}".format(cpf))
        else:
            jevois.LFATAL("Failed to read camera parameters from file [{}]".format(cpf))

    # ###################################################################################################
    ## Send serial messages, one per object
    def sendAllSerial(self, w, h, hlist, rvecs, tvecs):
        """Send one D3 serial message (position, size, quaternion pose) per object."""
        for idx, c in enumerate(hlist):
            tv = tvecs[idx]
            axis = rvecs[idx]
            # Rodrigues vector: direction is the rotation axis, norm is the angle.
            mag_sq = axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2]
            angle = mag_sq ** 0.5
            # Axis-angle -> quaternion, lifted from pyquaternion from_axis_angle.
            # FIXME: quaternion conversion still needs validation.
            if abs(1.0 - mag_sq) > 1e-12:
                axis = axis / (mag_sq ** 0.5)
            theta = angle / 2.0
            r = math.cos(theta)
            i = axis * math.sin(theta)
            # BUGFIX: np.asscalar was removed in NumPy 1.23; use ndarray.item().
            jevois.sendSerial("D3 {} {} {} {} {} {} {} {} {} {} OBJ6D".
                              format(tv[0].item(), tv[1].item(), tv[2].item(),  # position
                                     self.TARGET_WIDTH, self.TARGET_HEIGHT, 1.0,  # size
                                     r, i[0].item(), i[1].item(), i[2].item()))  # pose

    def draw(self, img, corners, imgpts):
        """Draw the three projected axis lines (blue/green/red) from the first corner."""
        corner = tuple(corners[0].ravel())
        corner = (int(corner[0]), int(corner[1]))
        x_end = tuple(imgpts[0].ravel())
        y_end = tuple(imgpts[1].ravel())
        z_end = tuple(imgpts[2].ravel())
        img = cv2.line(img, corner, (int(x_end[0]), int(x_end[1])), (255, 0, 0), 5)
        img = cv2.line(img, corner, (int(y_end[0]), int(y_end[1])), (0, 255, 0), 5)
        img = cv2.line(img, corner, (int(z_end[0]), int(z_end[1])), (0, 0, 255), 5)
        return img

    def drawDetections(self, outimg, hlist, rvecs=None, tvecs=None):
        """Draw a trihedron and a parallelepiped for each solved pose in hlist."""
        hw = self.TARGET_WIDTH * 0.5
        hh = self.TARGET_HEIGHT * 0.5
        dd = -max(hw, hh)
        i = 0
        empty = np.array([0.0, 0.0, 0.0])
        for obj in hlist:
            # Skip objects for which solvePnP failed (marked with zero rvec):
            if np.array_equal(rvecs[i], empty):
                i += 1
                continue
            # Converting projected coordinates to int can overflow if the
            # projection goes singular on a noisy detection, so draw best-effort.
            try:
                # Project and draw the axis lines:
                axisPoints = np.array([(0.0, 0.0, 0.0), (hw, 0.0, 0.0), (0.0, hh, 0.0), (0.0, 0.0, dd)])
                imagePoints, jac = cv2.projectPoints(axisPoints, rvecs[i], tvecs[i], self.camMatrix, self.distCoeffs)
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5), int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[1][0, 0] + 0.5), int(imagePoints[1][0, 1] + 0.5),
                                2, jevois.YUYV.MedPurple)
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5), int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[2][0, 0] + 0.5), int(imagePoints[2][0, 1] + 0.5),
                                2, jevois.YUYV.MedGreen)
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5), int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[3][0, 0] + 0.5), int(imagePoints[3][0, 1] + 0.5),
                                2, jevois.YUYV.MedGrey)
                # Parallelepiped going into the object (Z negated vs FirstVision):
                cubePoints = np.array([(-hw, -hh, 0.0), (hw, -hh, 0.0), (hw, hh, 0.0), (-hw, hh, 0.0),
                                       (-hw, -hh, -dd), (hw, -hh, -dd), (hw, hh, -dd), (-hw, hh, -dd)])
                cu, jac2 = cv2.projectPoints(cubePoints, rvecs[i], tvecs[i], self.camMatrix, self.distCoeffs)
                # Round all coordinates for drawing:
                cu = np.rint(cu)
                # The 12 edges of the box: front face, back face, connectors.
                edges = [(0, 1), (1, 2), (2, 3), (3, 0),
                         (4, 5), (5, 6), (6, 7), (7, 4),
                         (0, 4), (1, 5), (2, 6), (3, 7)]
                for a, b in edges:
                    jevois.drawLine(outimg, int(cu[a][0, 0]), int(cu[a][0, 1]),
                                    int(cu[b][0, 0]), int(cu[b][0, 1]),
                                    1, jevois.YUYV.LightGreen)
            except Exception:
                # BUGFIX: was a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit. Drawing stays best-effort.
                pass
            i += 1

    def _update_target_state(self, target_infos):
        """Record distance/angle of the chosen target, if any were found."""
        if len(target_infos) > 0:
            self.target_available = True
            # NOTE(review): the "closest" pair is chosen by smallest *signed*
            # angle, not smallest |angle| or smallest distance — confirm intent.
            closest_pair = min(target_infos, key=lambda target_info: target_info[1])
            self.target_distance = closest_pair[0]
            self.target_angle = closest_pair[1]

    # Process function with no USB output
    def processNoUSB(self, inframe):
        """Run the pipeline on one frame and update target state; no video out."""
        self.target_available = False
        inimg = inframe.getCvBGR()
        h, w, _ = inimg.shape
        # Calibration is lazy-loaded once the frame size is known.
        if not hasattr(self, 'camMatrix'):
            self.loadCameraCalibration(w, h)
        mask, pairs, target_infos = pipeline(inimg)
        self._update_target_state(target_infos)

    # Process function with USB output
    def process(self, inframe, outframe):
        """Run the pipeline on one frame, draw pose axes, and stream over USB."""
        self.target_available = False
        inimg = inframe.getCvBGR()
        h, w, _ = inimg.shape
        # Calibration is lazy-loaded once the frame size is known.
        if not hasattr(self, 'camMatrix'):
            self.loadCameraCalibration(w, h)
        mask, pairs, target_infos = pipeline(inimg)
        self._update_target_state(target_infos)
        rvecs = []
        tvecs = []
        corners_list = []  # image corners per pair, reused by the draw loop below
        for pair in pairs:
            left_rect, right_rect = pair
            left_x, left_y, left_width, left_height = left_rect
            right_x, right_y, right_width, right_height = right_rect
            # Image corners matching self.target_coords order:
            # top-left, bottom-left, bottom-right, top-right.
            # BUGFIX: dtype was np.float, removed in NumPy 1.24.
            image_corners = np.array([[left_x, left_y],
                                      [left_x, left_y + left_height],
                                      [right_x + right_width, right_y + right_height],
                                      [right_x + right_width, right_y]
                                      ], dtype=np.float64)
            corners_list.append(image_corners)
            retval, rvec, tvec = cv2.solvePnP(self.target_coords, image_corners, self.camMatrix, self.distCoeffs)
            if retval:
                rvecs.append(rvec)
                tvecs.append(tvec)
            else:
                # Zero vectors mark a failed solve; skipped by the draw loop.
                rvecs.append(np.array([0.0, 0.0, 0.0]))
                tvecs.append(np.array([0.0, 0.0, 0.0]))
        empty = np.array([0.0, 0.0, 0.0])
        for index, obj in enumerate(pairs):
            if np.array_equal(rvecs[index], empty):
                continue
            imgpts, jac = cv2.projectPoints(self.target_coords, rvecs[index], tvecs[index],
                                            self.camMatrix, self.distCoeffs)
            # BUGFIX: draw each pair with its own corners; the original reused
            # whatever image_corners held after the first loop (the last pair's).
            inimg = self.draw(inimg, corners_list[index], imgpts)
        # Convert our output image to video output format and send to host over USB:
        outframe.sendCv(inimg)

    # Parse a serial command forwarded to us by the JeVois Engine, return a string
    def parseSerial(self, command):
        """Handle a custom serial command; only "target" is supported."""
        if command.strip() == "":
            # The jevois engine sometimes sends empty strings; ignore them.
            return ""
        if command == "target":
            return self.target()
        return "ERR: Unsupported command."

    # Return a string that describes the custom commands we support, for the JeVois help message
    def supportedCommands(self):
        # use \n separator if your module supports several commands
        return "target - print target information"

    # Internal method that gets invoked as a custom command
    def target(self):
        """Format the current target state as {T/F,distance,angle}\\n."""
        return "{{{},{},{}}}\n".format(("T" if self.target_available else "F"), self.target_distance, self.target_angle)
Add Comment
Please, Sign In to add comment