Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import cv2
- import numpy as np
- from matplotlib import pyplot as plt
# --- Template (pattern) matching ---
# Slide the template across the scene image and box the best-scoring location.
img = cv2.imread('test.jpg', 1)
temp = cv2.imread('template.jpg', 1)
h, w = temp.shape[:2]

# See the OpenCV docs for the other template-matching methods.
method = cv2.TM_CCORR_NORMED
res = cv2.matchTemplate(img, temp, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

# Squared-difference methods score the BEST match at the minimum of the
# result map; every other method scores it at the maximum.
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
    top_left = min_loc
else:
    top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)

# Draw the match rectangle in red (BGR) and show both images.
cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 2)
cv2.imshow('res', img)
cv2.imshow('template', temp)
cv2.waitKey()
# --- Contour matching ---
# Find the contour in a scene ('bottles.jpg') whose shape best matches the
# template object's contour ('bottle.jpg'), using Hu-moment shape distance.
img = cv2.imread('bottles.jpg', 0)
temp = cv2.imread('bottle.jpg', 0)

# Inverted threshold so dark objects become white blobs for findContours.
_, img_thresh = cv2.threshold(img, 130, 255, cv2.THRESH_BINARY_INV)
_, temp_thresh = cv2.threshold(temp, 130, 255, cv2.THRESH_BINARY_INV)

# findContours returns 3 values in OpenCV 3.x but 2 in 4.x; indexing with
# [-2] yields the contour list under either version.
img_contours = cv2.findContours(img_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
temp_contours = cv2.findContours(temp_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

# Assume the template's largest contour is the object of interest.
temp_contour = max(temp_contours, key=cv2.contourArea)

# Lower matchShapes distance == more similar shapes.
best_contour = None
best_result = float('inf')
for ic in img_contours:
    d = cv2.matchShapes(ic, temp_contour, cv2.CONTOURS_MATCH_I1, 0)
    if d < best_result:
        best_result = d
        best_contour = ic

# Convert to BGR before drawing: on a single-channel image, (0, 255, 0)
# would be interpreted as intensity 0 (black), not green.
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
temp = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
# drawContours expects a LIST of contours; a bare contour would be treated
# as a list of single-point "contours" and only dots would be drawn.
if best_contour is not None:
    cv2.drawContours(img, [best_contour], -1, (0, 255, 0), 3)
cv2.drawContours(temp, [temp_contour], -1, (0, 255, 0), 3)

# Show the annotated scene and template.
cv2.imshow('img', img)
cv2.imshow('temp', temp)
cv2.waitKey()
# --- Feature matching (ORB + FLANN) ---
# Locate the query object inside the scene via ORB keypoints, a FLANN
# matcher, Lowe's ratio test, and a RANSAC homography.
MIN_MATCH_COUNT = 10  # minimum ratio-test survivors needed for a homography

img1 = cv2.imread('box.png', 0)           # queryImage
img2 = cv2.imread('box_in_scene.png', 0)  # trainImage

# Initiate the ORB detector (binary descriptors).
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# ORB produces BINARY descriptors, so FLANN must use the LSH index
# (algorithm 6). The 'trees' parameter belongs to the KD-tree index, which
# is only valid for float descriptors such as SIFT/SURF.
FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,
                    key_size=12,
                    multi_probe_level=1)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# Keep the good matches per Lowe's ratio test. With LSH, knnMatch may
# return fewer than k neighbours, so guard the pair unpack. Comparing
# m.distance < 0.7 * n.distance also avoids a division by zero.
good = []
for pair in matches:
    if len(pair) < 2:
        continue
    m, n = pair[0], pair[1]
    if m.distance < 0.7 * n.distance:
        good.append(m)

matchesMask = None
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # RANSAC rejects outlier correspondences; M maps query -> scene.
    # findHomography can return None when no consistent model is found.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if M is not None:
        matchesMask = mask.ravel().tolist()
        # Project the query image's corners into the scene and outline them.
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 0, 3, cv2.LINE_AA)
    else:
        print ('Not enough matches are found')
else:
    print ('Not enough matches are found')

draw_params = dict(matchColor=(0, 255, 0),   # draw matches in green color
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
plt.imshow(img3, 'gray'), plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement