Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import cv2
- import numpy as np
- from matplotlib import pyplot as plt
- if __name__ == "__main__":
def zad1():
    """Detect FAST keypoints on a static image and display them until 'q' is pressed."""
    # Load the test image in BGR colour.
    img = cv2.imread("k.jpg", cv2.IMREAD_COLOR)
    if img is None:
        # imread returns None (does not raise) when the file is missing/unreadable.
        raise FileNotFoundError("could not read k.jpg")
    # FAST corner detector with default threshold and non-max suppression.
    fast = cv2.FastFeatureDetector_create()
    # Find keypoints and draw them onto the image.
    kp = fast.detect(img, None)
    img = cv2.drawKeypoints(img, kp, None)
    print(len(kp))
    # Show the result until the user presses 'q'.
    key = ord('a')
    while key != ord('q'):
        cv2.imshow("Zad1", img)
        key = cv2.waitKey(0)
    cv2.destroyAllWindows()  # release the HighGUI window when done
def zad2():
    """Match ORB features between an image and its 90-degree-rotated copy."""
    img1 = cv2.imread("k.jpg", cv2.IMREAD_COLOR)
    if img1 is None:
        # imread returns None (does not raise) when the file is missing/unreadable.
        raise FileNotFoundError("could not read k.jpg")
    # shape is (rows, cols, channels); channel count is unused here.
    w, h, _ = img1.shape
    # Rotation matrix (the original comment said "translation" — it is a
    # rotation): rotate 90 degrees about the image centre, scale 1.
    M = cv2.getRotationMatrix2D((h / 2, w / 2), 90, 1)
    # Second image: the original rotated by 90 degrees.
    img2 = cv2.warpAffine(img1, M, (h, w))
    # ORB detector limited to the 50 strongest keypoints.
    orb = cv2.ORB_create(nfeatures=50)
    # Detect keypoints in both images.
    kp1 = orb.detect(img1, None)
    kp2 = orb.detect(img2, None)
    # Compute binary descriptors for each keypoint set.
    kp1, des1 = orb.compute(img1, kp1)
    kp2, des2 = orb.compute(img2, kp2)
    # Brute-force matcher; Hamming distance is the correct norm for ORB's
    # binary descriptors.
    bfm = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bfm.match(des1, des2)
    # Side-by-side visualisation of the matched keypoint pairs.
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches, None)
    # Show the result until the user presses 'q'.
    key = ord('a')
    while key != ord('q'):
        cv2.imshow("Dopasowania orb", img3)
        key = cv2.waitKey(0)
    cv2.destroyAllWindows()  # release the HighGUI window when done
def zad3():
    """Detect faces and eyes with Haar cascades on a still image; show the crops."""
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    img = cv2.imread("t.jpg", cv2.IMREAD_COLOR)
    if img is None:
        # imread returns None (does not raise) when the file is missing/unreadable.
        raise FileNotFoundError("could not read t.jpg")
    # Haar cascades operate on grayscale input.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.3, minNeighbors=5.
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # Pre-initialise the crops: the original raised NameError in the display
    # loop whenever no face (or no eyes) was detected.
    twarz = None  # crop of the last detected face
    oczy = None   # crop of the last detected eye
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        twarz = img[y:y + h, x:x + w]
        # Eye detection runs only inside the face region of interest.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            oczy = roi_color[ey:ey + eh, ex:ex + ew]
    # Show results until the user presses 'q'; skip crops that were never found.
    key = ord('a')
    while key != ord('q'):
        if oczy is not None:
            cv2.imshow("Oczy", oczy)
        if twarz is not None:
            cv2.imshow("Twarz", twarz)
        cv2.imshow("Obraz", img)
        key = cv2.waitKey(30)
    cv2.destroyAllWindows()  # release the HighGUI windows when done
def zad4():
    """Live face/eye detection from the default webcam; press 'q' to quit."""
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    cap = cv2.VideoCapture(0)
    # Pre-initialise the crops: the original raised NameError on every frame
    # before the first face (or first eye) was detected.
    twarz = None  # crop of the last detected face
    oczy = None   # crop of the last detected eye
    key = ord('a')
    try:
        while key != ord('q'):
            ret, img = cap.read()
            if not ret:
                # Camera unavailable or stream ended; cvtColor on None would crash.
                break
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # scaleFactor=1.3, minNeighbors=5.
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                twarz = img[y:y + h, x:x + w]
                # Eye detection runs only inside the face region of interest.
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                eyes = eye_cascade.detectMultiScale(roi_gray)
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
                    oczy = roi_color[ey:ey + eh, ex:ex + ew]
            # Skip crops that have not been found yet.
            if oczy is not None:
                cv2.imshow("Oczy", oczy)
            if twarz is not None:
                cv2.imshow("Twarz", twarz)
            cv2.imshow("Obraz", img)
            key = cv2.waitKey(30)
    finally:
        # Free the camera and windows even if the loop errors out.
        cap.release()
        cv2.destroyAllWindows()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement