from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
import serial
def eye_aspect_ratio(eye):
    # compute the euclidean distances between the two sets of
    # vertical eye landmarks (x, y)-coordinates
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # compute the euclidean distance between the horizontal
    # eye landmark (x, y)-coordinates
    C = dist.euclidean(eye[0], eye[3])
    # compute the eye aspect ratio
    ear = (A + B) / (2.0 * C)
    # return the eye aspect ratio
    return ear
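# the ratio above is the eye aspect ratio (EAR) from Soukupová and Čech,
# "Real-Time Eye Blink Detection using Facial Landmarks":
#     EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||)
# where p1..p6 are the six landmarks of one eye; the value stays roughly
# constant while the eye is open and drops toward zero during a blink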
def rect_to_bb(rect):
    # take a bounding box predicted by dlib and convert it
    # to the format (x, y, w, h) as we would normally do
    # with OpenCV
    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y
    # return a tuple of (x, y, w, h)
    return (x, y, w, h)
def bb_sred(rect):
    # take a bounding box predicted by dlib and return the
    # center (x, y) of the box, in OpenCV-style coordinates
    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y
    # return the center point of the box
    return (x + w / 2.0, y + h / 2.0)
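# quick sanity check (a hypothetical rectangle, not taken from the video):
# for a dlib rect with left=10, top=20, right=110, bottom=120,
# rect_to_bb returns (10, 20, 100, 100) and bb_sred returns the
# center (60.0, 70.0)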
# let's initialize a face detector (HOG-based) and then find the facial
# landmarks on each detected face
# the pre-trained 68-point model (shape_predictor_68_face_landmarks.dat,
# on the author's machine at D:\Projects\ONTI_hack_neuro\) is passed in
# via the --shape-predictor argument, so the detector and predictor are
# created once the arguments are parsed below
cap = cv2.VideoCapture('train.mp4')
# grab the indexes of the facial landmarks for the nose and mouth
# (the eye indexes are grabbed below, next to the predictor setup)
(no1, no2) = face_utils.FACIAL_LANDMARKS_IDXS["nose"]
(mo1, mo2) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
# smallest-magnitude vertical nose-direction offset seen so far;
# 1000 is a sentinel larger than any real offset
min_offset = 1000
# serial link to the external alarm board
ser = serial.Serial('COM9', 115200, timeout=1)
ser.flushInput()
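# protocol sketch, inferred from the reads and writes in this script (not
# a formal spec): a single status byte is sent over serial, 1 while the
# eye-closure alarm is active and 0 otherwise, and whatever line the board
# sends back is drawn onto the frame via ser.readline()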
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to facial landmark predictor")
ap.add_argument("-w", "--webcam", type=int, default=0,
                help="index of webcam on system")
args = vars(ap.parse_args())
# define two constants, one for the eye aspect ratio to indicate a
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold for to set off the
# alarm
EYE_AR_THRESH = 0.25
EYE_AR_CONSEC_FRAMES = 16
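# with a typical ~30 fps webcam stream (an assumption; measure your own
# camera), 16 consecutive frames below the threshold is roughly half a
# second of closed eyes; tune both constants for your setup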
# initialize the frame counter and make sure the external alarm
# starts switched off
COUNTER = 0
ser.write(bytes([0]))
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# start the video stream thread
print("[INFO] starting video stream thread...")
vs = VideoStream(src=args["webcam"]).start()
time.sleep(1.0)
# loop over frames from the video stream
while True:
    # per-frame alert flags: ear_alarm is set when the eyes have been
    # closed for too long, pose_alert when the head pose looks dangerous
    pose_alert = False
    ear_alarm = False
    # grab the frame from the threaded video stream, resize it,
    # and convert it to grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects = detector(gray, 0)
    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0
        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
        # check to see if the eye aspect ratio is below the blink
        # threshold, and if so, increment the blink frame counter
        if ear < EYE_AR_THRESH:
            COUNTER += 1
            # if the eyes were closed for a sufficient number of
            # frames, raise the alarm
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                ear_alarm = True
                # draw an alarm on the frame
                cv2.putText(frame, "Look out!", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # otherwise, the eye aspect ratio is not below the blink
        # threshold, so reset the counter and alarm
        else:
            ear_alarm = False
            COUNTER = 0
        # show the latest line received over serial (handy for debugging)
        cv2.putText(frame, str(ser.readline()), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # estimate the head pose: pick six 2D landmark locations on the
        # frame and pair them with the matching points of a generic 3D
        # face model
        size = frame.shape
        image_points = np.array([
            shape[30],  # Nose tip
            shape[8],   # Chin
            shape[45],  # Left eye left corner
            shape[36],  # Right eye right corner
            shape[54],  # Left mouth corner
            shape[48]   # Right mouth corner
        ], dtype="double")
        # 3D model points.
        model_points = np.array([
            (0.0, 0.0, 0.0),           # Nose tip
            (0.0, -330.0, -65.0),      # Chin
            (-225.0, 170.0, -135.0),   # Left eye left corner
            (225.0, 170.0, -135.0),    # Right eye right corner
            (-150.0, -150.0, -125.0),  # Left mouth corner
            (150.0, -150.0, -125.0)    # Right mouth corner
        ])
        # Camera internals
        focal_length = size[1]
        center = (size[1] / 2, size[0] / 2)
        camera_matrix = np.array(
            [[focal_length, 0, center[0]],
             [0, focal_length, center[1]],
             [0, 0, 1]], dtype="double"
        )
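        # this is the usual pinhole approximation: the focal length is
        # taken as the frame width in pixels and the principal point as
        # the image center; since the frame was resized to width 450
        # above, the matrix here has fx = fy = 450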
- print("Camera Matrix :\n {0}".format(camera_matrix))
- dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
- (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
- dist_coeffs,
- flags=cv2.cv2.SOLVEPNP_ITERATIVE)
- print("Rotation Vector:\n {0}".format(rotation_vector))
- print("Translation Vector:\n {0}".format(translation_vector))
        # Project a 3D point (0, 0, 1000.0) onto the image plane.
        # We use this to draw a line sticking out of the nose
        (nose_end_point2D, jacobian) = cv2.projectPoints(
            np.array([(0.0, 0.0, 1000.0)]), rotation_vector,
            translation_vector, camera_matrix, dist_coeffs)
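        # the projected point sits 1000 model units in front of the nose
        # tip, so the segment drawn below points wherever the head is
        # facing; the vertical offset between its endpoints (used for the
        # alert below) grows as the head pitches up or down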
        for p in image_points:
            cv2.circle(frame, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
        p1 = (int(image_points[0][0]), int(image_points[0][1]))
        p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
        print('vector', p1, p2, [p1[0] - p2[0], p1[1] - p2[1]])
        # remember the signed vertical offset with the smallest magnitude
        # seen so far
        if abs(p1[1] - p2[1]) < min_offset:
            min_offset = p1[1] - p2[1]
        cv2.line(frame, p1, p2, (255, 0, 0), 2)
        # empirically chosen bounds on the offset that mark a dangerous pose
        if min_offset != 29 and min_offset < 38:
            print('alert')
            pose_alert = True
        else:
            pose_alert = False
    # report both alert flags and drive the external alarm over serial
    print(ear_alarm, pose_alert)
    print(min_offset)
    # _, im = cap.read()
    if ear_alarm:
        ser.write(bytes([1]))
    else:
        ser.write(bytes([0]))
    # show the frame and exit on Esc
    cv2.imshow("Output", frame)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
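# example invocation (the script name here is illustrative):
#   python drowsiness_detector.py --shape-predictor shape_predictor_68_face_landmarks.dat --webcam 0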