from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
import serial


def eye_aspect_ratio(eye):
    # compute the euclidean distances between the two sets of
    # vertical eye landmarks (x, y)-coordinates
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])

    # compute the euclidean distance between the horizontal
    # eye landmark (x, y)-coordinates
    C = dist.euclidean(eye[0], eye[3])

    # compute the eye aspect ratio
    ear = (A + B) / (2.0 * C)

    # return the eye aspect ratio
    return ear


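# a quick sanity check of the formula on hypothetical landmark
# coordinates (made-up values, not detector output): the six points
# trace an open eye, so the EAR comes out high; as the lids close,
# A and B shrink toward zero while C stays put, and the ratio drops:
#   eye_aspect_ratio([(0, 3), (2, 5), (4, 5), (6, 3), (4, 1), (2, 1)])
#     -> (4.0 + 4.0) / (2.0 * 6.0) = 0.67

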
def rect_to_bb(rect):
    # take a bounding box predicted by dlib and convert it
    # to the format (x, y, w, h) as we would normally do
    # with OpenCV
    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y

    # return a tuple of (x, y, w, h)
    return (x, y, w, h)


def bb_sred(rect):
    # take a bounding box predicted by dlib and compute its center
    # point ("sred" is likely from the Russian word for "middle")
    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y

    # return the (x, y) center of the box
    return (x + w / 2, y + h / 2)


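# neither helper above is actually called in this script, but a typical
# (hypothetical) use while looping over detections would be:
#   (x, y, w, h) = rect_to_bb(rect)
#   cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#   (cx, cy) = bb_sred(rect)

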
# initialize a face detector (HOG) and then find the facial landmarks
# on each detected face; the detector and the pre-trained predictor are
# created further down, once the command-line arguments have been
# parsed (the original draft also loaded a hard-coded model here:
# p = r"D:\Projects\ONTI_hack_neuro\shape_predictor_68_face_landmarks.dat")

# leftover from testing against a recorded clip; the webcam stream
# below is what is actually used
# cap = cv2.VideoCapture('train.mp4')

# smallest vertical nose-vector offset seen so far, used by the
# head-pose alert inside the main loop
min_dy = 1000

# open the serial link to the microcontroller that drives the alarm
ser = serial.Serial('COM9', 115200, timeout=1)
ser.flushInput()

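# the alarm protocol is a single status byte per frame: 0 means all is
# well, 1 means a drowsiness alert (see the ser.write calls in the main
# loop); what the firmware on the other end does with that byte is its
# own business
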
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to facial landmark predictor")
ap.add_argument("-w", "--webcam", type=int, default=0,
                help="index of webcam on system")
args = vars(ap.parse_args())

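# example invocation (the script name is whatever this file is saved
# as; the .dat file is the dlib 68-point landmark model):
#   python drowsy.py --shape-predictor shape_predictor_68_face_landmarks.dat --webcam 0
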
# define two constants, one for the eye aspect ratio to indicate a
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold to set off the alarm
EYE_AR_THRESH = 0.25
EYE_AR_CONSEC_FRAMES = 16

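# rough intuition for the numbers: an open eye typically sits around an
# EAR of 0.3 and a closed one near 0.1, so 0.25 splits the two; at a
# common ~30 fps webcam rate, 16 consecutive frames is roughly half a
# second of closed eyes (both figures are assumptions, tune to taste)
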
# initialize the frame counter and make sure the external alarm starts
# switched off
COUNTER = 0
ser.write(bytes([0]))

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
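# (for the 68-point model these slices should come out as (42, 48) for
# the left eye and (36, 42) for the right eye, but trusting the lookup
# table keeps the code model-agnostic)
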
# start the video stream thread
print("[INFO] starting video stream thread...")
vs = VideoStream(src=args["webcam"]).start()
time.sleep(1.0)

# loop over frames from the video stream
while True:
    # per-frame alert flags: kaka is raised by the closed-eyes check,
    # bobo by the head-pose check further down
    bobo = False
    kaka = False

    # grab the frame from the threaded video stream, resize it,
    # and convert it to grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)

        # average the eye aspect ratios together for both eyes
        ear = (leftEAR + rightEAR) / 2.0

        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

        # check to see if the eye aspect ratio is below the blink
        # threshold, and if so, increment the blink frame counter
        if ear < EYE_AR_THRESH:
            COUNTER += 1

            # if the eyes have been closed for a sufficient number of
            # frames, raise the alert flag
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                kaka = True

                # draw an alarm on the frame
                cv2.putText(frame, "Look out!", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # otherwise, the eye aspect ratio is not below the blink
        # threshold, so reset the counter and the alert flag
        else:
            kaka = False
            COUNTER = 0

        # echo whatever the microcontroller last sent back over serial
        cv2.putText(frame, str(ser.readline()), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # estimate the head pose: take six reference landmarks from the
        # detected face and match them against a generic 3D face model
        size = frame.shape
        image_points = np.array([
            shape[30],  # Nose tip
            shape[8],   # Chin
            shape[45],  # Left eye left corner
            shape[36],  # Right eye right corner
            shape[54],  # Left mouth corner
            shape[48]   # Right mouth corner
        ], dtype="double")

        # 3D model points of a generic head (arbitrary model units)
        model_points = np.array([
            (0.0, 0.0, 0.0),             # Nose tip
            (0.0, -330.0, -65.0),        # Chin
            (-225.0, 170.0, -135.0),     # Left eye left corner
            (225.0, 170.0, -135.0),      # Right eye right corner
            (-150.0, -150.0, -125.0),    # Left mouth corner
            (150.0, -150.0, -125.0)      # Right mouth corner
        ])
        # camera internals: approximate the focal length with the image
        # width and place the principal point at the image center
        focal_length = size[1]
        center = (size[1] / 2, size[0] / 2)
        camera_matrix = np.array(
            [[focal_length, 0, center[0]],
             [0, focal_length, center[1]],
             [0, 0, 1]], dtype="double"
        )
        print("Camera Matrix :\n {0}".format(camera_matrix))

        dist_coeffs = np.zeros((4, 1))  # assuming no lens distortion
        (success, rotation_vector, translation_vector) = cv2.solvePnP(
            model_points, image_points, camera_matrix, dist_coeffs,
            flags=cv2.SOLVEPNP_ITERATIVE)
        print("Rotation Vector:\n {0}".format(rotation_vector))
        print("Translation Vector:\n {0}".format(translation_vector))
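
        # (rotation_vector/translation_vector map model coordinates into
        # the camera frame; if full Euler angles were ever needed,
        # cv2.Rodrigues(rotation_vector) yields the 3x3 rotation matrix)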

        # project a 3D point (0, 0, 1000.0) onto the image plane;
        # we use this to draw a line sticking out of the nose
        (nose_end_point2D, jacobian) = cv2.projectPoints(
            np.array([(0.0, 0.0, 1000.0)]), rotation_vector,
            translation_vector, camera_matrix, dist_coeffs)

        # mark the six reference landmarks
        for p in image_points:
            cv2.circle(frame, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

        p1 = (int(image_points[0][0]), int(image_points[0][1]))
        p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
        print('vector', p1, p2, [p1[0] - p2[0], p1[1] - p2[1]])

        # track the smallest vertical nose-vector offset seen so far
        # (the magnitude decides whether to update, but the signed
        # value is what gets stored)
        if abs(p1[1] - p2[1]) < min_dy:
            min_dy = p1[1] - p2[1]
        cv2.line(frame, p1, p2, (255, 0, 0), 2)

        # flag a head-down pose from the tracked nose-vector offset
        if min_dy != 29 and min_dy < 38:
            print('alert')
            bobo = True
        else:
            bobo = False
        print(kaka, bobo)
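
    # note that only kaka (the closed-eyes flag) actually drives the
    # serial alarm below; bobo is computed but only ever printed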

    # show the frame and drive the external alarm over serial
    print(min_dy)
    if kaka:
        ser.write(bytes([1]))
    else:
        ser.write(bytes([0]))
    cv2.imshow("Output", frame)

    # Esc quits
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
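# also release the serial port on exit (the alarm line is left in
# whatever state was last written)
ser.close()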