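# Webcam demo: Haar-cascade face detection plus a mini-XCEPTION emotion
# classifier (FER2013 labels; the utils imports match oarriaga's
# face_classification project), combined with identity matching via the
# face_recognition library. Every 10th frame is logged to emoLog.txt and
# saved under OUTPUT/, and the final emotion counts are plotted as a pie chart.
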
from statistics import mode, StatisticsError

import cv2
from keras.models import load_model
import numpy as np

from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
import time
import shutil, os
import matplotlib.pyplot as plt
import face_recognition

# parameters for loading data and images
detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
emotion_labels = get_labels('fer2013')

# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)

# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]

# starting lists for calculating modes
emotion_window = []

# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)

# recreate the OUTPUT directory on every run; ignore_errors covers the
# first run, when the directory does not exist yet
shutil.rmtree('OUTPUT', ignore_errors=True)
os.mkdir('OUTPUT')

fo = open("emoLog.txt", "w")
fifthFrame = 1  # frame counter; every 10th frame is logged and saved
emotion_mode = ''  # start with an empty label so logging never hits an unbound name

# FACE RECOGNITION

suleymanov_image = face_recognition.load_image_file("suleymanov.jpg")
suleymanov_face_encoding = face_recognition.face_encodings(suleymanov_image)[0]

# Load a second sample picture and learn how to recognize it.
rim_image = face_recognition.load_image_file("remorenko.jpg")
rim_face_encoding = face_recognition.face_encodings(rim_image)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [
    suleymanov_face_encoding,
    rim_face_encoding
]
known_face_names = [
    "Ruslan",
    "RIM"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

####

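# main loop: one pass per webcam frame; the emotion classifier runs on
# every frame, while face recognition (further below) runs on alternating
# frames only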
while True:
    bgr_image = video_capture.read()[1]
    frame = bgr_image
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)

    # Display the resulting image
    #cv2.imshow('Video', frame)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            # the padded crop can fall outside the frame and come back empty
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)

        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except StatisticsError:
            continue
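        # emotion_window is a frame_window-long sliding window; mode() smooths
        # the displayed label so it does not flicker frame to frame. On
        # Python < 3.8, statistics.mode() raises StatisticsError when the
        # window has no unique mode (e.g. a 5/5 tie), which the except above
        # skips; on 3.8+ it would return the first mode encountered instead.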

        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((255, 255, 0))
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))
        else:
            color = emotion_probability * np.asarray((0, 255, 0))

        color = color.astype(int)
        color = color.tolist()

        # append the confidence (as a percentage) to the displayed label
        emotion_mode += ' ' + str(int(emotion_probability * 100))

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_mode,
                  color, 0, -45, 1, 1)

        # tab-separate the label and confidence for the log written below
        emotion_mode = emotion_mode.replace(' ', '\t')
        emotion_mode = emotion_mode.replace('\n', '')

    # drawing happened on the RGB copy, so convert back to BGR (OpenCV's
    # native channel order) before the frame is shown and saved
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    ### FACE

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color
    # (which face_recognition uses); cvtColor also returns a contiguous
    # array, which newer dlib builds require
    rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

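    # A sketch of a common refinement (not what the code above does): rather
    # than taking the first boolean match, pick the known face with the
    # smallest encoding distance via face_recognition.face_distance:
    #
    #   face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
    #   best_match_index = np.argmin(face_distances)
    #   if matches[best_match_index]:
    #       name = known_face_names[best_match_index]
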
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        #cv2.rectangle(bgr_image, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(bgr_image, (left, bottom + 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(bgr_image, name, (left + 6, bottom + 25), font, 1.0, (255, 255, 255), 1)

    ###

    cv2.imshow('window_frame', bgr_image)

    # despite the variable's name, this logs and saves every 10th frame
    if fifthFrame % 10 == 0:
        ts = int(time.time())
        photoFileName = str(ts) + '_' + str(fifthFrame)
        fo.write(photoFileName + '\t' + emotion_mode + '\n')
        cv2.imwrite('OUTPUT/' + photoFileName + '.jpg', bgr_image)

    fifthFrame += 1

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

fo.close()

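# standard OpenCV cleanup once the capture loop exits
video_capture.release()
cv2.destroyAllWindows()
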
stats = {'angry': 0, 'disgust': 0, 'fear': 0, 'happy': 0, 'sad': 0, 'surprise': 0, 'neutral': 0}

# each emoLog.txt line is "<photoFileName>\t<emotion>\t<confidence>",
# so field 1 is the smoothed emotion label
fo = open("emoLog.txt")
line = fo.readline()
while line:
    temp = line.split('\t')
    stats[temp[1]] += 1
    line = fo.readline()
fo.close()

print(stats)

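# a compact alternative tally using the standard library (sketch; unlike
# the pre-seeded dict above, it omits emotions that never occur):
#
#   from collections import Counter
#   with open("emoLog.txt") as f:
#       stats = Counter(line.split('\t')[1] for line in f)
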
total = 0

for cnt in stats.values():
    total += cnt
print(total)

labels = list(stats.keys())
sizes = list(stats.values())
colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'red', 'brown', 'green']
explode = (0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2)  # pull every slice out slightly, the last one a bit more

# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
        autopct='%1.1f%%', shadow=True, startangle=140)

plt.axis('equal')
plt.show()