from statistics import mode, StatisticsError  # StatisticsError: raised by mode() on empty/ambiguous data

import cv2
from keras.models import load_model
import numpy as np
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
import time
import shutil
import os
import matplotlib.pyplot as plt
import face_recognition
# parameters for loading data and images
detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
emotion_labels = get_labels('fer2013')

# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)

# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]

# starting lists for calculating modes
emotion_window = []
# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)

# recreate the OUTPUT directory; rmtree raises if it does not exist yet,
# so catch that case instead of relying on try/finally
try:
    shutil.rmtree('OUTPUT')
except FileNotFoundError:
    pass
os.mkdir('OUTPUT')

fo = open("emoLog.txt", "w")
frame_count = 1    # frame counter; every 10th frame is logged and saved
emotion_mode = ''  # last smoothed emotion label (empty until a face has been seen)
# FACE REC: load reference photos and compute one encoding per person
suleymanov_image = face_recognition.load_image_file("suleymanov.jpg")
suleymanov_face_encoding = face_recognition.face_encodings(suleymanov_image)[0]

# Load a second sample picture and learn how to recognize it.
rim_image = face_recognition.load_image_file("remorenko.jpg")
rim_face_encoding = face_recognition.face_encodings(rim_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
    suleymanov_face_encoding,
    rim_face_encoding
]
known_face_names = [
    "Ruslan",
    "RIM"
]
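# Note: face_encodings() returns a (possibly empty) list, so the [0] above
# raises IndexError if no face is found in a reference photo. A more
# defensive loader, sketched here and not in the original, assuming a
# hypothetical known_faces/ directory with one <name>.jpg per person:
#
# known_face_encodings, known_face_names = [], []
# for filename in os.listdir('known_faces'):
#     image = face_recognition.load_image_file(os.path.join('known_faces', filename))
#     encodings = face_recognition.face_encodings(image)
#     if encodings:  # skip photos where no face was detected
#         known_face_encodings.append(encodings[0])
#         known_face_names.append(os.path.splitext(filename)[0])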
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
    ret, bgr_image = video_capture.read()
    if not ret:  # camera returned no frame
        break
    frame = bgr_image
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)
    # EMOTION: classify each detected face
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:  # crop was empty or out of bounds
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)   # batch axis -> (1, H, W)
        gray_face = np.expand_dims(gray_face, -1)  # channel axis -> (1, H, W, 1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        # smooth the label by taking the mode over the last frame_window predictions
        emotion_window.append(emotion_text)
        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except StatisticsError:
            continue
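        # The list + pop(0) pair above implements a fixed-length sliding
        # window by hand; an equivalent sketch (not in the original) with
        # collections.deque lets the container drop the oldest entry itself:
        #
        # from collections import deque
        # emotion_window = deque(maxlen=frame_window)  # created once, before the loop
        # emotion_window.append(emotion_text)          # no explicit pop needed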
        # map the emotion to a color, scaled by the classifier's confidence
        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((255, 255, 0))
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))
        else:
            color = emotion_probability * np.asarray((0, 255, 0))
        color = color.astype(int).tolist()

        # append the confidence (as a percentage) to the smoothed label
        emotion_mode += ' ' + str(int(emotion_probability * 100))
        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_mode,
                  color, 0, -45, 1, 1)

        # normalize the label into tab-separated fields for the log file
        emotion_mode = emotion_mode.replace(' ', '\t')
        emotion_mode = emotion_mode.replace('\n', '')
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    ### FACE RECOGNITION
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert from BGR (which OpenCV uses) to RGB (which face_recognition
    # expects); cvtColor also yields a contiguous array, unlike the
    # negative-stride view small_frame[:, :, ::-1], which newer dlib builds reject
    rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame
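    # Taking the first True in `matches` can mislabel when two known people
    # look alike. A distance-based sketch (replacing the `if True in matches`
    # block above; the same pattern appears in the face_recognition examples)
    # picks the closest known encoding instead:
    #
    # face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
    # best_match_index = np.argmin(face_distances)
    # if matches[best_match_index]:
    #     name = known_face_names[best_match_index]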
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        #cv2.rectangle(bgr_image, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(bgr_image, (left, bottom + 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(bgr_image, name, (left + 6, bottom + 25), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('window_frame', bgr_image)

    # log and save every 10th frame (skip frames where no emotion was seen yet)
    if frame_count % 10 == 0 and emotion_mode:
        ts = int(time.time())
        photoFileName = str(ts) + '_' + str(frame_count)
        fo.write(photoFileName + '\t' + emotion_mode + '\n')
        cv2.imwrite('OUTPUT/' + photoFileName + '.jpg', bgr_image)
    frame_count += 1

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
fo.close()
# tally how often each emotion appears in the log
stats = {'angry': 0, 'disgust': 0, 'fear': 0, 'happy': 0, 'sad': 0, 'surprise': 0, 'neutral': 0}
with open("emoLog.txt") as fo:
    for line in fo:
        temp = line.split('\t')
        if len(temp) > 1 and temp[1] in stats:  # guard against malformed lines
            stats[temp[1]] += 1
print(stats)
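# An equivalent tally, sketched with collections.Counter over the second
# tab-separated column of the log (not in the original script):
#
# from collections import Counter
# with open("emoLog.txt") as log:
#     stats = Counter(line.split('\t')[1] for line in log if '\t' in line)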
total = sum(stats.values())
print(total)

labels = list(stats.keys())
sizes = list(stats.values())
colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'red', 'brown', 'green']
explode = (0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2)  # pull every slice out slightly, the last one a bit more

# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
        autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.show()