Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- """
- This example demonstrates the RabbitMQ "Direct reply-to" usage via
- `pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html
- for more info about this feature.
- """
- from __future__ import absolute_import
- from __future__ import division
- from __future__ import print_function
- import tensorflow as tf
- from scipy import misc
- import cv2
- import numpy as np
- import argparse
- import facenet
- from align import detect_face
- import os
- from os.path import join as pjoin
- import sys
- import time
- import copy
- import math
- import pickle
- import glob
- from sklearn.svm import SVC
- from sklearn.externals import joblib
- from PIL import Image
- import io
- import time
- import pika
- import glob
- import base64
- from rabbitmq import RabbitMQ
- from eq_config import Config
- from eq_utils import crop_display_face, get_area_list
- DEBUG = False
- rabbit_data_dir = r'../data/rabbit'
- modeldir = r'../20170512-110547/20170512-110547.pb'
- classifier_filename = r'../models/users.pkl'
- embedding_filename = r'../models/embeddings.pkl'
- align_model_path = r'./align'
- datadir = r'../eq_data'
- dst_path = os.path.expanduser('~/eyeq-card-face-mapping-api')
- path_trunk = r'public/results'
- log = []
- rb = RabbitMQ(('admin', 'admin123'), ('localhost', 5672))
- # rb = RabbitMQ(('eyeq', 'eyeq@!'), ('125.212.233.106', 5672))
# Build the TensorFlow graph and load the MTCNN + FaceNet models once at
# import time.  Everything assigned below (sess, pnet/rnet/onet,
# images_placeholder, embeddings, ...) becomes a module global used by the
# functions further down.
with tf.Graph().as_default():
    # Cap GPU memory so other workers can share the device.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    with sess.as_default():
        start = time.time()
        # MTCNN face-detection cascade (proposal / refine / output nets).
        pnet, rnet, onet = detect_face.create_mtcnn(sess, align_model_path)
        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # three steps's threshold
        factor = 0.709  # scale factor
        frame_interval = 1
        image_size = 182        # crop size before final resize
        input_image_size = 160  # FaceNet input resolution
        print('Loading feature extraction model')
        facenet.load_model(modeldir)
        end = time.time()
        log.append(('load models: ' + str(end-start)))
        # Handles into the loaded FaceNet graph.
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
        embedding_size = embeddings.get_shape()[1]
        # Max squared embedding distance for a face to join an existing identity.
        MIN_ASSIGN_THRESHOLD = 0.9
        # Message field separators: INTER_SEP between fields, INTRA_SEP within one.
        INTER_SEP = '|'
        INTRA_SEP = '?'
def main():
    """Run the face-detection RPC worker.

    In normal mode (``DEBUG`` is False) this connects to RabbitMQ,
    declares the ``rpc.server.vingroup_api_detection`` queue and consumes
    requests forever; each request is answered by
    ``on_server_rx_rpc_request``.  In DEBUG mode it instead performs a
    single local detect-and-assign pass on test images.
    """
    if not DEBUG:
        logins, connections = ('admin', 'admin123'), ('localhost', 5672)
        # logins, connections = ('eyeq', 'eyeq@!'), ('125.212.233.106', 5672)
        credentials = pika.PlainCredentials(logins[0], logins[1])
        # Generous socket timeout: detection on large images can be slow.
        params = pika.ConnectionParameters(connections[0], connections[1], '/', credentials,
                                           frame_max=131072,
                                           socket_timeout=1000,
                                           heartbeat_interval=None)
        print("rpc server")
        with pika.BlockingConnection(params) as conn:
            channel = conn.channel()
            # Set up server queue: shared between consumers, kept after disconnect.
            channel.queue_declare(queue='rpc.server.vingroup_api_detection',
                                  exclusive=False,
                                  auto_delete=False)
            channel.basic_consume(on_server_rx_rpc_request, queue='rpc.server.vingroup_api_detection')
            print('start consuming')
            channel.start_consuming()
    else:
        # Local smoke test; the argument is ignored when DEBUG is True.
        imgs, ids = receive_response_message_link(1)
        detected_face_dict = detect_and_assign(imgs, ids)
def calculate_area(bounding_box):
    """Return the area of a ``[x1, y1, x2, y2]`` bounding box."""
    width = bounding_box[2] - bounding_box[0]
    height = bounding_box[3] - bounding_box[1]
    return height * width
def receive_response_message(msg):
    """Parse a request of the form ``<img1>?<img2>...|<id1>?<id2>...``.

    Each image field is a base64-encoded compressed image; it is decoded
    with OpenCV and converted from BGR to RGB.  If the image field is
    empty, both lists come back empty.

    Args:
        msg: raw ascii message bytes.
    Returns:
        (imgs, ids): list of RGB ndarrays and list of id strings.
    """
    msg_body = msg.decode('ascii').split(INTER_SEP)
    img_list = msg_body[0].split(INTRA_SEP)
    imgs = []
    ids = []
    if '' not in img_list:
        for i in img_list:
            # np.frombuffer replaces the deprecated np.fromstring.
            raw = np.frombuffer(base64.b64decode(i.encode("ascii")), dtype=np.uint8)
            img = cv2.cvtColor(cv2.imdecode(raw, 1), cv2.COLOR_BGR2RGB)
            imgs.append(img)
        ids = msg_body[1].split(INTRA_SEP)
    return imgs, ids
def receive_response_message_link(msg):
    """Parse a request whose image field holds file paths instead of data.

    msg format: ``<url1>?<url2>...|<id1>?<id2>...`` (ascii bytes); paths
    are relative to the web app's directory (``dst_path``).
    NOTE(review): only the FIRST url is actually loaded — confirm the
    remaining urls are intentionally ignored.

    In DEBUG mode *msg* is ignored and three hard-coded local test
    images are loaded instead.

    Returns:
        (imgs, ids): list of image ndarrays and list of id strings.
    """
    if DEBUG == False:
        msg_body = msg.decode('ascii').split(INTER_SEP)
        img_urls = msg_body[0].split(INTRA_SEP)
        ids = msg_body[1].split(INTRA_SEP)
        imgs = []
        # NOTE(review): str.split always returns >= 1 element, so this
        # condition can only be False if the lists were built differently.
        if len(img_urls)>0 and len(ids)>0:
            url = img_urls[0]
            print(url)
            img_url = os.path.join(dst_path, url)
            img = misc.imread(img_url)
            imgs.append(img)
        else:
            print('Can not get Images or IDs list')
        return imgs, ids
    else:
        imgs = []
        msg_body = "1?2?3"
        ids = msg_body.split(INTRA_SEP)
        # Hard-coded developer test path; only reached when DEBUG is True.
        for i in range(0,3):
            img = misc.imread("/Users/tuhoang/Desktop/Dev/Test/" + str(i) + ".jpg")
            imgs.append(img)
        return imgs, ids
def detect_and_assign(imgs, ids):
    """Detect faces in every image and assign each face to one of *ids*.

    Pipeline:
      1. For each frame: run MTCNN detection, crop every in-bounds face
         and compute its FaceNet embedding.
      2. Faces from frame 0 seed the identity clusters; each face from a
         later frame joins the nearest existing cluster by squared
         embedding distance, or starts a new cluster if all distances
         exceed MIN_ASSIGN_THRESHOLD.
      3. Clusters not present in every frame are dropped, the smallest
         clusters are trimmed until at most len(ids) remain, and the
         surviving integer cluster keys are renamed to the given ids.

    Args:
        imgs: list of RGB (or grayscale) image ndarrays.
        ids:  list of identity strings to assign faces to.
    Returns:
        dict mapping id -> list of (embedding, face_crop, padded_bb,
        frame_index) tuples.
    """
    nrof_id = len(ids)
    nrof_img = len(imgs)
    detected_faces = []
    embedding_dict = {}  # frame_index -> zip of (emb, crop, padded_bb)
    bbs = {}             # frame_index -> list of raw int32 boxes
    C = 9  # NOTE(review): only used by the commented-out debug display below
    for frame_index, frame in enumerate(imgs):
        start = time.time()
        # Grayscale frames are expanded to RGB before detection.
        if frame.ndim == 2:
            frame = facenet.to_rgb(frame)
        frame = frame[:, :, 0:3]
        bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet, threshold, factor)
        nrof_faces = bounding_boxes.shape[0]
        print('Detected_FaceNum: %d' % nrof_faces)
        end = time.time()
        log.append(('detect a frame: ' + str(end - start)))
        nrof_faces = bounding_boxes.shape[0]
        emb_arrays = []
        detected_faces = []
        after_padded_bbs = []
        bbs[frame_index] = []
        if nrof_faces > 0:
            det = bounding_boxes[:, 0:4]
            # only get top n biggest face to avoid face in the background
            # if nrof_id < nrof_faces:
            #     area_list = get_area_list(det)
            #     sort_index = area_list.argsort()
            #     top_area_rect_indices = sort_index[-nrof_id:]
            #     det = det[top_area_rect_indices]
            #     nrof_faces = nrof_id
            h, w, _ = frame.shape
            img_size = np.asarray(frame.shape)[0:2]
            cropped = []
            scaled = []
            scaled_reshape = []
            bb = np.zeros((nrof_faces,4), dtype=np.int32)
            for i in range(nrof_faces):
                print('Face#', i)
                # inner exception: skip boxes that fall outside the frame
                if det[i][0] < 0 or det[i][2] > w or det[i][1] < 0 or det[i][3] > h:
                    print('face is inner of range!')
                    continue
                bb[i][0] = det[i][0]
                bb[i][1] = det[i][1]
                bb[i][2] = det[i][2]
                bb[i][3] = det[i][3]
                # cropped: tight crop used for the embedding.
                cropped.append(frame[bb[i][1]:bb[i][3], bb[i][0]:bb[i][2], :])
                # detected_faces: padded, display-sized crop.
                # TODO: Index of detected face, crop_factor_dictionary[frame_index][image_index] = crop_factor (tlx_tly_brx_bry)]
                crop, after_padded_bb = crop_display_face(frame, bb[i])
                filename = "{}_{}".format(frame_index, after_padded_bb)
                print('Origin file name', filename)
                detected_faces.append(crop)
                after_padded_bbs.append(after_padded_bb)
                bbs[frame_index].append(bb[i])
                print("Bounding Box " + str(bb[i]))
                start = time.time()
                # j indexes the face we just appended.
                j = len(cropped) - 1
                cropped[j] = facenet.flip(cropped[j], False)
                scaled.append(misc.imresize(cropped[j], (image_size, image_size), interp='bilinear'))
                scaled[j] = cv2.resize(scaled[j], (input_image_size,input_image_size),
                                       interpolation=cv2.INTER_CUBIC)
                scaled[j] = facenet.prewhiten(scaled[j])
                scaled_reshape.append(scaled[j].reshape(-1,input_image_size,input_image_size,3))
                feed_dict = {images_placeholder: scaled_reshape[j], phase_train_placeholder: False}
                emb_array = sess.run(embeddings, feed_dict=feed_dict)
                # emb_array always has 1 embedding since we feed 1 image:
                emb_arrays.append(emb_array[0,:])
                end = time.time()
                log.append(('extract a face: ' + str(end-start)))
        # NOTE(review): on Python 3 zip() is a one-shot iterator; each
        # embedding_dict entry can only be traversed once (it is, below).
        embedding_dict[frame_index] = zip(emb_arrays, detected_faces, after_padded_bbs)
    # TODO: Save crop factor out side
    id_dict = {}
    bb_areas = []
    index = []
    Count = 0
    # assign faces to ID
    start = time.time()
    for img_index, emb_faces in embedding_dict.items():
        if img_index == 0:
            # Frame 0 seeds the clusters; remember face areas so the
            # smallest clusters can be trimmed later.
            for ind, bb in enumerate(bbs[img_index]):
                bb_areas.append(calculate_area(bb))
                index.append(ind)
            print(bb_areas)
            print(index)
            # Cluster indices ordered by face area, biggest first.
            sorted_bb_areas = [x for _,x in sorted(zip(bb_areas,index), reverse = True)]
            print(" Sorted BB Area " + str(sorted_bb_areas))
            for ind, (emb, face, after_padded_bb) in enumerate(emb_faces):
                id_dict[ind] = []
                id_dict[ind].append((emb, face, after_padded_bb, img_index))
        else:
            visited = []
            for (emb, face, after_padded_bb) in emb_faces:
                emb_list = []
                # Collect every cluster's embeddings for nearest-id search.
                for k, ef_list in id_dict.items():
                    this_id_emb = []
                    for (e, f, bb, _) in ef_list:
                        this_id_emb.append(e)
                    emb_list.append(this_id_emb)
                found_id, min_dist = find_id(emb_list,emb)
                print("found_id: " + str(found_id))
                if min_dist > MIN_ASSIGN_THRESHOLD:
                    # Too far from every cluster: start a new identity.
                    found_id = len(id_dict.keys())
                    id_dict[found_id] = [(emb, face, after_padded_bb, img_index)]
                else:
                    # A cluster absorbs at most one face per frame.
                    if found_id in visited:
                        continue
                    id_dict[found_id].append((emb, face, after_padded_bb, img_index))
                    visited.append(found_id)
    # Drop clusters that were not seen in every frame.
    remove_face_list = []
    for id_dict_index, id_info in id_dict.items():
        if len((id_info)) < nrof_img:
            remove_face_list.append(id_dict_index)
    for remove_id in remove_face_list:
        del id_dict[remove_id]
    # Trim smallest-area clusters until at most nrof_id remain.
    # NOTE(review): sorted_bb_areas is unbound if frame 0 was never
    # processed, and a popped key may already have been removed above —
    # TODO confirm this cannot raise here.
    while (len(id_dict.keys()) > nrof_id):
        id_dict.pop(sorted_bb_areas.pop())
    #fixe here, remove id_dict.keys() - 1, and pop all k out of id_dict
    # Rename surviving integer cluster keys to the requested ids.
    if len(id_dict.keys()) < len(ids):
        added_keys = ids[len(id_dict.keys()): len(ids)]
        key_dict = dict([(k, v) for k, v in zip(id_dict.keys(), ids[:len(id_dict.keys())])])
        id_dict = dict([(key_dict[k], v) for k, v in id_dict.items()])
        for k in added_keys:
            id_dict.pop(k,None)
    elif len(id_dict.keys()) > len(ids):
        valid_keys = list(id_dict.keys())[:len(ids)]
        key_dict = dict([(k, v) for k, v in zip(valid_keys, ids)])
        id_dict = dict([(key_dict[k], v) for k, v in id_dict.items() if k in valid_keys])
    else:
        key_dict = dict([(k, v) for k, v in zip(id_dict.keys(), ids)])
        id_dict = dict([(key_dict[k], v) for k, v in id_dict.items()])
    end = time.time()
    log.append(('main algo: ' + str(end - start)))
    return id_dict
def find_id(emb_list, emb):
    """Find the identity whose embeddings are closest to *emb*.

    Args:
        emb_list: one entry per identity, each a list of embedding vectors.
        emb: the query embedding vector.
    Returns:
        (nearest_id, min_dist): index of the closest identity and the
        smallest squared Euclidean distance found.  With an empty
        emb_list, returns (0, inf) — the caller treats any distance above
        MIN_ASSIGN_THRESHOLD as "no match", so this is equivalent to the
        old magic 20000 sentinel.
    """
    min_dist = math.inf
    nearest_id = 0
    # `idx` avoids shadowing the builtin `id`.
    for idx, embs in enumerate(emb_list):
        dists = np.sum(np.square(np.subtract(embs, emb)), 1)
        min_dist_of_this_id = np.min(dists)
        if min_dist_of_this_id < min_dist:
            min_dist = min_dist_of_this_id
            nearest_id = idx
    return nearest_id, min_dist
def prepare_response_message(id_dict):
    """Serialise *id_dict* into ``<id>?<img_b64>?...|<id>?...`` reply text.

    Each face crop is converted back to BGR, PNG-encoded and
    base64-encoded; one '|'-separated segment is emitted per identity.
    """
    segments = []
    for person_id, face_entries in id_dict.items():
        encoded_faces = []
        for face_emb, face_crop, _, _ in face_entries:
            bgr = cv2.cvtColor(face_crop, cv2.COLOR_RGB2BGR)
            _, png_buf = cv2.imencode('.png', bgr)
            encoded_faces.append(base64.b64encode(png_buf).decode('ascii'))
        segments.append(str(person_id) + INTRA_SEP + INTRA_SEP.join(encoded_faces))
    #json = extract_response_json(id_dict)
    #write to ../data/json/response.json
    return INTER_SEP.join(segments)
def prepare_response_message_bb(id_dict):
    """Serialise *id_dict* as ``<img_indexes>|<bounding_boxes>|<ids>``.

    Each field is a '?'-joined list with one entry per detected face,
    flattened across all identities.
    """
    img_index_list = []
    bb_list = []
    id_list = []
    for k, emb_face_list in id_dict.items():
        for emb, image, bb, img_index in emb_face_list:
            img_index_list.append(img_index)
            bb_list.append(bb)
            id_list.append(k)
    img_index_str = INTRA_SEP.join(str(i) for i in img_index_list)
    # str() coercion: bb and id entries are not guaranteed to be strings
    # (str.join raises TypeError on non-str items).
    bb_str = INTRA_SEP.join(str(b) for b in bb_list)
    id_str = INTRA_SEP.join(str(i) for i in id_list)
    msg = INTER_SEP.join([img_index_str, bb_str, id_str])
    #json = extract_response_json(id_dict)
    #write to ../data/json/response.json
    return msg
def prepare_response_message_link(id_dict):
    """Save each face crop as a PNG under the web app's public results
    directory and build a reply of the form
    ``<id>?<path1>?<path2>...|<id>?...``.

    Paths in the reply are relative (``public/results/<file>``); files are
    written to ``<dst_path>/<path_trunk>/``.
    """
    msg_list = []
    id_list = []        # NOTE(review): written below but never read
    filename_list = []  # NOTE(review): written below but never read
    msg_list = []       # NOTE(review): re-initialised; the assignment above is dead
    for k, emb_face_list in id_dict.items():
        filenames = [str(k)]  # first field of every segment is the id itself
        print('id: ', str(k))
        for emb, image, bb, img_index in emb_face_list:
            id_list.append(k)
            # Filename encodes id + (presumably) the padded bounding box —
            # TODO confirm bb formats cleanly as a string here.
            filename = '{}_{}.png'.format(k, bb)
            print('File name', filename)
            print('Path trunk', path_trunk, os.path.join(path_trunk, filename))
            savename = os.path.join(dst_path, path_trunk, filename)
            print('Save name', savename)
            misc.imsave(savename, image)
            filename_list.append('{}/{}'.format(path_trunk, filename))
            filenames.append('{}/{}'.format(path_trunk, filename))
        msg_list.append(INTRA_SEP.join(filenames))
    msg = INTER_SEP.join(msg_list)
    return msg
def prepare_response_for_central(id_dict, area_id):
    """Build the message forwarded to the central worker.

    The reply is seven '|'-separated fields: base64 PNG images,
    space-separated embedding strings, per-identity order numbers, ids,
    *area_id*, a unix timestamp, and the padded bounding boxes.  Each
    multi-valued field is '?'-joined with one entry per face.
    """
    images = []
    embeddings_txt = []
    padded_boxes = []
    orders = []
    identities = []
    for person_id, face_entries in id_dict.items():
        for order_no, (face_emb, face_crop, padded_box, _) in enumerate(face_entries):
            bgr = cv2.cvtColor(face_crop, cv2.COLOR_RGB2BGR)
            _, png_buf = cv2.imencode('.png', bgr)
            images.append(base64.b64encode(png_buf).decode('ascii'))
            embeddings_txt.append(' '.join(map(str, face_emb)).strip())
            padded_boxes.append(padded_box)
            identities.append(str(person_id))
            orders.append(str(order_no))
    fields = [
        INTRA_SEP.join(images),
        INTRA_SEP.join(embeddings_txt),
        INTRA_SEP.join(orders),
        INTRA_SEP.join(identities),
        area_id,
        str(int(time.time())),
        INTRA_SEP.join(padded_boxes),
    ]
    return INTER_SEP.join(fields)
def on_server_rx_rpc_request(ch, method_frame, properties, body):
    """RabbitMQ consumer callback: run detection and reply to the caller.

    Parses the request body into images and ids, runs the full
    detect-and-assign pipeline, publishes the link-style reply to the
    request's reply-to queue and acks the delivery.

    Args:
        ch: the pika channel.
        method_frame: delivery metadata (used for the ack tag).
        properties: message properties (reply_to routing key).
        body: raw request bytes.
    """
    # NOTE(review): this local `log` shadows the module-level timing list,
    # so timings recorded inside detect_and_assign go to the global one
    # while this list stays empty — confirm which was intended.
    log = []
    start_time = time.time()
    print('RPC Server got request:')
    print('Body', body)
    # imgs, ids = receive_response_message(body)
    imgs, ids = receive_response_message_link(body)
    detected_face_dict = detect_and_assign(imgs, ids)
    # send to vingroup
    # msg = prepare_response_message_bb(detected_face_dict)
    # msg = prepare_response_message(detected_face_dict)
    msg = prepare_response_message_link(detected_face_dict)
    # print('Msg', msg)
    # Reply on the default exchange to the caller's reply-to queue.
    ch.basic_publish('', routing_key=properties.reply_to, body=msg)
    ch.basic_ack(delivery_tag=method_frame.delivery_tag)
    # send to central worker
    #TODO: single value of crop ration, turn to list of [coordinate]
    # msg = prepare_response_for_central(detected_face_dict, 'CHECKIN_AREA')
    # ch.basic_publish('', routing_key=rb.API_SERVER_TO_CENTRAL, body=msg)
    print('RPC Server says good bye')
    print(time.time() - start_time)
# Entry point: start the RPC worker (or the DEBUG smoke test).
if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement