  1. """
  2. This example demonstrates the RabbitMQ "Direct reply-to" usage via
  3. `pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html
  4. for more info about this feature.
  5. """


from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from scipy import misc
import cv2
import numpy as np
import argparse
import facenet
from align import detect_face
import os
from os.path import join as pjoin
import sys
import time
import copy
import math
import pickle
import glob
from sklearn.svm import SVC
from sklearn.externals import joblib
from PIL import Image
import io

import pika
import base64

from rabbitmq import RabbitMQ
from eq_config import Config

from eq_utils import crop_display_face, get_area_list

DEBUG = False

rabbit_data_dir = r'../data/rabbit'
modeldir = r'../20170512-110547/20170512-110547.pb'
classifier_filename = r'../models/users.pkl'
embedding_filename = r'../models/embeddings.pkl'
align_model_path = r'./align'
datadir = r'../eq_data'

dst_path = os.path.expanduser('~/eyeq-card-face-mapping-api')
path_trunk = r'public/results'


log = []

rb = RabbitMQ(('admin', 'admin123'), ('localhost', 5672))
# rb = RabbitMQ(('eyeq', 'eyeq@!'), ('125.212.233.106', 5672))

with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    with sess.as_default():
        start = time.time()
        pnet, rnet, onet = detect_face.create_mtcnn(sess, align_model_path)
        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # three steps' threshold
        factor = 0.709  # scale factor
        frame_interval = 1
        image_size = 182
        input_image_size = 160
        print('Loading feature extraction model')
        facenet.load_model(modeldir)

        end = time.time()
        log.append(('load models: ' + str(end - start)))

        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
        embedding_size = embeddings.get_shape()[1]

# Everything assigned inside the `with` blocks above (sess, pnet/rnet/onet and
# the graph tensors) remains available as module-level globals for the
# functions below.
MIN_ASSIGN_THRESHOLD = 0.9

INTER_SEP = '|'
INTRA_SEP = '?'



def main():
    """Run the face-detection RPC server.

    The server consumes requests from the 'rpc.server.vingroup_api_detection'
    queue, runs detection and ID assignment on the referenced images, and
    publishes the reply to the requester's reply-to queue.  When DEBUG is
    True the RabbitMQ part is skipped and a single local test run of
    detect_and_assign() is performed instead.  See example_send_request()
    below for an illustrative (non-original) client-side sketch.
    """
    if not DEBUG:
        logins, connections = ('admin', 'admin123'), ('localhost', 5672)
        # logins, connections = ('eyeq', 'eyeq@!'), ('125.212.233.106', 5672)

        credentials = pika.PlainCredentials(logins[0], logins[1])
        params = pika.ConnectionParameters(connections[0], connections[1], '/', credentials,
                                           frame_max=131072,
                                           socket_timeout=1000,
                                           heartbeat_interval=None)
        # connection = pika.BlockingConnection(params)
        # channel = connection.channel()

        print("rpc server")
        with pika.BlockingConnection(params) as conn:
            channel = conn.channel()

            # Set up server

            channel.queue_declare(queue='rpc.server.vingroup_api_detection',
                                  exclusive=False,
                                  auto_delete=False)

            channel.basic_consume(on_server_rx_rpc_request, queue='rpc.server.vingroup_api_detection')

            print('start consuming')
            channel.start_consuming()
    else:
        imgs, ids = receive_response_message_link(1)
        detected_face_dict = detect_and_assign(imgs, ids)

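
# Illustrative only: a minimal client-side sketch, not part of the original
# script.  It assumes the same (older) pika API used elsewhere in this file,
# where basic_consume takes the callback first, and the request layout read by
# receive_response_message_link(): image paths joined by INTRA_SEP, then
# INTER_SEP, then IDs joined by INTRA_SEP.  The function name and arguments
# are made-up placeholders.
def example_send_request(params, image_paths, ids):
    """Publish one detection request and wait for the reply via Direct reply-to."""
    reply = {}

    def on_reply(ch, method_frame, properties, body):
        reply['body'] = body
        ch.stop_consuming()

    with pika.BlockingConnection(params) as conn:
        channel = conn.channel()
        # Direct reply-to: consume from the pseudo-queue (no-ack) before publishing.
        channel.basic_consume(on_reply, queue='amq.rabbitmq.reply-to', no_ack=True)
        body = INTRA_SEP.join(image_paths) + INTER_SEP + INTRA_SEP.join(ids)
        channel.basic_publish(
            exchange='',
            routing_key='rpc.server.vingroup_api_detection',
            body=body,
            properties=pika.BasicProperties(reply_to='amq.rabbitmq.reply-to'))
        channel.start_consuming()
    return reply.get('body')
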
def calculate_area(bounding_box):
    area = (bounding_box[3] - bounding_box[1]) * (bounding_box[2] - bounding_box[0])
    return area


def receive_response_message(msg):
    # request body layout: '<b64 img>?<b64 img>?...|<id>?<id>?...'
    msg_body = msg.decode('ascii').split(INTER_SEP)
    img_list = msg_body[0].split(INTRA_SEP)
    imgs = []
    ids = []
    if '' not in img_list:
        for i in img_list:
            img = cv2.cvtColor(
                cv2.imdecode(np.frombuffer(base64.b64decode(i.encode("ascii")), dtype=np.uint8), 1),
                cv2.COLOR_BGR2RGB)
            imgs.append(img)
        ids = msg_body[1].split(INTRA_SEP)

    return imgs, ids

def receive_response_message_link(msg):
    if not DEBUG:
        # request body layout: '<image path>?...|<id>?<id>?...'
        msg_body = msg.decode('ascii').split(INTER_SEP)
        img_urls = msg_body[0].split(INTRA_SEP)
        ids = msg_body[1].split(INTRA_SEP)
        imgs = []
        if len(img_urls) > 0 and len(ids) > 0:
            url = img_urls[0]
            print(url)
            img_url = os.path.join(dst_path, url)
            img = misc.imread(img_url)
            imgs.append(img)
        else:
            print('Cannot get Images or IDs list')
        return imgs, ids
    else:
        imgs = []
        msg_body = "1?2?3"
        ids = msg_body.split(INTRA_SEP)
        for i in range(0, 3):
            img = misc.imread("/Users/tuhoang/Desktop/Dev/Test/" + str(i) + ".jpg")
            imgs.append(img)
        return imgs, ids

def detect_and_assign(imgs, ids):
    # detect faces in images
    nrof_id = len(ids)
    nrof_img = len(imgs)
    detected_faces = []
    embedding_dict = {}
    bbs = {}
    C = 9
    for frame_index, frame in enumerate(imgs):

        start = time.time()

        if frame.ndim == 2:
            frame = facenet.to_rgb(frame)
        frame = frame[:, :, 0:3]
        bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet, threshold, factor)
        nrof_faces = bounding_boxes.shape[0]
        print('Detected_FaceNum: %d' % nrof_faces)

        end = time.time()
        log.append(('detect a frame: ' + str(end - start)))
        nrof_faces = bounding_boxes.shape[0]
        emb_arrays = []
        detected_faces = []
        after_padded_bbs = []
        bbs[frame_index] = []
        if nrof_faces > 0:
            det = bounding_boxes[:, 0:4]
            # only keep the top n biggest faces to avoid faces in the background
            # if nrof_id < nrof_faces:
            #     area_list = get_area_list(det)
            #     sort_index = area_list.argsort()
            #     top_area_rect_indices = sort_index[-nrof_id:]
            #     det = det[top_area_rect_indices]
            #     nrof_faces = nrof_id

            h, w, _ = frame.shape
            img_size = np.asarray(frame.shape)[0:2]

            cropped = []
            scaled = []
            scaled_reshape = []

            bb = np.zeros((nrof_faces, 4), dtype=np.int32)

            for i in range(nrof_faces):
                print('Face#', i)
                # skip boxes that fall outside the frame
                if det[i][0] < 0 or det[i][2] > w or det[i][1] < 0 or det[i][3] > h:
                    print('face is out of range!')
                    continue

                bb[i][0] = det[i][0]
                bb[i][1] = det[i][1]
                bb[i][2] = det[i][2]
                bb[i][3] = det[i][3]

                # cropped for embeddings
                cropped.append(frame[bb[i][1]:bb[i][3], bb[i][0]:bb[i][2], :])

                # detected_faces for large size faces
                # TODO: Index of detected face, crop_factor_dictionary[frame_index][image_index] = crop_factor (tlx_tly_brx_bry)
                crop, after_padded_bb = crop_display_face(frame, bb[i])
                filename = "{}_{}".format(frame_index, after_padded_bb)
                print('Origin file name', filename)
                detected_faces.append(crop)
                after_padded_bbs.append(after_padded_bb)
                bbs[frame_index].append(bb[i])
                print("Bounding Box " + str(bb[i]))

                start = time.time()

                j = len(cropped) - 1
                cropped[j] = facenet.flip(cropped[j], False)
                scaled.append(misc.imresize(cropped[j], (image_size, image_size), interp='bilinear'))
                scaled[j] = cv2.resize(scaled[j], (input_image_size, input_image_size),
                                       interpolation=cv2.INTER_CUBIC)

                scaled[j] = facenet.prewhiten(scaled[j])
                scaled_reshape.append(scaled[j].reshape(-1, input_image_size, input_image_size, 3))
                feed_dict = {images_placeholder: scaled_reshape[j], phase_train_placeholder: False}
                emb_array = sess.run(embeddings, feed_dict=feed_dict)

                # emb_array always holds a single embedding because only one image is fed in
                emb_arrays.append(emb_array[0, :])
                end = time.time()
                log.append(('extract a face: ' + str(end - start)))
                # C = C + 1
                # cv2.imshow(str(C), cropped[j])
                # while True:
                #     key = cv2.waitKey(1)
                #     if key > 0:
                #         break
        embedding_dict[frame_index] = zip(emb_arrays, detected_faces, after_padded_bbs)

    # TODO: Save crop factor outside
    id_dict = {}
    bb_areas = []
    index = []
    Count = 0
    # assign faces to IDs
    start = time.time()
    for img_index, emb_faces in embedding_dict.items():
        if img_index == 0:
            # first frame: every detected face opens its own ID slot, and the
            # boxes are ranked by area for later pruning
            for ind, bb in enumerate(bbs[img_index]):
                bb_areas.append(calculate_area(bb))
                index.append(ind)
            print(bb_areas)
            print(index)
            sorted_bb_areas = [x for _, x in sorted(zip(bb_areas, index), reverse=True)]
            print(" Sorted BB Area " + str(sorted_bb_areas))
            for ind, (emb, face, after_padded_bb) in enumerate(emb_faces):
                id_dict[ind] = []
                id_dict[ind].append((emb, face, after_padded_bb, img_index))
        else:
            # later frames: attach each face to the nearest existing ID, or
            # open a new one if it is too far from all of them
            visited = []
            for (emb, face, after_padded_bb) in emb_faces:
                emb_list = []
                # TODO: How to retrieve crop factor here
                # face_list = []
                # id_list = []

                for k, ef_list in id_dict.items():
                    this_id_emb = []
                    # TODO: image_index = len(detected_faces)
                    for (e, f, bb, _) in ef_list:
                        # id_list.append(k)
                        this_id_emb.append(e)
                        # face_list.append(f)
                    emb_list.append(this_id_emb)

                found_id, min_dist = find_id(emb_list, emb)
                print("found_id: " + str(found_id))
                if min_dist > MIN_ASSIGN_THRESHOLD:
                    found_id = len(id_dict.keys())
                    id_dict[found_id] = [(emb, face, after_padded_bb, img_index)]
                else:
                    if found_id in visited:
                        continue
                    id_dict[found_id].append((emb, face, after_padded_bb, img_index))
                    visited.append(found_id)

    remove_face_list = []

    # drop IDs that were not seen in every input image
    for id_dict_index, id_info in id_dict.items():
        if len(id_info) < nrof_img:
            remove_face_list.append(id_dict_index)

    for remove_id in remove_face_list:
        del id_dict[remove_id]

    while len(id_dict.keys()) > nrof_id:
        id_dict.pop(sorted_bb_areas.pop())
    # FIXME: remove id_dict.keys() - 1, and pop all k out of id_dict
    if len(id_dict.keys()) < len(ids):
        added_keys = ids[len(id_dict.keys()): len(ids)]
        key_dict = dict([(k, v) for k, v in zip(id_dict.keys(), ids[:len(id_dict.keys())])])
        id_dict = dict([(key_dict[k], v) for k, v in id_dict.items()])
        for k in added_keys:
            id_dict.pop(k, None)
    elif len(id_dict.keys()) > len(ids):
        valid_keys = list(id_dict.keys())[:len(ids)]
        key_dict = dict([(k, v) for k, v in zip(valid_keys, ids)])
        id_dict = dict([(key_dict[k], v) for k, v in id_dict.items() if k in valid_keys])
    else:
        key_dict = dict([(k, v) for k, v in zip(id_dict.keys(), ids)])
        id_dict = dict([(key_dict[k], v) for k, v in id_dict.items()])

    end = time.time()
    log.append(('main algo: ' + str(end - start)))

    return id_dict

def find_id(emb_list, emb):
    # print(len(emb_list))
    min_dist = 20000
    nearest_id = 0
    for idx, embs in enumerate(emb_list):
        dists = np.sum(np.square(np.subtract(embs, emb)), 1)
        min_dist_of_this_id = np.min(dists)
        if min_dist_of_this_id < min_dist:
            min_dist = min_dist_of_this_id
            nearest_id = idx
    return nearest_id, min_dist

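# A small worked example of what find_id() returns (values illustrative only):
# with two candidate IDs, each holding one stored embedding,
#   find_id([[np.array([0.0, 0.0])], [np.array([1.0, 1.0])]], np.array([0.1, 0.0]))
# computes the squared L2 distance to every stored embedding per ID
# (0.01 for ID 0, 1.81 for ID 1) and returns (0, 0.01): the index of the
# nearest ID and the smallest distance, which detect_and_assign() compares
# against MIN_ASSIGN_THRESHOLD.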

def prepare_response_message(id_dict):
    msg_list = []
    for k, emb_face_list in id_dict.items():
        img_list = []
        emb_list = []
        for emb, image, _, _ in emb_face_list:
            cvt_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            _, buffer = cv2.imencode('.png', cvt_img)
            bin_img = base64.b64encode(buffer).decode('ascii')
            img_list.append(bin_img)
        img_list_str = INTRA_SEP.join(img_list)
        msg_element = str(k) + INTRA_SEP + img_list_str
        msg_list.append(msg_element)

    msg = INTER_SEP.join(msg_list)
    # json = extract_response_json(id_dict)
    # write to ../data/json/response.json
    return msg

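# Illustrative only: a sketch of how a consumer might unpack the message built
# by prepare_response_message() above.  The function name and return shape are
# assumptions, not part of the original script; it relies only on the
# '<id>?<b64 png>?<b64 png>|...' layout produced above.
def example_parse_response(msg):
    """Decode a prepare_response_message() payload back into {id: [BGR images]}."""
    faces_by_id = {}
    for element in msg.split(INTER_SEP):
        parts = element.split(INTRA_SEP)
        face_id, encoded_images = parts[0], parts[1:]
        images = []
        for encoded in encoded_images:
            raw = np.frombuffer(base64.b64decode(encoded), dtype=np.uint8)
            images.append(cv2.imdecode(raw, cv2.IMREAD_COLOR))
        faces_by_id[face_id] = images
    return faces_by_id
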
def prepare_response_message_bb(id_dict):
    msg_list = []
    img_index_list = []
    bb_list = []
    id_list = []
    for k, emb_face_list in id_dict.items():

        for emb, image, bb, img_index in emb_face_list:
            img_index_list.append(img_index)
            bb_list.append(bb)
            id_list.append(k)

    img_index_str = INTRA_SEP.join(str(i) for i in img_index_list)
    bb_str = INTRA_SEP.join(str(b) for b in bb_list)
    id_str = INTRA_SEP.join(str(i) for i in id_list)

    msg = INTER_SEP.join([img_index_str, bb_str, id_str])
    # json = extract_response_json(id_dict)
    # write to ../data/json/response.json
    return msg

def prepare_response_message_link(id_dict):
    id_list = []
    filename_list = []
    msg_list = []
    for k, emb_face_list in id_dict.items():

        filenames = [str(k)]
        print('id: ', str(k))
        for emb, image, bb, img_index in emb_face_list:
            id_list.append(k)
            filename = '{}_{}.png'.format(k, bb)
            print('File name', filename)
            print('Path trunk', path_trunk, os.path.join(path_trunk, filename))
            savename = os.path.join(dst_path, path_trunk, filename)
            print('Save name', savename)
            misc.imsave(savename, image)
            filename_list.append('{}/{}'.format(path_trunk, filename))

            filenames.append('{}/{}'.format(path_trunk, filename))

        msg_list.append(INTRA_SEP.join(filenames))

    msg = INTER_SEP.join(msg_list)
    return msg

def prepare_response_for_central(id_dict, area_id):
    img_list = []
    emb_list = []
    after_padded_bbs = []
    order_list = []
    id_list = []

    for k, emb_face_list in id_dict.items():
        order_id = 0

        for emb, image, after_padded_bb, _ in emb_face_list:
            cvt_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            _, buffer = cv2.imencode('.png', cvt_img)
            bin_img = base64.b64encode(buffer).decode('ascii')
            img_list.append(bin_img)

            embed_str = ' '.join(map(str, emb)).strip()
            emb_list.append(embed_str)
            after_padded_bbs.append(after_padded_bb)

            id_list.append(str(k))
            order_list.append(str(order_id))
            order_id += 1

    img_list_str = INTRA_SEP.join(img_list)
    emb_list_str = INTRA_SEP.join(emb_list)
    order_list_str = INTRA_SEP.join(order_list)
    id_list_str = INTRA_SEP.join(id_list)
    bb_list_str = INTRA_SEP.join(str(b) for b in after_padded_bbs)

    # message layout: images | embeddings | orders | ids | area_id | timestamp | bounding boxes
    msg = INTER_SEP.join([img_list_str, emb_list_str, order_list_str, id_list_str, area_id, str(int(time.time())), bb_list_str])
    return msg


def on_server_rx_rpc_request(ch, method_frame, properties, body):
    log = []  # note: local and unused; timing entries go to the module-level log
    start_time = time.time()
    print('RPC Server got request:')
    print('Body', body)
    # imgs, ids = receive_response_message(body)
    imgs, ids = receive_response_message_link(body)

    detected_face_dict = detect_and_assign(imgs, ids)

    # send to vingroup
    # msg = prepare_response_message_bb(detected_face_dict)
    # msg = prepare_response_message(detected_face_dict)
    msg = prepare_response_message_link(detected_face_dict)
    # print('Msg', msg)

    ch.basic_publish('', routing_key=properties.reply_to, body=msg)
    ch.basic_ack(delivery_tag=method_frame.delivery_tag)

    # send to central worker
    # TODO: single value of crop ratio, turn to list of [coordinate]
    # msg = prepare_response_for_central(detected_face_dict, 'CHECKIN_AREA')
    # ch.basic_publish('', routing_key=rb.API_SERVER_TO_CENTRAL, body=msg)

    print('RPC Server says good bye')
    print(time.time() - start_time)

if __name__ == '__main__':
    main()