Advertisement
Guest User

Untitled

a guest
Apr 30th, 2017
75
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 6.66 KB | None | 0 0
from collections import defaultdict

import cv2
import dlib
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
# NOTE(review): classify() uses `xgb` but the file never imports it —
# `import xgboost as xgb` is required for this script to run.
%matplotlib inline
# XGBoost Booster hyperparameters used by the per-group classifiers in classify().
paramsb = {
    'nthread': 8,                    # CPU threads used by XGBoost
    'eta': .1,                       # learning rate
    'gamma': .0,                     # min loss reduction required to split
    'max_depth': 4,
    'subsample': .7,                 # row subsampling ratio per tree
    'colsample_bylevel': .7,         # column subsampling ratio per level
    'tree_method': 'approx',
    'objective': 'multi:softprob',   # emit per-class probability vectors
    'seed': 0,
    'num_class': 2,                  # binary one-vs-rest setup
    'silent': 1
}
  21.  
# Pre-trained dlib model files (relative paths into a sibling project checkout).
PREDICTOR_PATH = "../../../fapai/fap-ai/crawlers/grabber/shape_predictor_68_face_landmarks.dat"
RECOGNIZER_PATH = "../../../fapai/fap-ai/crawlers/grabber/dlib_face_recognition_resnet_model_v1.dat"
detector = dlib.get_frontal_face_detector()                    # HOG-based frontal-face detector
predictor = dlib.shape_predictor(PREDICTOR_PATH)               # 68-point facial-landmark predictor
recognizer = dlib.face_recognition_model_v1(RECOGNIZER_PATH)   # ResNet face-descriptor model
  27.  
# Canonical positions of the 68 dlib facial landmarks, expressed in a
# normalized coordinate frame. align() maps detected landmarks onto this
# template (after min-max scaling below) to produce aligned face crops.
TEMPLATE = np.float32([
    (0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943),
    (0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066),
    (0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778),
    (0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149),
    (0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107),
    (0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279),
    (0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421),
    (0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744),
    (0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053),
    (0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323),
    (0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851),
    (0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854),
    (0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114),
    (0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193),
    (0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758),
    (0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668),
    (0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208),
    (0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656),
    (0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002),
    (0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083),
    (0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225),
    (0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267),
    (0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656),
    (0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172),
    (0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073),
    (0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768),
    (0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516),
    (0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972),
    (0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792),
    (0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727),
    (0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612),
    (0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691),
    (0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626),
    (0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)])
  63.  
# Template rescaled so every coordinate lies in [0, 1] on both axes.
TPL_MIN, TPL_MAX = np.min(TEMPLATE, axis=0), np.max(TEMPLATE, axis=0)
MINMAX_TEMPLATE = (TEMPLATE - TPL_MIN) / (TPL_MAX - TPL_MIN)

# Triplets of landmark indices usable as affine-alignment anchor points.
INNER_EYES_AND_BOTTOM_LIP = [39, 42, 57]
OUTER_EYES_AND_NOSE = [36, 45, 33]

# Side length (pixels) of the aligned face crops produced by align().
IM_SIZE = 224
# Rectangle covering an entire aligned crop, handed to the landmark
# predictor in get_embs_from_aligned_img().
# NOTE(review): dlib rectangle bounds are inclusive, so right/bottom of
# IM_SIZE points one pixel past an IM_SIZE x IM_SIZE image — confirm
# whether IM_SIZE - 1 was intended.
FULL_RECT = dlib.rectangle(
    left=0,
    top=0,
    right=IM_SIZE,
    bottom=IM_SIZE,
)
  77.  
  78. def getAllFaceBoundingBoxes(rgbImg):
  79. assert rgbImg is not None
  80. try:
  81. return detector(rgbImg, 1)
  82. except Exception as e:
  83. print("Warning: {}".format(e))
  84. return []
  85.  
  86.  
  87. def findLandmarks(rgbImg, bb):
  88. assert rgbImg is not None
  89. assert bb is not None
  90.  
  91. points = predictor(rgbImg, bb)
  92. return list(map(lambda p: (p.x, p.y), points.parts()))
  93.  
  94.  
  95. def getLargestFaceBoundingBox(rgbImg, skipMulti=False):
  96. assert rgbImg is not None
  97.  
  98. faces = getAllFaceBoundingBoxes(rgbImg)
  99. if (not skipMulti and len(faces) > 0) or len(faces) == 1:
  100. return max(faces, key=lambda rect: rect.width() * rect.height())
  101. else:
  102. return None
  103.  
  104. # def scale_dlib_rect(rect, scale=1):
  105.  
  106. # def resize(x, y, w, h, scale):
  107. # return (max(x - int(w * (scale - 1) / 2), 0),
  108. # max(y - int(h * (scale - 1) / 2), 0),
  109. # int(w * scale),
  110. # int(h * scale))
  111.  
  112. # x, y, w, h = rect.left(), rect.top(), rect.right()-rect.left(), rect.bottom()-rect.top()
  113. # print (x)
  114. # x, y, w, h = resize(x, y, w, h, scale)
  115. # print (x)
  116.  
  117. # dlib_rectangle = dlib.rectangle(
  118. # left=x,
  119. # top=y,
  120. # right=x+w,
  121. # bottom=y+h,
  122. # )
  123. # return rect
  124. # #return dlib_rectangle
  125.  
  126.  
  127. def align(rgbImg, imgDim=IM_SIZE, bb=None,
  128. landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,
  129. one_face=False):
  130. assert imgDim is not None
  131. assert rgbImg is not None
  132. assert landmarkIndices is not None
  133. bbs = getAllFaceBoundingBoxes(rgbImg)
  134. #bbs = [scale_dlib_rect(rect) for rect in bbs]
  135.  
  136. all_landmarks = [findLandmarks(rgbImg, bb) for bb in bbs]
  137.  
  138. all_npLandmarks = [np.float32(landmarks) for landmarks in all_landmarks]
  139. npLandmarkIndices = np.array(landmarkIndices)
  140.  
  141. Hs = [cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
  142. imgDim * MINMAX_TEMPLATE[npLandmarkIndices]) for npLandmarks in all_npLandmarks]
  143.  
  144. thumbnails = [cv2.warpAffine(rgbImg, H, (imgDim, imgDim)) for H in Hs]
  145.  
  146. return thumbnails
  147.  
  148. def get_embs_from_aligned_img(crops_arr):
  149. fts = [recognizer.compute_face_descriptor(crop, predictor(crop, FULL_RECT))
  150. for crop in crops_arr]
  151. return fts
  152.  
  153. def classify(img):
  154. names = ['jewish' , 'african', 'european' , 'indus', 'arabs' , 'caucasian' , 'latino', 'asian']
  155. classifys = []
  156. for i in range(8):
  157. #print('~/xgb_'+'{}'.format(i) + '.gbm')
  158. classifys.append(xgb.Booster(paramsb, model_file='/home/libfun/lineage/ai.hack17/xgbb_'+'{}'.format(i) + '.gbm'))
  159. if len(align(img)) == 1:
  160. race = defaultdict()
  161. embs2 = get_embs_from_aligned_img(align(img))
  162. predic = []
  163. for i in range(8):
  164. race[names[i]] = classifys[i].predict(xgb.DMatrix(np.array(embs2[0]).reshape(1, -1)))[0, :][1]
  165. #predic.append(classifys[i].predict(xgb.DMatrix(np.array(embs2[0]).reshape(1, -1)))[0, :][1])
  166. #print(names[i] + " - " + "{:.0f}".format(classifys[i].predict(xgb.DMatrix(np.array(embs2[0]).reshape(1, -1)))[0, :][1]*100))
  167. return race
  168. return None
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement