import csv
import glob
import os
import pathlib
import random
import subprocess
import sys
import cv2
import dlib
import joblib
import numpy as np
from tqdm import tqdm
import shutil
import Models.Config.SEResNet50_config as config
from mtcnn.mtcnn import MTCNN

base_path = os.path.dirname(os.path.abspath(__file__))

### AFEW utils

def _read_dataset(partition, input_path_ds, output_path_cache=base_path + '/CacheFrameProcessing', debug_max_num_samples=None, cache_p=None):
    """Read a partition of the dataset."""
    print("Init reading from video files")
    data = []
    if not os.path.isdir(output_path_cache):
        os.makedirs(output_path_cache)
    # iterate over the partitions
    for set_dir in list_dirs(input_path_ds):
        if partition == os.path.basename(set_dir):
            print("Processing partition: ", partition)
            # for this partition, extract all video frames
            for class_dir in tqdm(list_dirs(set_dir)):
                print("Processing class: ", os.path.basename(class_dir))
                label = os.path.basename(class_dir)
                # extract the frames of every video in this class
                openface_fdir, _ = extract_frames_from_video_folder(class_dir, output_path_cache, debug_max_num_samples, cache_p, partition)
                # preprocess every video frame by detecting and aligning faces
                returned_sequences, map_infos = pre_process_video(openface_fdir, output_path_cache, cache_p, partition)
                # append processed data
                data += process_data(returned_sequences, map_infos, label)
    # check dataset integrity and gather statistics
    data = check_data(data, output_path_cache, cache_p, partition, input_path_ds)

    # flush the frame cache
    shutil.rmtree(output_path_cache)

    return data

def recover_data(input_path_ds, output_cache_path, cache_p, partition, failed_sequences):
    print("Recovering failed videos")
    recovered = []
    recover_path = cache_p + "/to_recover"
    if not os.path.isdir(recover_path):
        os.makedirs(recover_path)
    # iterate over the partition and copy every failed video into the recovery folder
    for set_dir in list_dirs(input_path_ds):
        if partition == os.path.basename(set_dir):
            for class_dir in tqdm(list_dirs(set_dir)):
                file_list = glob.glob('{}/*.avi'.format(class_dir))
                file_list.sort()
                for f in range(0, len(file_list)):
                    # os.path.splitext, unlike rstrip('.avi'), cannot eat trailing 'a'/'v'/'i' characters of the name
                    aviName = os.path.splitext(os.path.basename(file_list[f]))[0]
                    for item in failed_sequences:
                        if aviName in item:
                            shutil.copy(file_list[f], recover_path)  # copy the file path, not the loop index

    openface_fdir, _ = extract_frames_from_video_folder(recover_path, recover_path, None, cache_p, partition)
    # generate all bounding boxes for the failed videos with our own detector
    fd = MTCNN()
    bbox_dir = get_bbox(recover_path, fd)
    # preprocess every video frame, using our bounding boxes to align faces
    # (pass bbox by keyword: positionally it would land on resize_shape)
    returned_sequences, map_infos = pre_process_video(openface_fdir, output_cache_path, cache_p, partition, bbox=bbox_dir)

    for i, video in enumerate(returned_sequences):
        new_seq = list()
        new_map_info = list()
        for item in failed_sequences:
            if map_infos[i]['video_name'] == item[0]:
                label = item[1]
                new_seq.append(returned_sequences[i])
                new_map_info.append(map_infos[i])
                recovered += process_data(new_seq, new_map_info, label)

    print("End recovering failed data")

    return recovered

def get_bbox(recover_path, fd):
    # left unimplemented in the original paste; a possible implementation is sketched below
    pass

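# A minimal sketch of what get_bbox could look like, under one unverified
# assumption: that OpenFace's -bboxdir option reads one text file per frame
# containing "x_min y_min x_max y_max". The mtcnn package's detect_faces()
# does exist with this signature and returns dicts holding a 'box' as
# [x, y, w, h]. The helper name is hypothetical, not from the paste.
def _get_bbox_sketch(recover_path, fd):
    bbox_dir = os.path.join(recover_path, 'bbox')
    if not os.path.isdir(bbox_dir):
        os.makedirs(bbox_dir)
    # walk all extracted frames under the recovery folder
    for frame_path in glob.glob('{}/**/*.png'.format(recover_path), recursive=True):
        img = cv2.imread(frame_path)
        if img is None:
            continue
        # MTCNN expects RGB input; cv2 loads BGR
        detections = fd.detect_faces(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        if not detections:
            continue
        # reuse findRelevantFace (defined further down) to keep the most central face
        face = findRelevantFace([{'roi': d['box']} for d in detections],
                                img.shape[1], img.shape[0])
        x, y, w, h = face['roi']
        out_name = os.path.splitext(os.path.basename(frame_path))[0] + '.txt'
        with open(os.path.join(bbox_dir, out_name), 'w') as bbox_file:
            bbox_file.write('{} {} {} {}'.format(x, y, x + w, y + h))
    return bbox_dir
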
def process_data(sequences, infos, label):
    data = []
    for i in range(len(sequences)):
        example = {
            'frames': sequences[i],
            'label': label,
            'info': infos[i],
        }
        data.append(example)
    return data

def check_data(data, output_cache_path, cache_p, partition, input_path_ds):
    """Check video integrity, filtering out bad sequences; a statistics log is stored as well."""
    total_frames = 0  # total frames in data
    total_frames_discarded = 0  # frames without a face or with a wrong prediction
    total_faces_recognized_percentage = list()  # per-video face recognition/alignment success rate
    total_failed_sequences = list()  # names of all videos that failed during preprocessing

    print("Checking data integrity")
    # open the statistics file used to store dataset stats
    csv.register_dialect('mydialect', delimiter=';', quotechar='"', lineterminator='\r\n', quoting=csv.QUOTE_MINIMAL)
    with open(os.path.join(cache_p, 'dataset_' + partition + '_statistics.csv'), 'w', newline='') as stats_file:
        print("Stats log file opened")
        writer = csv.writer(stats_file, dialect='mydialect')
        writer.writerow(["Video", "Label", "Total frames", "Discarded frames", "face_presence_percentage"])
        # iterate over a copy: removing items from the list being iterated would skip elements
        for item in list(data):
            info = item['info']
            if info['total_frames'] - info['discarded_frames'] > 0:
                writer.writerow([info['video_name'], item['label'], info['total_frames'], info['discarded_frames'], info['face_present_percentage']])
                # update global stats
                total_frames += info['total_frames']
                total_frames_discarded += info['discarded_frames']
                total_faces_recognized_percentage.append(info['face_present_percentage'])
            elif info['total_frames'] - info['discarded_frames'] == 0:
                total_failed_sequences.append((info['video_name'], item['label']))
                data.remove(item)

        # recover failed sequences, if any
        if len(total_failed_sequences) > 0:
            writer.writerow([' '])
            writer.writerow(['Recovered videos'])
            writer.writerow(["Video", "Label", "Total frames", "Discarded frames", "face_presence_percentage"])
            recovered = recover_data(input_path_ds, output_cache_path, cache_p, partition, total_failed_sequences)
            # update the statistics with the recovered videos
            for item in recovered:
                info = item['info']
                writer.writerow([info['video_name'], item['label'], info['total_frames'], info['discarded_frames'], info['face_present_percentage']])
                total_frames += info['total_frames']
                total_frames_discarded += info['discarded_frames']
                total_faces_recognized_percentage.append(info['face_present_percentage'])
            data += recovered

        # write overall dataset stats (failed sequences are (name, label) tuples, so format them before joining)
        writer.writerow([' '])
        writer.writerow(['Dataset statistics'])
        writer.writerow(["Total frames", "Total discarded frames", "face_presence_percentage_mean", "Failed sequences"])
        writer.writerow([total_frames, total_frames_discarded, np.mean(total_faces_recognized_percentage),
                         '\r\n'.join('{} ({})'.format(name, label) for name, label in total_failed_sequences)])
    print("End check data integrity")

    return data

def list_dirs(directory):
    """Return all directories inside a given directory."""
    return [f for f in pathlib.Path(directory).iterdir() if f.is_dir()]

def _get_video_confidence():
    # still unresolved:
    # 005543160 casino
    # 020913240 django
    # 011029550 harry potter friend
    # 001522614 snape
    confidences = {
        '002818854': 0.15,
        '004312720': 0.15,
        '012256040': 0.15,
        '015407880': 0.25,
        '001143440': 0.25,
        '002021400': 0.15,
        '004305440': 0.15,
        '003044960': 0.15,
        '023340360': 0.24,
        '004827807': 0.162,
        '014928240': 0.15,
        '004513880': 0.17,
    }
    return confidences

def extract_frames_from_video_folder(input_avi, output_path_cache, debug_max_num_samples, cache_p, partition):
    """Extract frames from one folder (one class)."""
    file_list = glob.glob('{}/*.avi'.format(input_avi))
    file_list.sort()
    error_video = []
    openface_fdir = []

    print("Init Frames Extraction")
    current_num_samples = 0
    # iterate over all videos in the folder
    for f in range(0, len(file_list)):
        try:
            # os.path.splitext, unlike rstrip('.avi'), cannot eat trailing 'a'/'v'/'i' characters of the name
            aviName = os.path.splitext(os.path.basename(file_list[f]))[0]
            # build the output path and file-name pattern
            save_path = '{}/{}'.format(output_path_cache, aviName)
            if not os.path.isdir(save_path):
                os.makedirs(save_path)
            output = '{}/{}-%3d_frame.png'.format(save_path, aviName)
            # get the output frame size
            asr = get_output_size(file_list[f])
            # extract all frames of the video
            extract_frames(file_list[f], output, asr, cache_p, partition)
            openface_fdir.append(save_path)
            if debug_max_num_samples is not None:
                if current_num_samples == debug_max_num_samples - 1:
                    break
        except Exception:
            # track the videos that were lost
            error_video.append(aviName)
            print(aviName + ' ffmpeg failed' + '\n')

        current_num_samples += 1

    print("End Frames Extraction")

    return openface_fdir, error_video

def extract_frames(src, dest, asr, cache_p, partition):
    """Call ffmpeg and save all frames into the dest folder."""
    print("Calling FFMPEG on video: ", os.path.basename(src))

    # command = ["ffmpeg", "-i", src, "-s", asr, "-q:a", "1", dest]
    command = ['ffmpeg', '-loglevel', 'info', '-hide_banner', '-nostats', '-i', src, '-s', asr, '-q:a', '1', dest]

    try:
        with open(os.path.join(cache_p, 'FFMPEG_output_' + partition + '.log'), "a") as log_file:
            subprocess.Popen(command, stdout=log_file, stderr=log_file).wait()
    except Exception as e:
        print(e)

def pre_process_video(openface_fdir, frames_dir, cache_p, partition, resize_shape=(224, 224), bbox=None):
    """Preprocess the extracted videos with OpenFace."""
    aligned_videos = []
    all_maps = []
    print("Init pre processing")
    # build the OpenFace command (adjust the binary path to your install)
    command = ['/Users/dp.alex/OpenFace/build/bin/FeatureExtraction']
    for _dir in openface_fdir:
        command.append("-fdir")
        command.append(_dir)

    if bbox is not None:
        command.append("-bboxdir")
        command.append(bbox)

    # note: this overrides the resize_shape parameter; OpenFace's -simsize takes a single dimension
    resize_shape = 400
    scale = 1.46

    command += ['-out_dir', frames_dir, '-simsize', str(resize_shape), '-simscale', str(scale),
                '-format_aligned', 'png', '-nomask', '-multiview', '1', '-simalign', '-wild', '-nobadaligned']

    try:
        print("Calling OpenFace")
        with open(os.path.join(cache_p, 'OpenFace_output_' + partition + '.log'), "a") as log_file:
            subprocess.Popen(command, stdout=log_file, stderr=log_file).wait()
        print("End OpenFace")
    except Exception as e:
        print(e)

    # threshold for filtering out badly detected faces
    threshold_detection = 0.1

    # keep the needed info from the OpenFace csv output
    for filename in os.listdir(frames_dir):
        if filename.endswith(".csv"):
            aligned_frames = []
            filename = filename[:-4]
            aligned_frames_dir = frames_dir + "/" + filename + "_aligned"
            # open the csv
            with open(frames_dir + "/" + filename + ".csv", mode='r') as csv_file:
                csv_reader = csv.DictReader(csv_file, delimiter=',')
                map_info = {}
                map_frame = {}
                map_info['video_name'] = filename
                read_frames = 0
                discarded_frames = 0
                for row in csv_reader:
                    if int(row[' success']) == 1 and float(row[' confidence']) > threshold_detection:
                        aligned_frame = '{}/frame_det_00_{:06d}.png'.format(aligned_frames_dir, int(row['frame']))
                        aligned_frames.append(cv2.imread(aligned_frame))
                        map_frame[row['frame']] = row[' confidence']
                    else:
                        discarded_frames += 1
                    read_frames = int(row['frame'])
                map_info['total_frames'] = read_frames
                map_info['discarded_frames'] = discarded_frames
                # guard against empty csv files (no frames read at all)
                map_info['face_present_percentage'] = np.round((read_frames - discarded_frames) / read_frames, 2) if read_frames > 0 else 0
                map_info['detections_info'] = map_frame
                all_maps.append(map_info)
                aligned_videos.append(aligned_frames)

                # when everything is done, flush the directories
                shutil.rmtree(frames_dir + "/" + filename)
                shutil.rmtree(frames_dir + "/" + filename + "_aligned")
                os.remove(frames_dir + "/" + filename + ".csv")
    print("End pre processing")

    return aligned_videos, all_maps

def get_output_size(path, fixed=True, w=720, h=480):
    """Given the input path of a video, return its size as a 'WxH' string."""
    cap = cv2.VideoCapture(path)
    width, height = w, h  # fall back to the defaults if the capture cannot be opened
    if not fixed and cap.isOpened():
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()
    return '{}x{}'.format(width, height)

def split_video(item=None, split_len=16, partition='Train'):
    splitted_video = []
    video = item['frames']
    label = item['label']
    len_video = len(video)
    steps = len_video // split_len
    rest = len_video % split_len
    i = 0
    # if the video is at least split_len frames long
    if steps > 0:
        # get all full-length sequences
        while i < steps:
            start = i * split_len
            stop = (i * split_len) + split_len
            actual = np.array(video[start:stop])
            item = {
                'frames': actual,
                'label': label,
            }
            splitted_video.append(item)
            i += 1
        pads = []
        # pad the leftover frames into one more sequence if enough of them remain
        if 'val' not in partition.lower():
            print('Padding on train gen video')
            if rest >= (split_len / 2):
                for i in range(split_len - rest):
                    pads.append(video[-1])
                start = stop
                last = np.concatenate((video[start:], pads), axis=0)
                item = {
                    'frames': np.array(last),
                    'label': label,
                }
                splitted_video.append(item)
    # pad if the video is shorter than split_len
    elif steps == 0:
        rest = split_len - len_video
        pads = []
        for i in range(rest):
            pads.append(video[-1])
        # concatenate once, after all padding frames are collected
        last = np.concatenate((video, pads), axis=0)
        item = {
            'frames': np.array(last),
            'label': label,
        }
        splitted_video.append(item)
    return splitted_video

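# Quick illustration of split_video's contract (hypothetical data, not from
# the paste): a 40-frame clip with split_len=16 yields two full 16-frame
# chunks, and on a training partition the 8 leftover frames (>= split_len/2)
# are padded with copies of the last frame into a third chunk.
def _split_video_example():
    fake_frames = [np.zeros((224, 224, 3), dtype=np.uint8)] * 40
    chunks = split_video({'frames': fake_frames, 'label': 'Happy'}, split_len=16, partition='Train')
    assert [len(c['frames']) for c in chunks] == [16, 16, 16]
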
def top_left(f):
    return (f['roi'][0], f['roi'][1])

def bottom_right(f):
    return (f['roi'][0] + f['roi'][2], f['roi'][1] + f['roi'][3])

def enclosing_square(rect):
    def _to_wh(s, l, ss, ll, width_is_long):
        if width_is_long:
            return l, s, ll, ss
        else:
            return s, l, ss, ll

    def _to_long_short(rect):
        x, y, w, h = rect
        if w > h:
            l, s, ll, ss = x, y, w, h
            width_is_long = True
        else:
            s, l, ss, ll = x, y, w, h
            width_is_long = False
        return s, l, ss, ll, width_is_long

    s, l, ss, ll, width_is_long = _to_long_short(rect)

    hdiff = (ll - ss) // 2
    s -= hdiff
    ss = ll

    return _to_wh(s, l, ss, ll, width_is_long)

def add_margin(roi, qty):
    return (
        (roi[0] - qty),
        (roi[1] - qty),
        (roi[2] + 2 * qty),
        (roi[3] + 2 * qty))

def cut(frame, roi):
    pA = (int(roi[0]), int(roi[1]))
    pB = (int(roi[0] + roi[2] - 1), int(roi[1] + roi[3] - 1))  # pB will be an internal point
    W, H = frame.shape[1], frame.shape[0]
    A0 = pA[0] if pA[0] >= 0 else 0
    A1 = pA[1] if pA[1] >= 0 else 0
    data = frame[A1:pB[1], A0:pB[0]]
    if pB[0] < W and pB[1] < H and pA[0] >= 0 and pA[1] >= 0:
        return data
    # the ROI exceeds the frame: zero-pad the missing area
    w, h = int(roi[2]), int(roi[3])
    img = np.zeros((h, w, frame.shape[2]), dtype=np.uint8)
    offX = int(-roi[0]) if roi[0] < 0 else 0
    offY = int(-roi[1]) if roi[1] < 0 else 0
    np.copyto(img[offY:offY + data.shape[0], offX:offX + data.shape[1]], data)
    return img

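# Illustration of cut()'s out-of-frame behavior (hypothetical values): a ROI
# that extends past the image comes back zero-padded at the full ROI size.
def _cut_example():
    frame = np.ones((100, 100, 3), dtype=np.uint8)
    crop = cut(frame, (90, 90, 20, 20))  # only a 10x10 corner actually exists
    assert crop.shape == (20, 20, 3)
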
def cut_centered(frame, shape=(224, 224), random=True, random_values=None, max_change_fraction=0.045, only_narrow=False):
    # use the frame height for top/bottom and its width for left/right
    # (the original used frame.shape[1] for all four bounds)
    left = int((frame.shape[1] - shape[1]) / 2)
    top = int((frame.shape[0] - shape[0]) / 2)
    right = int((frame.shape[1] + shape[1]) / 2)
    bottom = int((frame.shape[0] + shape[0]) / 2)
    if random:
        if random_values is None:
            sigma = shape[0] * max_change_fraction
            xy = _random_normal_crop(2, sigma, mean=-sigma / 5).astype(int)
            wh = _random_normal_crop(2, sigma * 2, mean=sigma / 2, positive=only_narrow).astype(int)
        else:
            xy, wh = random_values
    else:
        xy = [0, 0]
        wh = [0, 0]

    return frame[(top + wh[0]):(bottom + wh[0]), (left + xy[0]):(right + xy[0]), :]

def pad(img):
    h, w, c = img.shape  # numpy shape order is (height, width, channels)
    if w == h:
        return img
    size = max(w, h)
    out = np.zeros((size, size, c))
    np.copyto(out[0:h, 0:w], img)
    return out

def findRelevantFace(objs, W, H):
    mindistcenter = None
    minobj = None
    for o in objs:
        cx = o['roi'][0] + (o['roi'][2] / 2)
        cy = o['roi'][1] + (o['roi'][3] / 2)
        distcenter = (cx - (W / 2)) ** 2 + (cy - (H / 2)) ** 2
        if mindistcenter is None or distcenter < mindistcenter:
            mindistcenter = distcenter
            minobj = o
    return minobj

tmp_A = []
FIT_PLANE_SIZ = 16
for y in np.linspace(0, 1, FIT_PLANE_SIZ):
    for x in np.linspace(0, 1, FIT_PLANE_SIZ):
        tmp_A.append([y, x, 1])
Amatrix = np.matrix(tmp_A)

def _fit_plane(im):
    original_shape = im.shape
    if len(im.shape) > 2 and im.shape[2] > 1:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im = cv2.resize(im, (FIT_PLANE_SIZ, FIT_PLANE_SIZ))
    if im.dtype == np.uint8:
        im = im.astype(float)
    # least-squares fit of a plane to the downsampled image
    A = Amatrix
    tmp_b = []
    for y in range(FIT_PLANE_SIZ):
        for x in range(FIT_PLANE_SIZ):
            tmp_b.append(im[y, x])
    b = np.matrix(tmp_b).T
    fit = (A.T * A).I * A.T * b

    fit[0] /= original_shape[0]
    fit[1] /= original_shape[1]

    def LR(x, y):
        return np.repeat(fit[0] * x, len(y), axis=0).T + np.repeat(fit[1] * y, len(x), axis=0) + fit[2]

    xaxis = np.array(range(original_shape[1]))
    yaxis = np.array(range(original_shape[0]))
    imest = LR(yaxis, xaxis)
    return np.array(imest)

def linear_balance_illumination(im):
    if im.dtype == np.uint8:
        im = im.astype(float)
    if len(im.shape) == 2:
        im = np.expand_dims(im, 2)
    if im.shape[2] > 1:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
    imout = im.copy()
    imest = _fit_plane(im[:, :, 0])
    imout[:, :, 0] = im[:, :, 0] - imest + np.mean(imest)
    if im.shape[2] > 1:
        imout = cv2.cvtColor(imout, cv2.COLOR_YUV2BGR)
    return imout.reshape(im.shape)

def mean_std_normalize(inp):
    std = inp.flatten().std()
    if std < 0.001:
        std = 0.001
    # divide by the clamped std (the original divided by the raw std, defeating the clamp)
    return (inp - inp.flatten().mean()) / std

def _random_normal_crop(n, maxval, positive=False, mean=0):
    gauss = np.random.normal(mean, maxval / 2, (n, 1)).reshape((n,))
    gauss = np.clip(gauss, mean - maxval, mean + maxval)
    if positive:
        return np.abs(gauss)
    else:
        return gauss

def random_change_image(img, random_values=None):
    # draw the random values inside the call: a default argument is evaluated
    # only once, so the original version reused the same "random" change forever
    if random_values is None:
        random_values = (_random_normal_crop(1, 0.5, mean=1)[0], _random_normal_crop(1, 48)[0], random.randint(0, 1))
    # brightness and contrast
    a, b, flip = random_values
    img = (img - 128.0) * a + 128.0 + b
    img = np.clip(img, 0, 255)
    img = img.astype(np.uint8)
    # horizontal flip
    if flip:
        img = np.fliplr(img)
    return img

def random_change_roi(roi, max_change_fraction=0.045, only_narrow=False, random_values=None):
    # random crop with the probability mass concentrated around 0 (Gaussian)
    sigma = roi[3] * max_change_fraction
    if random_values is None:
        xy = _random_normal_crop(2, sigma, mean=-sigma / 5).astype(int)
        wh = _random_normal_crop(2, sigma * 2, mean=sigma / 2, positive=only_narrow).astype(int)
    else:
        xy, wh = random_values
    print("orig roi: %s" % str(roi))
    print("rand changes -> xy:%s, wh:%s" % (str(xy), str(wh)))
    roi2 = (roi[0] + xy[0], roi[1] + xy[1], roi[2] - wh[0], roi[3] - wh[1])
    print("new roi: %s" % str(roi2))

    return roi2

def roi_center(roi):
    return (roi[0] + roi[2] // 2, roi[1] + roi[3] // 2)

def random_image_rotate(img, rotation_center, random_angle_deg=None):
    # draw the angle per call (a default argument is evaluated only once)
    angle_deg = _random_normal_crop(1, 10)[0] if random_angle_deg is None else random_angle_deg
    M = cv2.getRotationMatrix2D(rotation_center, angle_deg, 1.0)
    # warpAffine's dsize is (width, height), i.e. the shape reversed
    nimg = cv2.warpAffine(img, M, dsize=(img.shape[1], img.shape[0]))
    return nimg.reshape(img.shape)

def random_image_skew(img, rotation_center, random_skew=None):
    # draw the skew per call (same default-argument pitfall as above)
    s = _random_normal_crop(2, 0.1, positive=True) if random_skew is None else random_skew
    M = np.array([[1, s[0], 1], [s[1], 1, 1]])
    nimg = cv2.warpAffine(img, M, dsize=(img.shape[1], img.shape[0]))
    return nimg.reshape(img.shape)

def equalize_hist(img):
    if len(img.shape) > 2 and img.shape[2] > 1:
        img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
        img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
        return cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    else:
        return cv2.equalizeHist(img)

def draw_emotion(y, w, h):
    EMOTIONS = config.CLASSES
    COLORS = [(120, 120, 120), (50, 50, 255), (0, 255, 255), (255, 0, 0), (0, 0, 140), (0, 200, 0), (42, 42, 165), (100, 100, 200), (170, 170, 170), (80, 80, 80)]
    emotionim = np.zeros((w, h, 3), dtype=np.uint8)
    barh = h // len(EMOTIONS)
    MAXEMO = np.sum(y)
    for i, yi in enumerate(y):
        # print((EMOTIONS[i], yi))
        p1, p2 = (0, i * barh), (int(yi * w // MAXEMO), (i + 1) * 20)
        # cv2.rectangle(emotionim, p1, p2, COLORS[i], cv2.FILLED)
        cv2.putText(emotionim, "%s: %.1f" % (EMOTIONS[i], yi), (0, i * 20 + 14), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))
    return emotionim

def show_frame(frame, text):
    font = cv2.FONT_HERSHEY_SIMPLEX
    position = (10, 20)
    fontScale = 0.3
    fontColor = (255, 255, 255)
    lineType = 1
    cv2.putText(frame,
                text,
                position,
                font,
                fontScale,
                fontColor,
                lineType)
    cv2.imshow('frame', frame)
    cv2.waitKey(0)
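
# Hypothetical end-to-end usage of the reader above (the paths are placeholders,
# not taken from the paste): read the Train partition of AFEW, then split every
# processed video into fixed-length clips.
if __name__ == '__main__':
    afew_root = '/path/to/AFEW'        # placeholder dataset root
    cache_dir = base_path + '/cache'   # placeholder log/statistics folder
    dataset = _read_dataset('Train', afew_root, cache_p=cache_dir)
    clips = []
    for video_item in dataset:
        clips += split_video(video_item, split_len=16, partition='Train')
    print('{} clips of 16 frames each'.format(len(clips)))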