Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import copy
import logging

import numpy as np

from validation.pycocotools.coco import COCO
from validation.pycocotools.cocoeval import COCOeval
- def _process_into_results(boxes, scores, orig_image_size=[900, 900], threshold=0.3, cpu=False):
- """
- Process the raw results of the detection model into an object that can be deserialized to JSON
- """
- #scores = scores[:, 0]
- scale = np.array([orig_image_size[1], orig_image_size[0], orig_image_size[1], orig_image_size[0]]) ##################
- inds = np.where(scores > threshold)
- if len(inds) == 0:
- all_boxes = np.empty([0, 7], dtype=np.float32)
- return []
- #print(inds)
- #print(scores)
- c_bboxes = boxes[inds]
- c_scores = scores[inds]
- c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(np.float32, copy=False)
- #keep = soft_nms(c_dets)#, 0.45, force_cpu=cpu)
- #keep = nms(c_dets, 0.45)
- #keep = keep[:50]
- #c_dets = c_dets[keep, :]
- #_points = c_points[keep, :]
- all_boxes = c_dets #np.hstack((c_dets, c_points))
- results = []
- for det in all_boxes:
- conf = det[4]
- if conf > threshold:
- xmin = int(det[0])
- ymin = int(det[1])
- xmax = int(det[2])
- ymax = int(det[3])
- #pt_x = int(round(det[5]))
- #pt_y = int(round(det[6]))
- result = {
- 'bbox': [xmin, ymin, xmax, ymax],
- #'ground': [pt_x, pt_y],
- 'conf': conf
- }
- results += [result]
- return results
# Module-global counter used to hand out unique COCO annotation ids for
# ground-truth entries; incremented inside process_into_coco.
gen_ids = 1
def process_into_coco(people, image_id, is_truth, orig_size=(1., 1.), ignore_mask=None):
    """Convert detections or ground-truth boxes into COCO-style annotation dicts.

    Args:
        people: for ground truth (`is_truth=True`), a sequence of
            [xmin, ymin, xmax, ymax, ...] rows; for predictions, a sequence
            of dicts with 'bbox' ([xmin, ymin, xmax, ymax]) and 'conf' keys
            (as produced by `_process_into_results`).
        image_id: id of the image the annotations belong to.
            NOTE(review): this argument is immediately overwritten with 0
            below, so every annotation is attached to image id 0 — confirm
            whether per-image ids are wanted before removing the override.
        is_truth: True when converting ground truth (adds 'id', 'iscrowd'
            and 'area' fields), False for model predictions.
        orig_size: kept for interface compatibility; currently unused.
        ignore_mask: kept for interface compatibility; currently unused.

    Returns:
        List of COCO annotation dicts; bboxes are converted from corner
        format [xmin, ymin, xmax, ymax] to COCO's [x, y, width, height].
    """
    global gen_ids
    image_id = 0  # see NOTE(review) in the docstring
    results = []
    for person in people:
        if is_truth:
            score = 1.0
            # Copy into a plain list: `person[:4]` on a numpy row is a view,
            # so the in-place width/height conversion below would otherwise
            # corrupt the caller's ground-truth array (and the int() casts
            # were silently undone by the view's float dtype coercion).
            bbox = [float(person[0]), float(person[1]), float(person[2]), float(person[3])]
            # Convert [xmin, ymin, xmax, ymax] -> [x, y, w, h].
            bbox[2] -= bbox[0]
            bbox[3] -= bbox[1]
            bbox = [int(v) for v in bbox]
            area = bbox[2] * bbox[3]
        else:
            score = person['conf']
            bbox = [person['bbox'][0], person['bbox'][1],
                    person['bbox'][2], person['bbox'][3]]
            # Convert [xmin, ymin, xmax, ymax] -> [x, y, w, h].
            bbox[2] -= bbox[0]
            bbox[3] -= bbox[1]
        floor_point = [bbox[0], bbox[1]]
        if is_truth:
            # coco truth wants `id` while its prediction input wants `image_id`.
            coco = {'id': gen_ids, 'image_id': image_id, 'category_id': 1,
                    'score': float(score), 'bbox': list(bbox),
                    'iscrowd': 0, 'area': area, 'points': [list(floor_point)]}
            gen_ids += 1
        else:
            coco = {'image_id': image_id, 'category_id': 1,
                    'score': float(score), 'bbox': list(bbox),
                    'points': [list(floor_point)]}
        results.append(coco)
    if not results and is_truth:
        # TODO: fix issue with missing gts for some images — emit a dummy
        # ignored annotation so COCO evaluation does not choke on an image
        # that has no ground truth at all.
        results = [{'id': gen_ids, 'image_id': image_id, 'category_id': 1,
                    'score': 0., 'bbox': [0, 0, 0, 0],
                    'iscrowd': 0, 'area': 0, 'ignore': 1}]
        gen_ids += 1
    return results
def run_coco_validation(truth_data, validation_results, use_coco_val, conf_type_step):
    """Run the (customized) COCO evaluation of predictions against ground truth.

    Args:
        truth_data: list of COCO-style ground-truth annotation dicts.
        validation_results: list of COCO-style prediction dicts.
        use_coco_val: forwarded to COCOeval.summarize (summary mode flag).
        conf_type_step: truthy to evaluate with the custom 'step' confidence
            mode, falsy for the default mode.

    Returns:
        (stats, distances) from the customized COCOeval, or (None, None)
        when there are no results to evaluate.
    """
    truths = COCO()
    # COCO only needs the image ids here, so build minimal image records.
    truths.dataset['images'] = [{'id': gt['image_id']} for gt in truth_data]
    truths.dataset['annotations'] = truth_data
    truths.dataset['categories'] = [{
        "supercategory": "peplum",
        "id": 1,
        "name": "peplum",
    }]
    truths.createIndex()
    # Deep-copy so loadRes cannot mutate the caller's result dicts.
    results = truths.loadRes(copy.deepcopy(validation_results))
    if results is None:
        # Requires `import logging` at module level (it was missing, which
        # made this line a NameError at runtime).
        logging.info("No validation to be run. Stopping program here.")
        return None, None
    # NOTE(review): 4th/5th COCOeval args are a custom-fork extension —
    # presumably confidence mode and an ignore mask; confirm in the fork.
    mode = 'step' if conf_type_step else None
    coco_eval = COCOeval(truths, results, 'bbox', mode, None)
    coco_eval.evaluate()
    distances, _ = coco_eval.compute_distance_points()
    coco_eval.accumulate()
    coco_eval.summarize(use_coco_val, print_=False)
    return coco_eval.stats, distances
def evaluate(output, filenames, gt_bboxes, mode):
    """Evaluate raw detections against ground truth and print mAP summaries.

    Args:
        output: per-image arrays of detections, one row per detection,
            laid out as [score, x1, y1, x2, y2].
        filenames: per-image identifiers, used as COCO image ids.
        gt_bboxes: per-image ground-truth boxes [x1, y1, x2, y2, ...].
        mode: label printed alongside the results (e.g. 'Train').

    Returns:
        Tuple of mAP values at IoU thresholds 0.2, 0.5 and 0.8.
    """
    validation_results = []
    truth_data = []
    for pict_idx in range(len(filenames)):
        # Column 0 is the confidence score, columns 1-4 the box corners.
        bb, s = output[pict_idx][:, 1:], output[pict_idx][:, 0]
        result = _process_into_results(bb, s, threshold=0.01)
        # Was `filenames` (the whole sequence) — pass this image's own id,
        # matching the ground-truth call below. (process_into_coco currently
        # overrides image_id internally, but a per-image id is what its
        # interface expects.)
        validation_results += process_into_coco(result, filenames[pict_idx], False)
        truth_data += process_into_coco(gt_bboxes[pict_idx], filenames[pict_idx], True)
    mscoco_summary_stats, distances = run_coco_validation(
        truth_data, validation_results, True, conf_type_step=0.5)
    print(mode + ' mAP : ')
    # Keys: [confidence step][area range][IoU threshold] (custom-fork layout).
    precision = mscoco_summary_stats['precision'][0.05]['all']
    print('IoU = 0.2 ' + str(precision[0.2]))
    print('IoU = 0.5 ' + str(precision[0.5]))
    print('IoU = 0.8 ' + str(precision[0.8]))
    return precision[0.2], precision[0.5], precision[0.8]
# print(images[0].detach().numpy().transpose().shape
# Load cached model predictions and ground truth from disk.
# NOTE(review): np.load defaults to allow_pickle=False; if these .npy files
# hold object arrays (ragged per-image lists) this will raise — confirm.
output_to_eval = list(np.load('pred.npy'))
gt_bboxes_to_eval = list(np.load('gt.npy'))
# Integer indices stand in for per-image filenames / COCO image ids.
filenames_to_eval = np.arange(len(output_to_eval))
# Zero out column 4 of every ground-truth row — presumably a class/score
# slot that must be neutral for evaluation; verify against how gt.npy is
# produced.
for i in range(len(gt_bboxes_to_eval)):
    for j in range(len(gt_bboxes_to_eval[i])):
        gt_bboxes_to_eval[i][j][4] = 0
# Reorder prediction columns from [x1, y1, x2, y2, score] to
# [score, x1, y1, x2, y2], the layout evaluate() expects.
for i in range(len(output_to_eval)):
    for j in range(len(output_to_eval[i])):
        output_to_eval[i][j] = [output_to_eval[i][j][4], output_to_eval[i][j][0], output_to_eval[i][j][1], output_to_eval[i][j][2], output_to_eval[i][j][3]]
evaluate(output_to_eval, filenames_to_eval, gt_bboxes_to_eval, 'Train')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement