import time

import cv2
import numpy as np


def get_predection(image, net, LABELS, COLORS):
    # run a YOLO forward pass on `image` and draw the surviving detections;
    # relies on module-level `confthres` and `nmsthres` thresholds being defined
    (H, W) = image.shape[:2]

    # determine only the *output* layer names that we need from YOLO
    # (note: on newer OpenCV builds getUnconnectedOutLayers() returns plain
    # integers, in which case ln[i - 1] is needed instead of ln[i[0] - 1])
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # construct a blob from the input image and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes and
    # associated probabilities
    blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()

    # show timing information on YOLO
    print("[INFO] YOLO took {:.6f} seconds".format(end - start))

    # initialize our lists of detected bounding boxes, confidences, and
    # class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > confthres:
                # scale the bounding box coordinates back relative to the
                # size of the image, keeping in mind that YOLO actually
                # returns the center (x, y)-coordinates of the bounding
                # box followed by the boxes' width and height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                # use the center (x, y)-coordinates to derive the top
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # update our list of bounding box coordinates, confidences,
                # and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # apply non-maxima suppression to suppress weak, overlapping bounding
    # boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, confthres, nmsthres)

    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, color, 2)

    return image
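
For context, a minimal driver showing how this function might be called is sketched below. The file names (yolov3.cfg, yolov3.weights, coco.names, test.jpg) and the threshold values for confthres and nmsthres are assumptions for illustration; they are not part of the original paste, which expects those globals to be defined elsewhere.

# usage sketch, assuming Darknet-format YOLOv3 files and a local test image
import cv2
import numpy as np

confthres = 0.5   # assumed minimum detection confidence
nmsthres = 0.3    # assumed non-maxima suppression threshold

# class labels and one random color per class
LABELS = open("coco.names").read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")

# load the network, run detection, and save the annotated image
net = cv2.dnn.readNetFromDarknet("yolov3.cfg", "yolov3.weights")
image = cv2.imread("test.jpg")
result = get_predection(image, net, LABELS, COLORS)
cv2.imwrite("output.jpg", result)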