# USAGE
# To read and write back out to video:
# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \
#   --model mobilenet_ssd/MobileNetSSD_deploy.caffemodel --input videos/example_01.mp4 \
#   --output output/output_01.avi
#
# To read from webcam and write back out to disk:
# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \
#   --model mobilenet_ssd/MobileNetSSD_deploy.caffemodel \
#   --output output/webcam_output.avi

# import the necessary packages
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
    help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
    help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
    help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
    help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.4,
    help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
    help="# of skip frames between detections")
args = vars(ap.parse_args())

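# NOTE: argparse converts dashes in long option names to underscores, so
# the --skip-frames switch is read below as args["skip_frames"]
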
# initialize the list of class labels MobileNet SSD was trained to
# detect
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]

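# NOTE: this particular MobileNet SSD was trained on the 20-class PASCAL
# VOC dataset (plus a background class); the index returned by the network
# is used to look up a label in this list, so the ordering must match the
# model's training labels
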
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# if a video path was not supplied, grab a reference to the webcam
if not args.get("input", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

# otherwise, grab a reference to the video file
else:
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(args["input"])

# initialize the video writer (we'll instantiate later if need be)
writer = None

# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}

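# NOTE: in the accompanying pyimagesearch implementation, maxDisappeared
# is the number of consecutive frames an object may go undetected before
# it is deregistered, and maxDistance is the maximum pixel distance
# between centroids for them to be associated with the same object
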
# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0

# start the frames per second throughput estimator
fps = FPS().start()

# loop over frames from the video stream
while True:
    # grab the next frame and handle if we are reading from either
    # VideoCapture or VideoStream
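    # NOTE: cv2.VideoCapture.read() returns a (grabbed, frame) tuple while
    # imutils' VideoStream.read() returns the frame itself, which is why we
    # unpack frame[1] only when reading from a file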
    frame = vs.read()
    frame = frame[1] if args.get("input", False) else frame

    # if we are viewing a video and we did not grab a frame then we
    # have reached the end of the video
    if args["input"] is not None and frame is None:
        break

    # resize the frame to have a maximum width of 500 pixels (the
    # less data we have, the faster we can process it), then convert
    # the frame from BGR to RGB for dlib
    frame = imutils.resize(frame, width=500)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # if the frame dimensions are empty, set them
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # if we are supposed to be writing a video to disk, initialize
    # the writer
    if args["output"] is not None and writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30,
            (W, H), True)

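    # NOTE: the writer's FPS is hard-coded to 30 above; if the input file
    # was recorded at a different rate, the output will play back too fast
    # or too slow (one possible fix, not applied here, is to query the
    # source via vs.get(cv2.CAP_PROP_FPS) when reading from a file)
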
    # initialize the current status along with our list of bounding
    # box rectangles returned by either (1) our object detector or
    # (2) the correlation trackers
    status = "Waiting"
    rects = []

    # check to see if we should run a more computationally expensive
    # object detection method to aid our tracker
    if totalFrames % args["skip_frames"] == 0:
        # set the status and initialize our new set of object trackers
        status = "Detecting"
        trackers = []

        # convert the frame to a blob, pass the blob through the
        # network, and obtain the detections
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
        net.setInput(blob)
        detections = net.forward()

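        # NOTE: blobFromImage scales pixel values by 0.007843 (~1/127.5)
        # after subtracting a mean of 127.5, mapping intensities to roughly
        # [-1, 1] as the network expects; the resulting detections array
        # has shape (1, 1, N, 7), where each row holds
        # [batch_id, class_id, confidence, startX, startY, endX, endY]
        # with the box coordinates normalized to [0, 1]
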
        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated
            # with the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by requiring a minimum
            # confidence
            if confidence > args["confidence"]:
                # extract the index of the class label from the
                # detections list
                idx = int(detections[0, 0, i, 1])

                # if the class label is not a person, ignore it
                if CLASSES[idx] != "person":
                    continue

                # compute the (x, y)-coordinates of the bounding box
                # for the object
                box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                (startX, startY, endX, endY) = box.astype("int")

                # construct a dlib rectangle object from the bounding
                # box coordinates and then start the dlib correlation
                # tracker
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)

                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                trackers.append(tracker)

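    # NOTE: correlation trackers can drift over time, which is why the
    # trackers list is rebuilt from fresh detections every skip-frames
    # frames
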
    # otherwise, we should utilize our object *trackers* rather than
    # object *detectors* to obtain a higher frame processing throughput
    else:
        # loop over the trackers
        for tracker in trackers:
            # set the status of our system to be 'tracking' rather
            # than 'waiting' or 'detecting'
            status = "Tracking"

            # update the tracker and grab the updated position
            tracker.update(rgb)
            pos = tracker.get_position()

            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())

            # add the bounding box coordinates to the rectangles list
            rects.append((startX, startY, endX, endY))

    # draw a horizontal line in the center of the frame -- once an
    # object crosses this line we will determine whether it was
    # moving 'up' or 'down'
    cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

    # use the centroid tracker to associate (1) the old object
    # centroids with (2) the newly computed object centroids
    objects = ct.update(rects)

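    # NOTE: in the accompanying pyimagesearch implementation, ct.update()
    # returns a dictionary mapping each object ID to its current (x, y)
    # centroid, registering new objects and deregistering vanished ones
    # as needed
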
    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)

        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)

        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)

            # check to see if the object has been counted or not
            if not to.counted:
                # if the direction is negative (indicating the object
                # is moving up) AND the centroid is above the center
                # line, count the object
                if direction < 0 and centroid[1] < H // 2:
                    totalUp += 1
                    to.counted = True

                # if the direction is positive (indicating the object
                # is moving down) AND the centroid is below the
                # center line, count the object
                elif direction > 0 and centroid[1] > H // 2:
                    totalDown += 1
                    to.counted = True

        # store the trackable object in our dictionary
        trackableObjects[objectID] = to

        # draw both the ID of the object and the centroid of the
        # object on the output frame
        text = "ID {}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

    # construct a list of information tuples we will be displaying on
    # the frame
    info = [
        ("Up", totalUp),
        ("Down", totalDown),
        ("Status", status),
    ]

    # loop over the info tuples and draw them on our frame
    for (i, (k, v)) in enumerate(info):
        text = "{}: {}".format(k, v)
        cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

    # check to see if we should write the frame to disk
    if writer is not None:
        writer.write(frame)

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

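    # NOTE: masking waitKey's return value with 0xFF keeps only the low
    # byte so the comparison with ord("q") behaves consistently across
    # platforms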
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

    # increment the total number of frames processed thus far and
    # then update the FPS counter
    totalFrames += 1
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# check to see if we need to release the video writer pointer
if writer is not None:
    writer.release()

# if we are not using a video file, stop the camera video stream
if not args.get("input", False):
    vs.stop()

# otherwise, release the video file pointer
else:
    vs.release()

# close any open windows
cv2.destroyAllWindows()