Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# USAGE
# python pi_object_detection.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel

# Standard library.
import argparse
import datetime
import time
from multiprocessing import Process, Queue

# Third party.
import cv2
import imutils
import numpy as np
from imutils.video import FPS
def classify_frame(net, inputQueue, outputQueue):
    """Child-process loop: run frames from inputQueue through the SSD net.

    Pulls a raw BGR frame off ``inputQueue`` whenever one is available,
    resizes it to the 300x300 input the MobileNet-SSD expects, runs a
    forward pass, and pushes the raw detection tensor onto
    ``outputQueue``.  Loops forever; throttled to one poll every 2 s.
    """
    print("[INFO] starting classify process...")
    while True:
        if not inputQueue.empty():
            img = inputQueue.get()
            img = cv2.resize(img, (300, 300))
            # Scale factor and mean (127.5) match the model's training
            # preprocessing.
            blob = cv2.dnn.blobFromImage(img, 0.007843, (300, 300), 127.5)
            net.setInput(blob)
            outputQueue.put(net.forward())
        # NOTE(review): the pasted source lost its indentation — this
        # sleep is placed at loop level as the rate limiter; confirm
        # against the original.
        time.sleep(2)
def write_frame(writeQueue, startQueue):
    """Child-process loop: append frames from writeQueue to output.avi.

    Parameters:
        writeQueue: multiprocessing.Queue of BGR frames (the writer is
            opened at 640x480 — frames must match that size to be
            written correctly).
        startQueue: multiprocessing.Queue used as a shutdown signal;
            putting any item on it ends the loop so the video file can
            be finalized.
    """
    print("[INFO] starting write process...")
    # MJPG codec at a fixed 20 FPS playback rate.
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')  # *'MJPG'
    out = cv2.VideoWriter('output.avi', fourcc, 20, (640, 480), True)
    while True:
        if not writeQueue.empty():
            frame = writeQueue.get()
            out.write(frame)
        elif not startQueue.empty():
            # BUGFIX: out.release() used to sit after an unconditional
            # `while True` and was unreachable, so the AVI was never
            # finalized.  The previously-unused startQueue parameter now
            # serves as the stop signal.
            startQueue.get()
            break
    out.release()
# --- command-line arguments -------------------------------------------------
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
                help="path to Caffe pre-trained model")
args = vars(ap.parse_args())

# Class labels MobileNet-SSD was trained on; a detection's class id
# indexes into this list.
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# Single-slot queues: each holds at most the freshest frame/result so the
# slow classifier never builds up a backlog.
inputQueue = Queue(maxsize=1)
outputQueue = Queue(maxsize=1)
writeQueue = Queue(maxsize=1)
startQueue = Queue(maxsize=1)

detections = None
confidence_set = 0.8  # minimum confidence for a detection to count
count = 0             # frames still to record after the last hit

print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# Detection and video writing each run in their own daemon process so the
# capture/display loop stays responsive.
print("[INFO] starting process...")
p = Process(target=classify_frame, args=(net, inputQueue, outputQueue))
p.daemon = True
p.start()
w = Process(target=write_frame, args=(writeQueue, startQueue))
w.daemon = True
w.start()

print("[INFO] starting video stream...")
vs = cv2.VideoCapture(0)
fps = FPS().start()

# Camera properties, logged for information.  (The original code used the
# raw property ids 3/4/5 and a meaningless module-level `global`
# statement; both replaced here.)
fW = vs.get(cv2.CAP_PROP_FRAME_WIDTH)
print("[INFO] fW:", fW)
fH = vs.get(cv2.CAP_PROP_FRAME_HEIGHT)
print("[INFO] fH:", fH)
framerate = vs.get(cv2.CAP_PROP_FPS)
# BUGFIX: log message previously read "ramerate".
print("[INFO] framerate:", framerate)

while True:
    ret, frame = vs.read()
    if not ret:
        # Camera read failed / stream ended.
        break
    # Feed the classifier only when its single input slot is free.
    if inputQueue.empty():
        inputQueue.put(frame)
    # Pick up the latest detections when ready; otherwise keep reusing
    # the previous result.
    if not outputQueue.empty():
        detections = outputQueue.get()
    # Timestamp overlay.
    cv2.putText(frame, datetime.datetime.now().strftime("%c"), (10, 25),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)
    if detections is not None:
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > confidence_set:
                idx = int(detections[0, 0, i, 1])
                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                # 8 = cat, 12 = dog, 15 = person (see CLASSES).
                if idx in (8, 12, 15):
                    print(label)
                    count = 100  # record ~100 more frames
    if count > 1:
        # counter for recording frames after the object leaves the frame
        writeQueue.put(frame)
        count -= 1
        print(count)
    cv2.imshow("Camera", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
    fps.update()

fps.stop()
vs.release()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
Advertisement
Add Comment
Please, Sign In to add comment