# Import packages
import os
import argparse
import cv2
import numpy as np
import time
from threading import Thread
import importlib.util

# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
    """Camera object that controls video streaming from the webcam"""
    def __init__(self, resolution=(640,480), framerate=30):
        # Initialize the webcam and the camera image stream
        self.stream = cv2.VideoCapture(0)
        ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        ret = self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
        ret = self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])

        # Read first frame from the stream
        (self.grabbed, self.frame) = self.stream.read()

        # Variable to control when the camera is stopped
        self.stopped = False

    def start(self):
        # Start the thread that reads frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # Keep looping indefinitely until the thread is stopped
        while True:
            # If the camera is stopped, stop the thread
            if self.stopped:
                # Close camera resources
                self.stream.release()
                return

            # Otherwise, grab the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # Return the most recent frame
        return self.frame

    def stop(self):
        # Indicate that the camera and thread should be stopped
        self.stopped = True

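# Minimal usage sketch for VideoStream (illustrative only; the main loop below
# does the real work, and the variable names here are hypothetical):
#   vs = VideoStream(resolution=(640, 480)).start()
#   frame = vs.read()   # non-blocking: returns the latest frame the thread grabbed
#   vs.stop()
# Reading frames in a background thread keeps cv2.VideoCapture.read() from
# blocking the detection loop, which is what raises the effective FPS.
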
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
                    required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
                    default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
                    default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
                    default=0.5)
parser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
                    default='1280x720')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
                    action='store_true')

args = parser.parse_args()
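
# Example invocation (the script filename and model folder name are
# placeholders; substitute your own):
#   python TFLite_detection_webcam.py --modeldir=Sample_TFLite_model --threshold=0.6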

MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
use_TPU = args.edgetpu

# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
    from tflite_runtime.interpreter import Interpreter
    if use_TPU:
        from tflite_runtime.interpreter import load_delegate
else:
    from tensorflow.lite.python.interpreter import Interpreter
    if use_TPU:
        from tensorflow.lite.python.interpreter import load_delegate
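
# Note: on recent TensorFlow releases the interpreter is also exposed publicly
# as tf.lite.Interpreter; the tensorflow.lite.python.interpreter path above
# still works but is an internal module and may move between versions.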

# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
    # If the user has specified the name of the .tflite file, use that name; otherwise use default 'edgetpu.tflite'
    if (GRAPH_NAME == 'detect.tflite'):
        GRAPH_NAME = 'edgetpu.tflite'

# Get path to current working directory
CWD_PATH = os.getcwd()

# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, GRAPH_NAME)

# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_NAME, LABELMAP_NAME)

# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]

# Have to do a weird fix for the label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
    del labels[0]

# Load the TensorFlow Lite model.
# If using Edge TPU, use the special load_delegate argument
if use_TPU:
    interpreter = Interpreter(model_path=PATH_TO_CKPT,
                              experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    print(PATH_TO_CKPT)
else:
    interpreter = Interpreter(model_path=PATH_TO_CKPT)
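
# The Edge TPU delegate only loads if the libedgetpu runtime is installed on
# the system (distributed separately by Coral); without it, load_delegate()
# typically raises a ValueError.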

interpreter.allocate_tensors()

# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]

floating_model = (input_details[0]['dtype'] == np.float32)

input_mean = 127.5
input_std = 127.5
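
# With mean = std = 127.5, normalization maps 8-bit pixels from [0, 255] to
# [-1.0, 1.0]: e.g. (255 - 127.5) / 127.5 = 1.0 and (0 - 127.5) / 127.5 = -1.0.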

# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
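
# OpenCV's tick counter works as a wall-clock timer: elapsed seconds for one
# loop iteration = (t2 - t1) / freq, so FPS = 1 / elapsed (computed at the
# bottom of the loop below).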

# Initialize video stream
videostream = VideoStream(resolution=(imW,imH), framerate=30).start()
time.sleep(1)  # give the camera a moment to warm up

while True:

    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()

    # Grab frame from video stream
    frame1 = videostream.read()

    # Acquire frame and resize to expected shape [1xHxWx3]
    frame = frame1.copy()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)

    # Normalize pixel values if using a floating model (i.e. if the model is non-quantized)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std

    # Perform the actual detection by running the model with the image as input
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()

    # Retrieve detection results
    boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
    classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
    scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
    #num = interpreter.get_tensor(output_details[3]['index'])[0]  # Total number of detected objects (inaccurate and not needed)
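
    # Caution (an assumption worth checking for your model): SSD models
    # exported with TF2 can order these output tensors differently than TF1
    # exports. If boxes and scores look swapped, print output_details and
    # adjust the indices above accordingly.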

    # Loop over all detections and draw a detection box if confidence is above minimum threshold
    for i in range(len(scores)):
        if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):

            # Get bounding box coordinates and draw box
            # The interpreter can return coordinates that are outside of image dimensions; force them to be within the image using max() and min()
            ymin = int(max(1, (boxes[i][0] * imH)))
            xmin = int(max(1, (boxes[i][1] * imW)))
            ymax = int(min(imH, (boxes[i][2] * imH)))
            xmax = int(min(imW, (boxes[i][3] * imW)))

            cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)

            # Draw label
            object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
            label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
            label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
            cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
            cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text

    # Draw framerate in corner of frame
    cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc), (30,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,0), 2, cv2.LINE_AA)

    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)

    # Calculate framerate
    t2 = cv2.getTickCount()
    time1 = (t2 - t1) / freq
    frame_rate_calc = 1 / time1

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
cv2.destroyAllWindows()
videostream.stop()