Personal Project

a guest · Mar 9th, 2023 · Python source code

# Import packages
import os
import sys
import argparse

import cv2
import numpy as np

# Use the TF1-style graph/session API through the compat.v1 module
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Set up camera constants
IM_WIDTH = 640
IM_HEIGHT = 480

# Select camera type (if the user enters --usbcam when calling this script,
# a USB webcam will be used)
parser = argparse.ArgumentParser()
parser.add_argument('--usbcam', help='Use a USB webcam instead of picamera',
                    action='store_true')
args = parser.parse_args()
if args.usbcam:
    camera_type = 'usb'
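else:
    # Assumption: default to the Picamera when --usbcam is not given. Only the
    # USB capture path appears later in this script, so camera_type is not
    # otherwise used below; this branch is a hedged sketch for completeness.
    camera_type = 'picamera'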

#### Initialize TensorFlow model ####

# This is needed since the working directory is the object_detection folder.
sys.path.append('..')

# Import utilities
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# Name of the directory containing the object detection model we're using
MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'

# Grab path to current working directory
CWD_PATH = os.getcwd()

# Path to the frozen detection graph .pb file, which contains the model that is
# used for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')

# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH, 'data', 'mscoco_label_map.pbtxt')

# Number of classes the object detector can identify
NUM_CLASSES = 90

## Load the label map.
# Label maps map indices to category names, so that when the convolutional
# network predicts `5`, we know that this corresponds to `airplane`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
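
# For reference (MSCOCO label map): class 1 = person, 17 = cat, 18 = dog,
# 88 = teddy bear. The IDs checked later in pet_detector() are 1, 18, and 88.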

# Load the TensorFlow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

    sess = tf.Session(graph=detection_graph)

# Define input and output tensors (i.e. data) for the object detection classifier

# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

# Output tensors are the detection boxes, scores, and classes.
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

# Each score represents the level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')

#### Initialize other parameters ####

# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
font = cv2.FONT_HERSHEY_SIMPLEX

# Define inside box coordinates (top left and bottom right)
TL_inside = (int(IM_WIDTH*0.016), int(IM_HEIGHT*0.021))
BR_inside = (int(IM_WIDTH*0.323), int(IM_HEIGHT*0.979))

# Define outside box coordinates (top left and bottom right)
TL_outside = (int(IM_WIDTH*0.333), int(IM_HEIGHT*0.021))
BR_outside = (int(IM_WIDTH*0.673), int(IM_HEIGHT*0.979))

# Define right box coordinates (top left and bottom right)
TL_right = (int(IM_WIDTH*0.683), int(IM_HEIGHT*0.021))
BR_right = (int(IM_WIDTH*0.986), int(IM_HEIGHT*0.979))

# Initialize control variables used for the pet detector
detected_inside = False
detected_outside = False
detected_right = False

inside_counter = 0
outside_counter = 0
right_counter = 0

pause = 0
pause_counter = 0
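
#### Text alert helper (sketch) ####

# The comments in pet_detector() below mention sending a text to the user's
# phone, but no texting code is included in this paste. This helper is a
# hedged sketch using the Twilio REST client (an assumption, not part of the
# original script); the account SID, auth token, and phone numbers are
# placeholders that must be filled in before use.
def send_text_alert(message):
    from twilio.rest import Client  # lazy import so the script still runs without twilio installed
    account_sid = 'YOUR_TWILIO_ACCOUNT_SID'  # placeholder
    auth_token = 'YOUR_TWILIO_AUTH_TOKEN'    # placeholder
    client = Client(account_sid, auth_token)
    client.messages.create(body=message,
                           from_='+10000000000',  # placeholder Twilio number
                           to='+10000000000')     # placeholder destination number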

#### Pet detection function ####

# This function contains the code to detect a pet, determine which zone it is
# in (inside, outside, or right), and send a text to the user's phone.
def pet_detector(frame):

    # Use globals for the control variables so they retain their values after the function exits
    global detected_inside, detected_outside, detected_right
    global inside_counter, outside_counter, right_counter
    global pause, pause_counter

    # Expand frame dimensions to shape [1, None, None, 3], as the model expects a batch
    frame_expanded = np.expand_dims(frame, axis=0)

    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: frame_expanded})

    # Draw the results of the detection (i.e. visualize the results)
    vis_util.visualize_boxes_and_labels_on_image_array(
        frame,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.40)

    # Draw the boxes defining the "inside", "outside", and "right" zones.
    cv2.rectangle(frame, TL_outside, BR_outside, (255,20,20), 3)
    cv2.putText(frame, "Outside box", (TL_outside[0]+10, TL_outside[1]-10), font, 1, (255,20,255), 3, cv2.LINE_AA)
    cv2.rectangle(frame, TL_inside, BR_inside, (20,20,255), 3)
    cv2.putText(frame, "Inside box", (TL_inside[0]+10, TL_inside[1]-10), font, 1, (20,255,255), 3, cv2.LINE_AA)
    cv2.rectangle(frame, TL_right, BR_right, (20,255,25), 3)
    cv2.putText(frame, "Right box", (TL_right[0]+10, TL_right[1]-10), font, 1, (20,255,255), 3, cv2.LINE_AA)

    # Check the class of the top detected object by looking at classes[0][0].
    # If the top detected object is a person (1) or a dog (18) (or a teddy bear (88) for test purposes),
    # find its center coordinates by looking at the boxes[0][0] variable.
    # boxes[0][0] holds the coordinates of the top detection as (ymin, xmin, ymax, xmax).
    if (((int(classes[0][0]) == 1) or (int(classes[0][0]) == 18) or (int(classes[0][0]) == 88)) and (pause == 0)):
        x = int(((boxes[0][0][1]+boxes[0][0][3])/2)*IM_WIDTH)
        y = int(((boxes[0][0][0]+boxes[0][0][2])/2)*IM_HEIGHT)

        # Draw a circle at the center of the object
        cv2.circle(frame, (x,y), 5, (75,13,180), -1)

        # If the object is in the inside box, increment the inside counter
        if ((x > TL_inside[0]) and (x < BR_inside[0]) and (y > TL_inside[1]) and (y < BR_inside[1])):
            inside_counter = inside_counter + 1

        # If the object is in the outside box, increment the outside counter
        if ((x > TL_outside[0]) and (x < BR_outside[0]) and (y > TL_outside[1]) and (y < BR_outside[1])):
            outside_counter = outside_counter + 1

        # If the object is in the right box, increment the right counter
        if ((x > TL_right[0]) and (x < BR_right[0]) and (y > TL_right[1]) and (y < BR_right[1])):
            right_counter = right_counter + 1

    # As soon as the pet has been detected in the inside box (counter reaches 1),
    # set the detected_inside flag and send a text to the phone.
    if inside_counter == 1:
        detected_inside = True
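        # Hypothetical call to the send_text_alert sketch above (left commented
        # out because the Twilio credentials are placeholders):
        # send_text_alert('Pet detected in the inside box!')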

        inside_counter = 0
        outside_counter = 0
        right_counter = 0
        # Pause pet detection by setting the "pause" flag
        pause = 1

    # As soon as the pet has been detected in the outside box (counter reaches 1),
    # set the detected_outside flag and send a text to the phone.
    if outside_counter == 1:
        detected_outside = True
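        # Hypothetical call to the send_text_alert sketch above:
        # send_text_alert('Pet detected in the outside box!')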

        inside_counter = 0
        outside_counter = 0
        right_counter = 0
        # Pause pet detection by setting the "pause" flag
        pause = 1

    # As soon as the pet has been detected in the right box (counter reaches 1),
    # set the detected_right flag and send a text to the phone.
    if right_counter == 1:
        detected_right = True
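        # Hypothetical call to the send_text_alert sketch above:
        # send_text_alert('Pet detected in the right box!')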

        inside_counter = 0
        outside_counter = 0
        right_counter = 0
        # Pause pet detection by setting the "pause" flag
        pause = 1

    # If the pause flag is set, draw a message on the screen.
    if pause == 1:
        if detected_inside:
            cv2.putText(frame,'Left detected!',(int(IM_WIDTH*0.027),int(IM_HEIGHT-60)),font,3,(0,0,0),7,cv2.LINE_AA)
            cv2.putText(frame,'Left detected!',(int(IM_WIDTH*0.967),int(IM_HEIGHT-60)),font,3,(95,176,23),5,cv2.LINE_AA)

        if detected_outside:
            cv2.putText(frame,'Mid detected!',(int(IM_WIDTH*0.027),int(IM_HEIGHT-60)),font,3,(0,0,0),7,cv2.LINE_AA)
            cv2.putText(frame,'Mid detected!',(int(IM_WIDTH*0.967),int(IM_HEIGHT-60)),font,3,(95,176,23),5,cv2.LINE_AA)

        if detected_right:
            cv2.putText(frame,'Right detected!',(int(IM_WIDTH*0.027),int(IM_HEIGHT-60)),font,3,(0,0,0),7,cv2.LINE_AA)
            cv2.putText(frame,'Right detected!',(int(IM_WIDTH*0.967),int(IM_HEIGHT-60)),font,3,(95,176,23),5,cv2.LINE_AA)

        # Increment the pause counter until it exceeds 3, then unpause the
        # application (set the pause flag back to 0) and clear the detection flags.
        pause_counter = pause_counter + 1
        if pause_counter > 3:
            pause = 0
            pause_counter = 0
            detected_inside = False
            detected_outside = False
            detected_right = False

    # Draw counter info
    cv2.putText(frame,'Detection counter: ' + str(max(inside_counter, outside_counter, right_counter)),(10,100),font,0.5,(255,255,0),1,cv2.LINE_AA)
    cv2.putText(frame,'Pause counter: ' + str(pause_counter),(10,150),font,0.5,(255,255,0),1,cv2.LINE_AA)

    return frame

#### Initialize camera and perform object detection ####

# The camera has to be set up and used differently depending on whether it's a
# Picamera or a USB webcam; only the USB webcam path is included below.

### USB webcam ###

# Initialize USB webcam feed
camera = cv2.VideoCapture(0)
ret = camera.set(cv2.CAP_PROP_FRAME_WIDTH, IM_WIDTH)
ret = camera.set(cv2.CAP_PROP_FRAME_HEIGHT, IM_HEIGHT)

# Continuously capture frames and perform object detection on them
while True:

    t1 = cv2.getTickCount()

    # Acquire a frame; pet_detector() expands it to shape [1, None, None, 3]
    # before feeding it to the model
    ret, frame = camera.read()

    # Pass frame into pet detection function
    frame = pet_detector(frame)

    # Draw FPS
    cv2.putText(frame,"FPS: {0:.2f}".format(frame_rate_calc),(30,50),font,1,(255,255,0),2,cv2.LINE_AA)

    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)

    # FPS calculation
    t2 = cv2.getTickCount()
    time1 = (t2-t1)/freq
    frame_rate_calc = 1/time1

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
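
# Example invocation (the file name is hypothetical), run from the
# object_detection directory so the imports above resolve:
#   python3 Pet_detector.py --usbcam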