Guest User

Untitled

a guest
Jul 20th, 2018
197
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 7.71 KB | None | 0 0
  1. ###
  2. ### script to detect objects in live video
  3. ### using MobileNets+SSD
  4. ###
  5. ### by: memeka <mihailescu2m@gmail.com>
  6. ###
  7.  
  8. import argparse, json
  9. import time, datetime, threading
  10. import numpy, cv2
  11. import os, signal
  12. from collections import deque
  13.  
  14. def read_frames(stream, queue):
  15. global detect
  16. while detect is True:
  17. (err, frame) = stream.read()
  18. queue.appendleft(frame)
  19. print('[INFO] exiting video stream thread...')
  20.  
  21. def execute_action(frame, timestamp, prediction, confidence, folder, action):
  22. # save frame to the 'detected' folder
  23. unixtime = (int)(time.mktime(timestamp.timetuple()))
  24. filename = '{0}/{1}_{2}.jpg'.format(folder, unixtime, prediction)
  25. cv2.imwrite(filename, frame)
  26. # execute action
  27. cmd = str(action).format(prediction, confidence, filename)
  28. os.system(cmd)
  29.  
  30. def exit_handler(signum, frame):
  31. global detect
  32. detect = False
  33.  
  34. ### main code ###
  35.  
  36. # construct the argument parser and parse the program arguments
  37. ap = argparse.ArgumentParser()
  38. ap.add_argument('-c', '--config', default='detect.json',
  39. help='path to configuration file')
  40. args = vars(ap.parse_args())
  41.  
  42. # load configuration file
  43. CONFIG_OPTIONS = [
  44. 'prototxt',
  45. 'caffemodel',
  46. 'classes',
  47. 'video_input',
  48. 'video_output',
  49. 'image_output',
  50. 'batch_size',
  51. 'base_confidence',
  52. 'detect_classes',
  53. 'detect_timeout',
  54. 'detect_action',
  55. 'statistics'
  56. ]
  57. print('[INFO] loading configuration file...')
  58. try:
  59. with open(args['config']) as config_file:
  60. config = json.load(config_file)
  61. except FileNotFoundError:
  62. print('[ERROR] configuration file [{0}] not found!'.format(args['config']))
  63. exit(-1)
  64.  
  65. # detect missing configuration options
  66. for option in CONFIG_OPTIONS:
  67. if option not in config:
  68. print('[ERROR] configuration option [{0}] not found in file [{1}]'.format(
  69. option, args['config']
  70. ))
  71. exit(-1)
  72.  
  73. # detect unknown configuration options
  74. for option in config:
  75. if option not in CONFIG_OPTIONS:
  76. print('[WARNING] unknown configuration option [{0}]'.format(option))
  77.  
  78. # check image folder exists and create it if necessary
  79. if not os.path.exists(config['image_output']):
  80. os.makedirs(config['image_output'])
  81.  
  82. # initialize the list of class labels MobileNets SSD was trained to
  83. # detect, then generate a set of bounding box colors for each class
  84. CLASSES = config['classes']
  85. COLORS = numpy.random.uniform(0, 255, size=(len(CLASSES), 3))
  86.  
  87. # load serialized model from disk
  88. print('[INFO] loading caffemodel...')
  89. try:
  90. open(config['prototxt'])
  91. except FileNotFoundError:
  92. print('[ERROR] prototxt file [{0}] not found!'.format(config['prototxt']))
  93. exit(-1)
  94. try:
  95. open(config['caffemodel'])
  96. except FileNotFoundError:
  97. print('[ERROR] caffemodel file [{0}] not found!'.format(config['caffemodel']))
  98. exit(-1)
  99. net = cv2.dnn.readNetFromCaffe(config['prototxt'], config['caffemodel'])
  100.  
  101. # initialize the input stream and allow the camera sensor to warmup
  102. print('[INFO] connecting to video stream...')
  103. vin = cv2.VideoCapture(config['video_input'])
  104. time.sleep(2.0)
  105.  
  106. # detect video attributes and initialize the output stream
  107. w = (int)(vin.get(cv2.CAP_PROP_FRAME_WIDTH))
  108. h = (int)(vin.get(cv2.CAP_PROP_FRAME_HEIGHT))
  109. fps = vin.get(cv2.CAP_PROP_FPS)
  110. print('[INFO] setting up '+str(w)+'x'+str(h)+'@'+str(fps)+' output stream...')
  111. vout = cv2.VideoWriter(config['video_output'], 0, fps, (w, h))
  112.  
  113. # initialize frames queue
  114. batch_size = (int)(config['batch_size'])
  115. queue = deque(maxlen=batch_size*2)
  116.  
  117. # start reading frames from video stream in separate thread
  118. detect = True
  119. reader = threading.Thread(name='reader', target=read_frames, args=(vin, queue,))
  120. reader.start()
  121.  
  122. # install CTRL-C signal handler to handle graceful program exit
  123. print('[INFO] installing CTRL-C handler...')
  124. signal.signal(signal.SIGINT, exit_handler)
  125.  
# loop over the frames from the video stream
print('[INFO] starting object detection...')
# DETECTIONS maps class label -> timestamp of the last action fired for it,
# used to debounce repeated detections of the same class
DETECTIONS = {}
STATISTICS = config['statistics']
processed = 0
start = datetime.datetime.now()
while detect is True:
    # grab a batch of frames from the threaded video stream
    frames = []
    for f in range(batch_size):
        while not queue:
            # wait for frames to arrive
            time.sleep(0.001)
        frames.append(queue.pop())

    # the reader thread enqueues None when stream.read() fails
    if frames[0] is None:
        print('[ERROR] invalid frame received from input stream')
        detect = False
        continue

    # convert detection frame to a blob
    # NOTE: only frames[0] is analyzed; results are drawn on the whole batch
    blob = cv2.dnn.blobFromImage(cv2.resize(frames[0], (320, 320)), 0.007843, (320, 320), 127.5)

    # pass the blob through the network and obtain the detections and predictions
    net.setInput(blob)
    #dnn_start = time.time()
    detections = net.forward()
    #dnn_end = time.time()
    #print('[DEBUG] dnn detection took %0.3f ms' % ((dnn_end-dnn_start)*1000.0))

    # loop over the detections
    for i in numpy.arange(0, detections.shape[2]):
        # extract the prediction and the confidence (i.e., probability)
        # associated with the prediction
        obj = int(detections[0, 0, i, 1])
        prediction = CLASSES[obj]
        confidence = detections[0, 0, i, 2] * 100
        label = '{}: {:.2f}%'.format(prediction, confidence)

        # check if it's an object we are interested in
        # and if confidence is within the desired levels
        timestamp = datetime.datetime.now()
        detection_event = False
        if prediction in config['detect_classes']:
            # per-class confidence threshold for classes we act upon
            if not confidence > (float)(config['detect_classes'][prediction]):
                # confidence too low for desired object
                continue
            else:
                # we detected something we are interested in
                # so we execute action associated with event
                # but only if the object class was not already detected recently
                if prediction in DETECTIONS:
                    prev_timestamp = DETECTIONS[prediction]
                    duration = (timestamp - prev_timestamp).total_seconds()
                    if duration > (float)(config['detect_timeout']):
                        # detection event (debounce timeout elapsed)
                        detection_event = True
                else:
                    # detection event (first occurrence)
                    detection_event = True
        else:
            # classes we only draw (no action) use the global threshold
            if not confidence > (float)(config['base_confidence']):
                # confidence too low for object
                continue

        # compute the (x, y)-coordinates of the bounding box for the object
        box = detections[0, 0, i, 3:7] * numpy.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        # keep the label inside the frame when the box touches the top edge
        tStartY = startY - 15 if startY - 15 > 15 else startY + 15

        # draw the prediction on the frame
        for frame in frames:
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                COLORS[obj], 2)
            cv2.putText(frame, label, (startX, tStartY),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[obj], 2)

        # execute detection action and remember when, for debouncing
        if detection_event is True:
            DETECTIONS[prediction] = timestamp
            execute_action(frames[0], timestamp, prediction, confidence, config['image_output'], config['detect_action'])

    # write frames to output stream
    for frame in frames:
        vout.write(frame)
    if STATISTICS is True:
        processed += len(frames)
  213.  
  214. # do cleanup
  215. end = datetime.datetime.now()
  216. reader.join()
  217. vin.release()
  218. vout.release()
  219.  
  220. # display statistics
  221. if STATISTICS is True:
  222. elapsed = (end - start).total_seconds()
  223. rfps = processed / elapsed
  224. print('[INFO] elapsed time: {:.2f} seconds'.format(elapsed))
  225. print('[INFO] approx. output FPS: {:.2f} (desired: {:.2f})'.format(rfps, fps))
  226. print('[INFO] approx. detection FPS: {:.2f}'.format(rfps/batch_size))
  227. else:
  228. print('[INFO] exiting program...')
Add Comment
Please, Sign In to add comment