video_detect.py
# USAGE
# python video_detect.py

# import the necessary packages
import numpy as np
import time
import cv2
import os
import sendemail
import getvideo
import lockvideo
import sendpush

# configuration
yoloDir = "yolo-coco"      # directory holding the YOLO config, weights and labels
setConfidence = 0.5        # minimum probability needed to keep a detection
setThreshold = 0.3         # non-maxima suppression threshold
# COCO class names to alert on (must match entries in coco.names)
detectFor = ["person", "dog", "car"]

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([yoloDir, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([yoloDir, "yolov3.weights"])
configPath = os.path.sep.join([yoloDir, "yolov3.cfg"])

# load our YOLO object detector trained on the COCO dataset (80 classes)
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# determine only the *output* layer names that we need from YOLO
# (loop-invariant, so compute it once here rather than once per frame;
# flatten() keeps the indexing working across OpenCV versions)
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
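
# optional: if OpenCV was built with CUDA support, inference can run on
# the GPU instead of the CPU (an add-on suggestion, not part of the
# original setup; uncomment to try):
#net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
#net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)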

# download the videos to analyze and get their filenames
files = getvideo.getvideo()

# download path
path = 'downloads/'

videoPics = []
sendPics = []

count = 0
match = 0
# default number of frames to skip between analyzed frames
skipFrames = 14
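# (assuming ~15 fps, consistent with the 150-frame "first 10 seconds"
# skip below, this samples roughly one frame per second)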

for video in files:
    #print(video)
    videoName = video.split('.')
    videoName = videoName[0]

    # load the video and step through its frames looking for the
    # objects listed in detectFor
    cap = cv2.VideoCapture(path+video)

    # skip the first 10 seconds (150 frames at ~15 fps)
    cap.set(cv2.CAP_PROP_POS_FRAMES, 150)

    # total number of frames
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    #print("Total number of frames: "+str(length))

    # skip files that could not be opened or do not exist
    if not cap.isOpened():
        continue

    while True:
        pos_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        #print("Current frame: "+str(pos_frame)+"/"+str(length))

        flag, frame = cap.read()
        if flag:
            # grab the frame's spatial dimensions
            (H, W) = frame.shape[:2]

            # construct a blob from the frame, then perform a forward
            # pass of the YOLO object detector, giving us our bounding
            # boxes and associated probabilities
            blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
                    swapRB=True, crop=False)
            net.setInput(blob)
            start = time.time()
            layerOutputs = net.forward(ln)
            end = time.time()

            # show timing information on YOLO
            #print("[INFO] YOLO took {:.6f} seconds".format(end - start))

            # initialize our lists of detected bounding boxes,
            # confidences, and class IDs, respectively
            boxes = []
            confidences = []
            classIDs = []

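            # each row of a YOLOv3 output is
            # [centerX, centerY, width, height, objectness, class scores...],
            # with the box coordinates normalized to [0, 1]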
            # loop over each of the layer outputs
            for output in layerOutputs:
                # loop over each of the detections
                for detection in output:
                    # extract the class ID and confidence (i.e., probability)
                    # of the current object detection
                    scores = detection[5:]
                    classID = np.argmax(scores)
                    confidence = scores[classID]

                    # filter out weak predictions by ensuring the detected
                    # probability is greater than the minimum probability
                    if confidence > setConfidence:
                        # scale the bounding box coordinates back relative
                        # to the size of the image, keeping in mind that
                        # YOLO actually returns the center (x, y)-coordinates
                        # of the bounding box followed by the box's width
                        # and height
                        box = detection[0:4] * np.array([W, H, W, H])
                        (centerX, centerY, width, height) = box.astype("int")

                        # use the center (x, y)-coordinates to derive the
                        # top-left corner of the bounding box
                        x = int(centerX - (width / 2))
                        y = int(centerY - (height / 2))

                        # update our list of bounding box coordinates,
                        # confidences, and class IDs
                        boxes.append([x, y, int(width), int(height)])
                        confidences.append(float(confidence))
                        classIDs.append(classID)

            # apply non-maxima suppression to suppress weak, overlapping
            # bounding boxes; NMSBoxes returns the indices of the kept boxes
            idxs = cv2.dnn.NMSBoxes(boxes, confidences, setConfidence, setThreshold)

            # ensure at least one detection exists
            if len(idxs) > 0:

                # loop over the indexes we are keeping
                for i in idxs.flatten():
                    # extract the bounding box coordinates
                    (x, y) = (boxes[i][0], boxes[i][1])
                    (w, h) = (boxes[i][2], boxes[i][3])

                    getObject = LABELS[classIDs[i]]

                    if getObject in detectFor:

                        # draw a bounding box rectangle and label on the frame
                        color = [int(c) for c in COLORS[classIDs[i]]]
                        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
                        cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, color, 2)
                        #print("Match!")

                        imgName = "output/"+videoName+"-%d.jpg" % count
                        cv2.imwrite(imgName, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
                        count = count + 1

                        # after a detection, skip more frames; the higher
                        # the confidence, the bigger the jump
                        if confidences[i] > 0.9:
                            skipFrames = 300
                            #print("Skipping 300")
                            continue
                        elif confidences[i] > 0.5:
                            skipFrames = 50
                            #print("Skipping 50")
                            continue
                        else:
                            skipFrames = 30
                            #print("Skipping 30")
                            continue

            # jump ahead by the current skip count, then fall back to the
            # default sampling rate
            cap.set(cv2.CAP_PROP_POS_FRAMES, pos_frame+skipFrames)
            skipFrames = 14
        else:
            #print("Analysis of "+video+" is done")
            if count > 0:
                videoPics.append(imgName)
                count = 0
                match = match + 1
            break

# check whether anything was detected, and send a notification email
if match > 0:
    # for each video, attach the first, middle and last saved frame
    # (this assumes videoName contains exactly one '-', so that saved
    # names split into output/<part1>-<part2>-<count>.jpg)
    for videoPic in videoPics:
        pic = videoPic.split('-')
        pic1 = pic[2].split('.')
        number = int(pic1[0])
        if number < 1:
            sendPics.append(pic[0]+"-"+pic[1]+"-"+"0.jpg")
        elif number > 1:
            # np.median([1, number]) is simply the midpoint index
            median = int(np.median([1, number]))
            sendPics.append(pic[0]+"-"+pic[1]+"-"+"0.jpg")
            sendPics.append(pic[0]+"-"+pic[1]+"-"+str(median)+".jpg")
            sendPics.append(pic[0]+"-"+pic[1]+"-"+str(number)+".jpg")
        else:
            sendPics.append(pic[0]+"-"+pic[1]+"-"+"0.jpg")
            sendPics.append(pic[0]+"-"+pic[1]+"-"+"1.jpg")

    sendemail.sendemail(from_addr = 'mail@mail.dk',
        to_addr      = ['to-mail@mail.dk', 'to-mail2@mail.dk'],
        subject      = 'Human detected',
        message      = 'A human was detected in the video',
        login        = 'mail@mail.dk',
        password     = '123456',
        attach       = sendPics)
    #print("Email notification sent!")

    # mark the videos as locked
    lockvideo.lock(files)

    # send a push notification
    sendpush.sendpush()

    # clean up the saved detection frames
    #print("Cleaning up")
    time.sleep(20)
    for videoPic in videoPics:
        pic = videoPic.split('-')
        pic1 = pic[2].split('.')
        number = int(pic1[0])
        i = 0
        while i <= number:
            os.remove(str(pic[0])+"-"+str(pic[1])+"-"+str(i)+".jpg")
            i += 1

# delete the downloaded videos
for video in files:
    os.remove(path+video)
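
# ----------------------------------------------------------------------
# NOTE: the local helper modules imported above (sendemail, getvideo,
# lockvideo and sendpush) are not included in this paste. As a rough,
# hypothetical sketch, inferred only from the keyword arguments used in
# the call above and built on the standard library (the SMTP host and
# port below are placeholders), sendemail.py might look something like:
#
#   import smtplib
#   from email.mime.multipart import MIMEMultipart
#   from email.mime.text import MIMEText
#   from email.mime.image import MIMEImage
#
#   def sendemail(from_addr, to_addr, subject, message,
#                 login, password, attach):
#       msg = MIMEMultipart()
#       msg["From"] = from_addr
#       msg["To"] = ", ".join(to_addr)
#       msg["Subject"] = subject
#       msg.attach(MIMEText(message))
#       # attach each saved detection frame as a JPEG
#       for picPath in attach:
#           with open(picPath, "rb") as f:
#               img = MIMEImage(f.read())
#           img.add_header("Content-Disposition", "attachment",
#                          filename=picPath)
#           msg.attach(img)
#       # placeholder SMTP server; substitute your provider's values
#       with smtplib.SMTP_SSL("smtp.example.com", 465) as server:
#           server.login(login, password)
#           server.sendmail(from_addr, to_addr, msg.as_string())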