# USAGE
# python video_detect.py
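#
# Assumed directory layout (inferred from the paths used in this script; the
# layout itself is not spelled out in the original paste):
#   yolo-coco/   coco.names, yolov3.weights, yolov3.cfg
#   downloads/   videos fetched by getvideo.getvideo()
#   output/      JPEG snapshots saved for frames with a match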
# import the necessary packages
import numpy as np
import time
import cv2
import os
import sendemail
import getvideo
import lockvideo
import sendpush

# Configuration variables
yoloDir = "yolo-coco"
setConfidence = float(0.5)
setThreshold = float(0.3)
detectFor = ["person", "dog", "car"]

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([yoloDir, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([yoloDir, "yolov3.weights"])
configPath = os.path.sep.join([yoloDir, "yolov3.cfg"])

# load our YOLO object detector trained on the COCO dataset (80 classes)
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# Download the videos and get the filenames to analyze
files = getvideo.getvideo()

# Download path
path = 'downloads/'
videoPics = []
sendPics = []
count = 0
match = 0

# Default number of frames to skip
skipFrames = 14
for video in files:
    #print(video)
    videoName = video.split('.')
    videoName = videoName[0]

    # load the video and go through each frame to detect a human
    cap = cv2.VideoCapture(path + video)

    # Skip this clip if the file could not be opened or does not exist
    if not cap.isOpened():
        #print("Not able to open the file, or it does not exist")
        continue

    # Skip the first 10 seconds (~150 frames)
    cap.set(cv2.CAP_PROP_POS_FRAMES, 150)

    # Total number of frames
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    #print("Total number of frames: " + str(length))

    while True:
        pos_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        #print("Current frame: " + str(pos_frame) + "/" + str(length))
        flag, frame = cap.read()
        if flag:
            # grab the frame's spatial dimensions
            (H, W) = frame.shape[:2]

            # determine only the *output* layer names that we need from YOLO
            ln = net.getLayerNames()
            ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

            # construct a blob from the input frame and then perform a forward
            # pass of the YOLO object detector, giving us our bounding boxes
            # and associated probabilities
            blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
                swapRB=True, crop=False)
            net.setInput(blob)
            start = time.time()
            layerOutputs = net.forward(ln)
            end = time.time()

            # show timing information on YOLO
            #print("[INFO] YOLO took {:.6f} seconds".format(end - start))

            # initialize our lists of detected bounding boxes, confidences,
            # and class IDs, respectively
            boxes = []
            confidences = []
            classIDs = []

            # loop over each of the layer outputs
            for output in layerOutputs:
                # loop over each of the detections
                for detection in output:
                    # extract the class ID and confidence (i.e., probability)
                    # of the current object detection
                    scores = detection[5:]
                    classID = np.argmax(scores)
                    confidence = scores[classID]

                    # filter out weak predictions by ensuring the detected
                    # probability is greater than the minimum probability
                    if confidence > setConfidence:
                        # scale the bounding box coordinates back relative to
                        # the size of the image, keeping in mind that YOLO
                        # actually returns the center (x, y)-coordinates of
                        # the bounding box followed by its width and height
                        box = detection[0:4] * np.array([W, H, W, H])
                        (centerX, centerY, width, height) = box.astype("int")

                        # use the center (x, y)-coordinates to derive the top
                        # and left corner of the bounding box
                        x = int(centerX - (width / 2))
                        y = int(centerY - (height / 2))

                        # update our list of bounding box coordinates,
                        # confidences, and class IDs
                        boxes.append([x, y, int(width), int(height)])
                        confidences.append(float(confidence))
                        classIDs.append(classID)

            # apply non-maxima suppression to suppress weak, overlapping
            # bounding boxes
            idxs = cv2.dnn.NMSBoxes(boxes, confidences, setConfidence, setThreshold)

            # ensure at least one detection exists
            if len(idxs) > 0:
                # loop over the indexes we are keeping
                for i in idxs.flatten():
                    # extract the bounding box coordinates
                    (x, y) = (boxes[i][0], boxes[i][1])
                    (w, h) = (boxes[i][2], boxes[i][3])
                    getObject = LABELS[classIDs[i]]
                    if getObject in detectFor:
                        # draw a bounding box rectangle and label on the image
                        color = [int(c) for c in COLORS[classIDs[i]]]
                        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
                        cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, color, 2)
                        #print("Match!")
                        imgName = "output/" + videoName + "-%d.jpg" % count
                        cv2.imwrite(imgName, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
                        count = count + 1

                        # skip more frames the more confident the detection is
                        if confidences[i] > 0.9:
                            skipFrames = 300
                            #print("Skipping 300")
                            continue
                        elif confidences[i] > 0.5:
                            skipFrames = 50
                            #print("Skipping 50")
                            continue
                        else:
                            skipFrames = 30
                            #print("Skipping 30")
                            continue

            # jump ahead, then reset the skip counter to its default
            if skipFrames > 0:
                cap.set(cv2.CAP_PROP_POS_FRAMES, pos_frame + skipFrames)
                skipFrames = 14
            else:
                cap.set(cv2.CAP_PROP_POS_FRAMES, pos_frame + skipFrames)
                skipFrames = 14
        else:
            # no more frames could be read; this video is done
            #print("Analysis of " + video + " is done")
            if count > 0:
                videoPics.append(imgName)
                count = 0
                match = match + 1
            break
# Check whether there were any detections, and send a notification mail
if match > 0:
    for videoPic in videoPics:
        # imgName has the form "output/<videoName>-<count>.jpg"; the split
        # below assumes videoName itself contains one dash
        pic = videoPic.split('-')
        pic1 = pic[2].split('.')
        number = int(pic1[0])
        if number < 1:
            sendPics.append(pic[0] + "-" + pic[1] + "-" + "0.jpg")
        elif number > 1:
            # attach the first, middle and last snapshot
            median = int(np.median([1, number]))
            sendPics.append(pic[0] + "-" + pic[1] + "-" + "0.jpg")
            sendPics.append(pic[0] + "-" + pic[1] + "-" + str(median) + ".jpg")
            sendPics.append(pic[0] + "-" + pic[1] + "-" + str(number) + ".jpg")
        else:
            sendPics.append(pic[0] + "-" + pic[1] + "-" + "0.jpg")
            sendPics.append(pic[0] + "-" + pic[1] + "-" + "1.jpg")

    sendemail.sendemail(from_addr = 'mail@mail.dk',
                        to_addr = ['to-mail@mail.dk', 'to-mail2@mail.dk'],
                        subject = 'Human detected',
                        message = 'A human was detected in the video',
                        login = 'mail@mail.dk',
                        password = '123456',
                        attach = sendPics)
    #print("Email notification sent!")

    # Mark videos as locked
    lockvideo.lock(files)

    # Send push notification
    sendpush.sendpush()

# Cleaning up
#print("Cleaning up")
time.sleep(20)
for videoPic in videoPics:
    pic = videoPic.split('-')
    pic1 = pic[2].split('.')
    number = int(pic1[0])
    i = 0
    while i <= number:
        os.remove(str(pic[0]) + "-" + str(pic[1]) + "-" + str(i) + ".jpg")
        i += 1
for video in files:
    os.remove(path + video)
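
# ---------------------------------------------------------------------------
# The modules imported at the top (sendemail, getvideo, lockvideo, sendpush)
# are local helper files that are not included in this paste. From the call
# sites above: getvideo.getvideo() is expected to return the list of
# downloaded video filenames, lockvideo.lock(files) marks those videos as
# processed, and sendpush.sendpush() fires a push notification.
#
# As a rough illustration only, a compatible sendemail.py could look like the
# sketch below. The signature matches the call earlier in this script; the
# SMTP host/port and the body are assumptions, not the original code.
# ---------------------------------------------------------------------------

# sendemail.py (hypothetical sketch -- kept as a separate file)
import os
import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText


def sendemail(from_addr, to_addr, subject, message, login, password,
              attach=None, server='smtp.example.com', port=465):
    # build a multipart message with the notification text
    msg = MIMEMultipart()
    msg['From'] = from_addr
    msg['To'] = ', '.join(to_addr)
    msg['Subject'] = subject
    msg.attach(MIMEText(message))

    # attach each detection snapshot
    for filename in (attach or []):
        with open(filename, 'rb') as f:
            part = MIMEApplication(f.read(), Name=os.path.basename(filename))
        part['Content-Disposition'] = ('attachment; filename="%s"'
                                       % os.path.basename(filename))
        msg.attach(part)

    # send over SSL; host and port are placeholders for the real mail provider
    with smtplib.SMTP_SSL(server, port) as smtp:
        smtp.login(login, password)
        smtp.sendmail(from_addr, to_addr, msg.as_string())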