Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
- Detect.py
- **********************************************************************************************************************************
import cv2
import argparse
from get_background import get_background
import time
import numpy as np

# --------------------------------------------------------------------------
# Motion detection on a video source via frame differencing:
# accumulate `consecutive_frames` binary diff masks against a background
# model, draw one bounding box around all motion pixels, and refresh the
# background from the median of the recent frames.
# --------------------------------------------------------------------------

parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='path to the input video',
                    required=True)
parser.add_argument('-c', '--consecutive-frames', default=4, type=int,
                    dest='consecutive_frames',
                    help='number of frames to accumulate per detection window')
args = vars(parser.parse_args())

print(cv2.useOptimized())
# Keep OpenCV single-threaded so CPU usage stays predictable on small hosts.
cv2.setNumThreads(1)

# Use the source given on the command line. The previous version ignored
# --input and hard-coded an RTSP URL with plaintext credentials in source,
# which is both a dead argument and a security problem.
cap = cv2.VideoCapture(args['input'], apiPreference=cv2.CAP_FFMPEG)

# Get the video frame height and width (named constants, not magic 3/4).
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(frame_height, frame_width)
save_name = f"outputs/{args['input'].split('/')[-1]}"

frame_count = 0
consecutive_frame = args['consecutive_frames']
oldtime = time.time()

# Seed the background model with the first frame, converted to grayscale.
ret, background = cap.read()
if not ret:
    raise SystemExit('could not read an initial frame from the input source')
background = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)

color = (255, 255, 255)
# Paint over the top-right corner (camera timestamp overlay) so it never
# registers as motion.
mask = cv2.rectangle(background, (600, 0), (640, 20), color, -1)
cv2.imshow('bkg mask', mask)

frame_diff_list = []
background_list = []

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame_count += 1

    # Mask the timestamp overlay on every frame as well.
    mask = cv2.rectangle(frame, (600, 0), (640, 20), color, -1)
    orig_frame = mask.copy()
    # IMPORTANT STEP: convert the frame to grayscale first.
    gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    # Start a fresh accumulation window every `consecutive_frame` frames.
    if frame_count % consecutive_frame == 0 or frame_count == 1:
        frame_diff_list = []
        background_list = []

    # Difference against the background model, thresholded to a binary
    # motion mask.
    frame_diff = cv2.absdiff(gray, background)
    ret, thres = cv2.threshold(frame_diff, 50, 255, cv2.THRESH_BINARY)
    frame_diff_list.append(thres)
    background_list.append(orig_frame)

    # Once a full window has been collected: detect, display, and refresh
    # the background model from the per-pixel median of recent frames
    # (the median suppresses transient moving objects).
    if len(frame_diff_list) == consecutive_frame:
        sum_frames = sum(frame_diff_list)
        median_bkg = np.median(background_list, axis=0).astype(np.uint8)
        background = cv2.cvtColor(median_bkg, cv2.COLOR_BGR2GRAY)

        # One bounding rectangle around all white (motion) pixels.
        x1, y1, w, h = cv2.boundingRect(sum_frames)
        cv2.rectangle(sum_frames, (x1, y1), (x1 + w, y1 + h), (255, 0, 0), 2)
        cv2.imshow('Detected Objects', sum_frames)

    oldtime = time.time()
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
- *******************************************************************************************************
- get_background.py
- *******************************************************************************************************
- import numpy as np
- import cv2
def get_background(file_path):
    """Estimate a static background image for a video source.

    Reads up to the first 50 frames from *file_path* and returns their
    per-pixel median as a uint8 BGR image; the median suppresses transient
    moving objects, leaving only the static scene.

    Parameters
    ----------
    file_path : str
        Path or URL of the video source (passed to ``cv2.VideoCapture``).

    Returns
    -------
    numpy.ndarray
        Median frame with the same shape as the source frames, dtype uint8.

    Raises
    ------
    ValueError
        If the source yields no readable frames.
    """
    # Keep OpenCV single-threaded, matching the main detection script.
    cv2.setNumThreads(1)
    # Use the caller-supplied source. The previous version ignored
    # `file_path` and hard-coded an RTSP URL with embedded credentials.
    cap = cv2.VideoCapture(file_path, apiPreference=cv2.CAP_FFMPEG)
    try:
        frames = []
        # 50 frames is enough for a stable median background estimate.
        for _ in range(50):
            ret, frame = cap.read()
            if not ret:
                # Stream ended (or failed) early; use what we have so far
                # instead of appending None and corrupting the median.
                break
            frames.append(frame)
    finally:
        # Always release the capture handle (was previously leaked).
        cap.release()
    if not frames:
        raise ValueError(f'could not read any frames from {file_path!r}')
    # Per-pixel median across the sampled frames.
    return np.median(frames, axis=0).astype(np.uint8)
- **********************************************************************************************************
Add Comment
Please sign in to add a comment.