Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- import cv2
- import os, sys
- sys.path.append(os.path.join(os.path.dirname(__file__), '..')) # /*
- from viddeid.ioutils import path, directory
- from viddeid import visualization
- import viddeid.filters.space as sfilters
- from viddeid import filters
#CONTROLS#
#=======================#
#Show the initial video on start up
show_initial_video = False
#Process all the videos in the test_videos folder
process_all = True
#Process a specific video with index (ignored if process_all is active)
process_index = 0
#=======================#

#Get test videos directory
# NOTE(review): path.find_in_ancestor presumably walks up from this file's
# directory until it finds "data/test_videos" — confirm against viddeid.ioutils.
dir_path = path.find_in_ancestor(__file__, "data/test_videos")

#Open a VideoCapture for every file in the test-videos directory.
# NOTE(review): assumes every file in the directory is a readable video;
# cv2.VideoCapture does not raise on failure, it just yields no frames.
videos = [cv2.VideoCapture(p) for p in directory.get_files(dir_path) ]

#Initialize a viewer for the processing (used below to display frame lists)
viewer = visualization.Viewer()

#Load the Haar cascade used for frontal-face detection
face_cascade = cv2.CascadeClassifier(
path.find_in_ancestor(
__file__,
"data/cascade_classifiers/haarcascade_frontalface_default.xml"))
def filter(image):
    """Blur ``image`` by FFT convolution with a mixture-of-Gaussians kernel.

    Parameters
    ----------
    image : 2-D numpy array (grayscale face patch; values 0-255 expected)

    Returns
    -------
    numpy.ndarray of uint8, same shape as ``image``.

    NOTE(review): this name shadows the builtin ``filter``; kept unchanged
    because the face-processing loop below calls it by this name.
    """
    ft_im = np.fft.fft2(image)
    # Kernel defined as a sum of three 2-D Gaussians: (mean, covariance, weight).
    ker = sfilters.gaussian_mixture([
        ((2, 3), np.array([[2, 0], [0, 2]]), 1),
        ((5, -4), np.array([[2, 1], [1, 2]]), 1),
        ((-2, -4), np.array([[2, -1], [-1, 2]]), 1),
    ])
    # Re-center the kernel at the origin and pad it to the image's shape so the
    # frequency-domain product below implements a (circular) convolution
    # without introducing a spatial shift.
    ker = filters.center_to_origin(ker, image.shape)
    ft_ker = np.fft.fft2(ker)
    # Convolution theorem: pointwise product in the frequency domain.
    ft_conv = ft_im * ft_ker
    im_conv = np.fft.ifft2(ft_conv)
    # Discard the (numerically tiny) imaginary part and clamp to the valid
    # 8-bit pixel range before casting back to uint8.
    filtered_image = np.clip(im_conv.real, 0, 255).astype(np.uint8)
    # BUG FIX: original returned `filtered_images` (undefined name -> NameError
    # on every call); the local computed above is `filtered_image`.
    return filtered_image
# Restrict to a single video unless processing everything.
if not process_all:
    videos = [videos[process_index]]

for vidcap in videos:
    # Read every frame, converting to grayscale (the cascade and the FFT
    # filter both operate on single-channel images).
    count = 0
    images = []
    success, image = vidcap.read()
    while success:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        images.append(gray)
        count += 1
        print(f'Reading frame: {count}')
        success, image = vidcap.read()
    # BUG FIX: corrected 'Succesfully' typo in the log message.
    print('Successfully read video')

    # Show the initial video
    if show_initial_video:
        print('Showing initial video...')
        viewer.display_video(images, 'Initial video')

    # Process the images
    print('Processing images...')
    filtered_images = []
    reconstructed_images = []
    #===============CASCADES=============#
    for im in images:
        faces = face_cascade.detectMultiScale(im, 1.3, 1)
        # BUG FIX: the original appended `im` to BOTH output lists *inside*
        # the per-face loop, so (a) frames were appended once per detected
        # face instead of once per frame, and (b) both lists held the same
        # mutated array object, making the "filtered" and "reconstructed"
        # videos identical. Work on copies and append once per frame.
        filtered = im.copy()
        for (x, y, w, h) in faces:
            # De-identify only the pixels that contain the face.
            filtered[y:y+h, x:x+w] = filter(filtered[y:y+h, x:x+w])
        filtered_images.append(filtered)

        reconstructed = filtered.copy()
        for (x, y, w, h) in faces:
            # NOTE(review): `reconstruct` is not defined anywhere in this
            # file (the original comment says "This function must be
            # created") — calling it raises NameError until it exists.
            reconstructed[y:y+h, x:x+w] = reconstruct(reconstructed[y:y+h, x:x+w])
        reconstructed_images.append(reconstructed)
    #====================================#
    print('Processing done.')

    print('Showing processing video...')
    print('Press q or ESC to exit the video')
    viewer.display_video(filtered_images, 'Filtered images')
    viewer.display_video(reconstructed_images, 'Reconstructed images')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement