import numpy as np
import cv2

import os, sys
#Make the repository root importable so the viddeid package can be found
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from viddeid.ioutils import path, directory
from viddeid import visualization
import viddeid.filters.space as sfilters
from viddeid import filters

#CONTROLS#
#=======================#
#Show the initial video on start up
show_initial_video = False
#Process all the videos in the test_videos folder
process_all = True
#Process a specific video by index (ignored if process_all is active)
process_index = 0
#=======================#

#Get the test videos directory
dir_path = path.find_in_ancestor(__file__, "data/test_videos")
#Open all videos from the video directory
videos = [cv2.VideoCapture(p) for p in directory.get_files(dir_path)]
#Initialize a viewer for the processing
viewer = visualization.Viewer()
#Load the Haar cascade for frontal face detection
face_cascade = cv2.CascadeClassifier(
    path.find_in_ancestor(
        __file__,
        "data/cascade_classifiers/haarcascade_frontalface_default.xml"))

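#Sanity check (an added suggestion, not part of the original paste):
#cv2.CascadeClassifier loads an empty classifier when the XML file is missing,
#so fail early here instead of silently detecting nothing later.
if face_cascade.empty():
    raise IOError('Could not load haarcascade_frontalface_default.xml')
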
def filter(image):
    ft_im = np.fft.fft2(image)

    #Build a mixture-of-Gaussians blur kernel and shift its centre to the origin
    ker = sfilters.gaussian_mixture([
        ((2, 3), np.array([[2, 0], [0, 2]]), 1),
        ((5, -4), np.array([[2, 1], [1, 2]]), 1),
        ((-2, -4), np.array([[2, -1], [-1, 2]]), 1),
    ])
    ker = filters.center_to_origin(ker, image.shape)

    ft_ker = np.fft.fft2(ker)

    #Convolution in space is multiplication in the frequency domain
    ft_conv = ft_im * ft_ker
    im_conv = np.fft.ifft2(ft_conv)
    #Image and kernel are real, so keep the real part, clip to the valid
    #pixel range and convert back to uint8
    filtered_image = np.clip(im_conv.real, 0, 255).astype(np.uint8)
    return filtered_image


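#The processing loop below calls reconstruct(), which the original paste never
#defines. The function here is only an assumed sketch: it rebuilds the same
#Gaussian-mixture kernel used in filter() and undoes the blur with a
#Wiener-style deconvolution in the frequency domain (eps avoids division by
#zero). Replace it with the project's real reconstruction if one exists.
def reconstruct(image, eps=1e-3):
    ft_im = np.fft.fft2(image)

    #Rebuild the same kernel that filter() applied
    ker = sfilters.gaussian_mixture([
        ((2, 3), np.array([[2, 0], [0, 2]]), 1),
        ((5, -4), np.array([[2, 1], [1, 2]]), 1),
        ((-2, -4), np.array([[2, -1], [-1, 2]]), 1),
    ])
    ker = filters.center_to_origin(ker, image.shape)
    ft_ker = np.fft.fft2(ker)

    #Wiener-style inverse filter: conj(H) / (|H|^2 + eps)
    ft_rec = ft_im * np.conj(ft_ker) / (np.abs(ft_ker) ** 2 + eps)
    im_rec = np.fft.ifft2(ft_rec)
    return np.clip(im_rec.real, 0, 255).astype(np.uint8)

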
if not process_all:
    videos = [videos[process_index]]
for vidcap in videos:
    #Collect the frames and keep count of the current frame
    count = 0
    images = []
    success, image = vidcap.read()
    while success:
        #Convert to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        images.append(gray)
        count += 1
        print('Reading frame: ' + str(count))
        success, image = vidcap.read()
    print('Successfully read video')
    #Show the initial video
    if show_initial_video:
        print('Showing initial video...')
        viewer.display_video(images, 'Initial video')
    #Process the images
    print('Processing images...')
    filtered_images = []
    reconstructed_images = []
    #===============CASCADES=============#
    for im in images:
        #Detect faces in the current frame
        faces = face_cascade.detectMultiScale(im, scaleFactor=1.3, minNeighbors=1)
        filtered = im.copy()
        reconstructed = im.copy()
        for (x, y, w, h) in faces:
            #Filter only the pixels that contain the face
            face_pixels = im[y:y+h, x:x+w]
            filtered[y:y+h, x:x+w] = filter(face_pixels)
            #To reconstruct the face, apply the reconstruct function to the filtered pixels
            reconstructed[y:y+h, x:x+w] = reconstruct(filtered[y:y+h, x:x+w])
        filtered_images.append(filtered)
        reconstructed_images.append(reconstructed)
    #====================================#
    print('Processing done.')
    print('Showing processed video...')
    print('Press q or ESC to exit the video')
    viewer.display_video(filtered_images, 'Filtered images')
    viewer.display_video(reconstructed_images, 'Reconstructed images')