Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #!/usr/bin/python
- # Multiprocess Transparent Webcam Demo | sept 17, 2010
- # by HartsAntler - bhartsho@yahoo.com
- # License: GNU GPL3
- # Tested with Ubuntu Lucid, all you should need is: apt-get install python-opencv python-pygame
- import multiprocessing, ctypes
- from multiprocessing import sharedctypes
- import os, sys, time
- import pygame
- #import Image, ImageDraw, ImageChops
- import gtk, glib, cairo, pango, gobject
- BG_RED = 0.98
- BG_GREEN = 0.98
- BG_BLUE = 0.99
- BG_ALPHA = 0.0
- BG_COLOR = (BG_RED,BG_GREEN,BG_BLUE,BG_ALPHA)
- def transparent_window( win, color=BG_COLOR, decorate=False ):
- win.set_decorated( decorate )
- # Tell GTK+ that we want to draw the windows background ourself.
- # If we don't do this then GTK+ will clear the window to the
- # opaque theme default color, which isn't what we want.
- win.set_app_paintable(True)
- make_transparent( win, color )
- def expose_transparent(widget, event):
- cr = widget.window.cairo_create()
- r,g,b,a = widget._trans_color_hack
- cr.set_source_rgba(r, g, b, a) # Transparent
- # Draw the background
- cr.set_operator(cairo.OPERATOR_SOURCE)
- cr.paint()
- return False
- def make_transparent(widget, color=BG_COLOR ):
- widget._trans_color_hack = color
- # The X server sends us an expose event when the window becomes
- # visible on screen. It means we need to draw the contents. On a
- # composited desktop expose is normally only sent when the window
- # is put on the screen. On a non-composited desktop it can be
- # sent whenever the window is uncovered by another.
- #
- # The screen-changed event means the display to which we are
- # drawing changed. GTK+ supports migration of running
- # applications between X servers, which might not support the
- # same features, so we need to check each time.
- widget.connect('expose-event', expose_transparent)
- #win.connect('screen-changed', self.screen_changed)
- # To check if the display supports alpha channels, get the colormap
- screen = widget.get_screen()
- colormap = screen.get_rgba_colormap()
- # Now we have a colormap appropriate for the screen, use it
- widget.set_colormap(colormap)
- return False
- import opencv as cv
- from opencv import highgui
- cv.cvSetNumThreads(3) # 4 threads might be wasteful
- print 'opencv threads', cv.cvGetNumThreads()
- '''
- useful utils:
- cv.Ipl2NumPy
- cv.Ipl2PIL
- cv.NumPy2Ipl
- cv.NumPy2PIL
- cv.PIL2Ipl
- cv.PIL2NumPy
- notes:
- CV_ADAPTIVE_THRESH_GAUSSIAN_C
- CV_ADAPTIVE_THRESH_MEAN_C
- CV_CALIB_CB_ADAPTIVE_THRESH
- CV_THRESH_BINARY
- CV_THRESH_BINARY_INV
- #something else? CV_THRESH_MASK # adaptive only?
- #CV_THRESH_OTSU
- CV_THRESH_TOZERO
- CV_THRESH_TOZERO_INV
- CV_THRESH_TRUNC
- cvAdaptiveThreshold
- cvThreshHist
- cvThreshold
- cvAdaptiveThreshold(*args)
- cvAdaptiveThreshold(CvArr src, CvArr dst, double max_value, int adaptive_method = 0,
- int threshold_type = 0, int block_size = 3,
- double param1 = 5)
- cvThreshold(*args)
- cvThreshold(CvArr src, CvArr dst, double threshold, double max_value,
- int threshold_type) -> double
- '''
- _colorspaces = '''
- CV_BGR2BGR555
- CV_BGR2BGR565
- CV_BGR2BGRA
- CV_BGR2GRAY
- CV_BGR2HLS
- CV_BGR2HSV
- CV_BGR2Lab
- CV_BGR2Luv
- CV_BGR2RGB
- CV_BGR2RGBA
- CV_BGR2XYZ
- CV_BGR2YCrCb
- CV_BGR5552BGR
- CV_BGR5552BGRA
- CV_BGR5552GRAY
- CV_BGR5552RGB
- CV_BGR5552RGBA
- CV_BGR5652BGR
- CV_BGR5652BGRA
- CV_BGR5652GRAY
- CV_BGR5652RGB
- CV_BGR5652RGBA
- CV_BGRA2BGR
- CV_BGRA2BGR555
- CV_BGRA2BGR565
- CV_BGRA2GRAY
- CV_BGRA2RGB
- CV_BGRA2RGBA
- CV_BayerBG2BGR
- CV_BayerBG2RGB
- CV_BayerGB2BGR
- CV_BayerGB2RGB
- CV_BayerGR2BGR
- CV_BayerGR2RGB
- CV_BayerRG2BGR
- CV_BayerRG2RGB
- CV_GRAY2BGR
- CV_GRAY2BGR555
- CV_GRAY2BGR565
- CV_GRAY2BGRA
- CV_GRAY2RGB
- CV_GRAY2RGBA
- CV_HLS2BGR
- CV_HLS2RGB
- CV_HSV2BGR
- CV_HSV2RGB
- CV_Lab2BGR
- CV_Lab2RGB
- CV_Luv2BGR
- CV_Luv2RGB
- CV_RGB2BGR
- CV_RGB2BGR555
- CV_RGB2BGR565
- CV_RGB2BGRA
- CV_RGB2GRAY
- CV_RGB2HLS
- CV_RGB2HSV
- CV_RGB2Lab
- CV_RGB2Luv
- CV_RGB2RGBA
- CV_RGB2XYZ
- CV_RGB2YCrCb
- CV_RGBA2BGR
- CV_RGBA2BGR555
- CV_RGBA2BGR565
- CV_RGBA2BGRA
- CV_RGBA2GRAY
- CV_RGBA2RGB
- CV_XYZ2BGR
- CV_XYZ2RGB
- CV_YCrCb2BGR
- CV_YCrCb2RGB
- '''
- ColorSpaces = {}
- ColorSpacesByValue = {}
- for name in _colorspaces.splitlines():
- name = name.strip()
- if name:
- value = getattr(cv,name)
- ColorSpacesByValue[ value ] = name
- ColorSpaces[ name ] = value
- def pygame_to_pil_img(pg_img):
- imgstr = pygame.image.tostring(pg_img, 'RGB')
- return Image.fromstring('RGB', pg_img.get_size(), imgstr)
- def pil_to_pygame_img(pil_img):
- imgstr = pil_img.tostring()
- return pygame.image.fromstring(imgstr, pil_img.size, 'RGB')
- class Trackable(object):
- def __init__(self, haar):
- self.haar = haar
- self._cv_storage = cv.cvCreateMemStorage(0)
- self.score = .0
- self.rects = []
- self.color = None
- self.grayscale = None
- # cv.cvSetImageROI( grayscale, cv.cvRect(fx, fy, fw, fh) )
- # cv.cvClearMemStorage(storage) # this invalidates the f.x, f.* attributes
- def detector( self, grayscale, scale=1 ):
- self.grayscale = grayscale
- storage = self._cv_storage
- cv.cvClearMemStorage(storage)
- # equalize histogram
- cv.cvEqualizeHist(grayscale, grayscale)
- _rects = cv.cvHaarDetectObjects(grayscale, self.haar, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(25, 25))
- rects = []
- if _rects:
- for r in _rects:
- rects.append( pygame.rect.Rect(r.x*scale, r.y*scale, r.width*scale, r.height*scale) )
- return rects
- _cfg_ubytes = 'active alpha blur athresh_block_size thresh_min thresh_max'.split()
- _cfg_ubytes += 'FXstencil FXblur FXsobel FXathresh FXthresh FXdetect'.split()
- class LayerConfig( ctypes.Structure ):
- _fields_ = [ ('colorspace',ctypes.c_int) ]
- for tag in _cfg_ubytes: _fields_.append( (tag, ctypes.c_ubyte) )
- #for tag in _cfg_ints: _fields_.append( (tag, ctypes.c_int) )
- del tag
- class UI(object):
- def __init__(self, active, layers):
- self.active = active # shared
- self.layers = layers # shared
- ## gtk ##
- self.window = win = gtk.Window()
- win.set_title( 'Harts MultiProcess Transparent WebCam' )
- win.connect('destroy', lambda w: gtk.main_quit() )
- transparent_window( win, decorate=True )
- root = gtk.HBox(); root.set_border_width( 3 )
- win.add( root )
- #####################################
- eb = gtk.EventBox()
- root.pack_start( eb )
- self._drawing_area = da = gtk.DrawingArea()
- da.set_size_request( 640,480 )
- da.connect('realize', self.realize)
- eb.add( da )
- make_transparent(da)
- ##################
- eb = gtk.EventBox()
- root.pack_start( eb, expand=False )
- split = gtk.VBox(); eb.add( split )
- ex = gtk.Expander( 'settings' ); ex.set_expanded(False)
- split.pack_start( ex, expand=False )
- ex = gtk.Expander( 'adjust layers' ); ex.set_expanded(True)
- split.pack_start( ex, expand=True)
- note = gtk.Notebook()
- note.set_tab_pos( gtk.POS_RIGHT )
- ex.add( note )
- for layer in layers:
- cspace = ColorSpacesByValue[ layer.colorspace ]
- tag = cspace.split('2')[-1]
- page = gtk.HBox()
- lab = gtk.Label(tag)
- note.append_page( page, lab )
- col1, col2 = gtk.VBox(), gtk.VBox()
- page.pack_start( col1, expand=False )
- page.pack_start( col2, expand=True )
- for name in dir(layer):
- if not name.startswith('_') and name != 'colorspace':
- val = getattr( layer, name )
- if name.startswith('FX') or name == 'active':
- b = gtk.CheckButton( name )
- b.set_active( bool(val) )
- b.connect('toggled', lambda b,lay,nam: setattr(lay,nam,bool(b.get_active())), layer, name)
- #frame = gtk.Frame(name)
- #frame.add( b )
- col1.pack_start( b )
- else:
- adjust = gtk.Adjustment(
- value=val,
- lower=0, upper=255,
- step_incr=1 )
- adjust.connect("value_changed", lambda a,lay,nam: setattr(lay,nam,int(a.value)), layer,name)
- scale = gtk.HScale( adjust )
- scale.set_digits(0)
- frame = gtk.Frame(name)
- frame.add( scale )
- col2.pack_start( frame )
- win.show_all()
- def realize( self, da ):
- wid = da.window.xid
- os.environ['SDL_WINDOWID'] = str(wid) # child fork respects environ
- self.subprocess = p = multiprocessing.Process(target=subprocess, args=(active,layers))
- p.start()
- #p.join()
- class Camera(object):
- try: ## opencv haar detect is multithreaded! ##
- DO_HAAR = True
- FaceCascade = cv.cvLoadHaarClassifierCascade('haarcascades/haarcascade_frontalface_alt.xml', cv.cvSize(1,1))
- EyesCascade = cv.cvLoadHaarClassifierCascade('haarcascades/haarcascade_eye.xml', cv.cvSize(1,1))
- except:
- print 'download opencv latest source code, and copy the haarcascades folder to here'
- DO_HAAR = False
- def __init__(self, layers):
- self.layers = layers
- pygame.display.init()
- SSBACKEND = pygame.transform.get_smoothscale_backend()
- if SSBACKEND == 'GENERIC':
- try: pygame.transform.set_smoothscale_backend( 'SSE' ); SSBACKEND = 'SSE'
- except: print 'SSE backend not available'
- print 'smooth scale backend', SSBACKEND
- self.prevfaces = None
- self.active = True
- self.index = 0
- for arg in sys.argv:
- if arg.startswith('camera='): self.index = int(arg.split('=')[-1]); break
- self.camera_pointer = highgui.cvCreateCameraCapture(self.index) # this is the old swig bindings ubuntu lucid works fine
- # HIGHGUI ERROR: V4L: setting property #16 is not supported
- #highgui.cvSetCaptureProperty( self.camera_pointer, highgui.CV_CAP_PROP_CONVERT_RGB, True )
- ## default color space is BGR - linux ##
- self.resize_capture( 640, 480 )
- self.resize_output( 640, 480 )
- if self.DO_HAAR:
- self.track_face = Trackable( self.FaceCascade )
- def resize_capture( self, x,y ):
- self.cwidth = x
- self.cheight = y
- highgui.cvSetCaptureProperty( self.camera_pointer, highgui.CV_CAP_PROP_FRAME_WIDTH, self.cwidth )
- highgui.cvSetCaptureProperty( self.camera_pointer, highgui.CV_CAP_PROP_FRAME_HEIGHT, self.cheight )
- def resize_output( self, x, y ):
- self.owidth = x
- self.oheight = y
- pygame.display.set_mode((x,y), 0, 32 ) # size, flags, depth
- self.screen = pygame.display.get_surface()
- def loop(self):
- ## BGR - linux
- _frame = highgui.cvQueryFrame(self.camera_pointer) # grabFrame returns 1?
- if not _frame: print 'lost connection to webcam?'
- self.screen.fill( (0,0,0,0) )
- surf = self.screen.copy()
- surf.fill( (0,0,255) )
- surf.set_alpha(255)
- stencil = None
- for layer in self.layers:
- if layer.active:
- #print layer
- _gray8 = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 1)
- _gray32 = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_32F, 1)
- a = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 3)
- cv.cvCvtColor(_frame, a, layer.colorspace)
- #print 'colorspaced converted'
- ## FX
- if layer.FXblur: # blur before threshing
- blur = layer.blur
- if blur < 1: blur = 1
- cv.cvSmooth( a, a, cv.CV_BLUR, blur )
- #if layer.FXsobel:
- if layer.FXthresh:
- cv.cvThreshold( a, a, layer.thresh_min, layer.thresh_max, cv.CV_THRESH_BINARY )
- if layer.FXathresh:
- cv.cvCvtColor(a, _gray8, cv.CV_RGB2GRAY)
- blocksize = layer.athresh_block_size
- if blocksize <= 2: blocksize = 3
- if blocksize % 2 != 1: blocksize += 1
- cv.cvAdaptiveThreshold(_gray8, _gray8, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY, blocksize )
- cv.cvCvtColor(_gray8, a, cv.CV_GRAY2RGB)
- ## pygame
- b = pygame.image.frombuffer(a.imageData, (self.cwidth,self.cheight), 'RGB')
- b.set_alpha( layer.alpha )
- surf.blit(b, (0,0))
- self.screen.lock()
- array = pygame.surfarray.pixels_alpha(self.screen)
- if stencil:
- alpha = pygame.surfarray.pixels3d( stencil )
- array[:] = alpha[:,:,0]
- del alpha
- else: array[:] = 255 # values higher than 256 are additive!
- del array
- self.screen.unlock()
- self.screen.blit( surf, (0,0) )
- pygame.display.flip()
- return self.active
- _rgb = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 3)
- cv.cvCvtColor(_frame, _rgb, cv.CV_BGR2RGB) # interesting chaos if _frame,_rgb is flipped
- rgb = pygame.image.frombuffer(_rgb.imageData, (self.cwidth,self.cheight), 'RGB')
- cfg = self.config['rgb']
- rgb.set_alpha( cfg['alpha'] )
- cfg = self.config['hsv']
- _hsv = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 3)
- cv.cvCvtColor(_rgb, _hsv, cv.CV_RGB2HSV)
- blur = cfg['blur']
- if blur < 1: blur = 1
- cv.cvSmooth( _hsv, _hsv, cv.CV_BLUR, blur ) # big help for removing noise
- hsv = pygame.image.frombuffer(_hsv.imageData, (self.cwidth,self.cheight), 'RGB')
- hsv.set_alpha( cfg['alpha'] )
- _chrome = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 3)
- cv.cvCvtColor(_rgb, _chrome, cv.CV_RGB2YCrCb) #cv.cvCvtColor(_rgb, _luv, cv.CV_RGB2HLS) # blocky reds
- chrome = pygame.image.frombuffer(_chrome.imageData, (self.cwidth,self.cheight), 'RGB')
- cfg = self.config['chrome']
- chrome.set_alpha( cfg['alpha'] )
- # create grayscale version first (faster resize)
- grayscale = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 1)
- grayscale2 = cv.cvCreateImage((self.cwidth/2,self.cheight/2), cv.IPL_DEPTH_8U, 1)
- cv.cvCvtColor(_rgb, grayscale, cv.CV_RGB2GRAY)
- interpolation = 0 # detector much better without interp
- cv.cvResize(grayscale, grayscale2, interpolation)
- if self.DO_HAAR: faces = self.track_face.detector( grayscale2, scale=2 ) # operates in grayscale
- else: faces = []
- cfg = self.config['simple-thresh']
- blur = cfg['blur']
- if blur < 1: blur = 1
- cv.cvSmooth( grayscale, grayscale, cv.CV_BLUR, blur ) # big help for removing noise
- _thresh = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 3)
- _thresh8 = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 1)
- cv.cvThreshold( grayscale, _thresh8, cfg['min'], cfg['max'], cv.CV_THRESH_BINARY )
- cv.cvCvtColor(_thresh8, _thresh, cv.CV_GRAY2RGB)
- threshed = pygame.image.frombuffer(_thresh.imageData, (self.cwidth,self.cheight), 'RGB')
- threshed.set_alpha( cfg['alpha'] )
- cfg = self.config['opencv-adaptive-thresh']
- ## Gaussian gives more detail than Mean - AdaptiveThreshold
- blocksize = cfg['block-size']
- if blocksize <= 2: blocksize = 3
- if blocksize % 2 != 1: blocksize += 1
- cv.cvAdaptiveThreshold(grayscale, _thresh8, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY, blocksize )
- cv.cvCvtColor(_thresh8, _thresh, cv.CV_GRAY2RGB)
- athreshed = pygame.image.frombuffer(_thresh.imageData, (self.cwidth,self.cheight), 'RGB')
- athreshed.set_alpha( cfg['alpha'] )
- _sobel = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 3)
- _sobel32 = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_32F, 1)
- _sobel8 = cv.cvCreateImage((self.cwidth,self.cheight), cv.IPL_DEPTH_8U, 1)
- cv.cvSobel( grayscale, _sobel32, 1, 1, 5 ) #xorder, yorder, aperture
- #cv.cvSobel( _thresh8, _sobel32, 3, 3, 19 ) #xorder, yorder, aperture
- cv.cvConvert( _sobel32, _sobel8 ); cv.cvCvtColor(_sobel8, _sobel, cv.CV_GRAY2RGB)
- sobel = pygame.image.frombuffer(_sobel.imageData, (self.cwidth,self.cheight), 'RGB')
- cfg = self.config['sobel']
- sobel.set_alpha( cfg['alpha'] )
- if not faces and self.prevfaces: faces = self.prevfaces
- else: self.prevfaces = faces
- pgthreshed = hsv.copy()
- for rect in faces:
- acolor = pygame.transform.average_color(rgb, rect)
- pygame.transform.threshold(pgthreshed,rgb,acolor,(30,100,30),(0,0,0),1) # arg1=dest, arg2=source (must be different)
- break
- cfg = self.config['pygame-adaptive-thresh']
- pgthreshed.set_alpha( cfg['alpha'] )
- #if self.cwidth*2 == self.owidth and self.cheight*2 == self.oheight:
- # surf = pygame.transform.scale2x( surf )
- #elif self.cwidth != self.owidth or self.cheight != self.oheight:
- # if SSBACKEND != 'GENERIC':
- # surf = pygame.transform.smoothscale( surf, (self.owidth, self.oheight) )
- # else:
- # surf = pygame.transform.scale( surf, (self.owidth, self.oheight) )
- self.screen.fill( (0,0,0,0) ) # my trick
- s = self.screen.copy()
- s.fill( (0,0,255) )
- for surf in (threshed, athreshed, pgthreshed, chrome, sobel, hsv, rgb):
- if surf.get_alpha():
- s.blit(surf, (0,0))
- #s=s.convert_alpha()
- self.screen.lock()
- alpha = pygame.surfarray.pixels3d( threshed )
- array = pygame.surfarray.pixels_alpha(self.screen)
- array[:] = alpha[:,:,0]
- del array
- del alpha
- self.screen.unlock()
- self.screen.blit( s, (0,0) )
- for rect in faces:
- pygame.draw.rect( self.screen, (0,255,0, 128), rect, 1 )
- #self.screen.set_alpha(0)
- pygame.display.flip()
- return self.active
- #CV_BGR2HLS
- #CV_BGR2Luv
- #CV_BGR2XYZ
- _default_spaces = [
- (cv.CV_BGR2RGB,),
- (cv.CV_BGR2HSV,),
- (cv.CV_BGR2Lab,),
- (cv.CV_BGR2YCrCb,),
- ]
- def subprocess( active, layers ):
- cam = Camera( layers )
- while active.value:
- cam.loop()
- if __name__ == '__main__':
- active = sharedctypes.Value('i', 1, lock=False)
- layers = sharedctypes.Array( LayerConfig, _default_spaces, lock=False )
- layers[0].active = 1
- layers[0].alpha = 255
- gui = UI( active, layers )
- gtk.main()
- gui.active.value = 0
- gui.subprocess.join()
- print 'main process exit'
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement