Advertisement
Guest User

..

a guest
May 28th, 2012
164
0
Never
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
Python 21.11 KB | None | 0 0
  1. #!/usr/bin/env python
  2.  
  3. # See also: http://sundararajana.blogspot.com/2007/05/motion-detection-using-opencv.html
  4.  
  5. import opencv
  6. import time
  7.  
  8. from scipy import *
  9. from scipy.cluster import vq
  10. import numpy
  11. import sys, os, random, hashlib
  12.  
  13. from math import *
  14.  
  15. """
  16. Python Motion Tracker
  17.  
  18. Reads an incoming video stream and tracks motion in real time.
  19. Detected motion events are logged to a text file.  Also has face detection.
  20. """
  21.  
  22. #
  23. # BBoxes must be in the format:
  24. # ( (topleft_x), (topleft_y) ), ( (bottomright_x), (bottomright_y) ) )
  25. top = 0
  26. bottom = 1
  27. left = 0
  28. right = 1
  29.  
  30. def merge_collided_bboxes( bbox_list ):
  31.     # For every bbox...
  32.     for this_bbox in bbox_list:
  33.        
  34.         # Collision detect every other bbox:
  35.         for other_bbox in bbox_list:
  36.             if this_bbox is other_bbox: continue  # Skip self
  37.            
  38.             # Assume a collision to start out with:
  39.             has_collision = True
  40.            
  41.             # These coords are in screen coords, so > means
  42.             # "lower than" and "further right than".  And <
  43.             # means "higher than" and "further left than".
  44.            
  45.             # We also inflate the box size by 10% to deal with
  46.             # fuzziness in the data.  (Without this, there are many times a bbox
  47.             # is short of overlap by just one or two pixels.)
  48.             if (this_bbox[bottom][0]*1.1 < other_bbox[top][0]*0.9): has_collision = False
  49.             if (this_bbox[top][0]*.9 > other_bbox[bottom][0]*1.1): has_collision = False
  50.            
  51.             if (this_bbox[right][1]*1.1 < other_bbox[left][1]*0.9): has_collision = False
  52.             if (this_bbox[left][1]*0.9 > other_bbox[right][1]*1.1): has_collision = False
  53.            
  54.             if has_collision:
  55.                 # merge these two bboxes into one, then start over:
  56.                 top_left_x = min( this_bbox[left][0], other_bbox[left][0] )
  57.                 top_left_y = min( this_bbox[left][1], other_bbox[left][1] )
  58.                 bottom_right_x = max( this_bbox[right][0], other_bbox[right][0] )
  59.                 bottom_right_y = max( this_bbox[right][1], other_bbox[right][1] )
  60.                
  61.                 new_bbox = ( (top_left_x, top_left_y), (bottom_right_x, bottom_right_y) )
  62.                
  63.                 bbox_list.remove( this_bbox )
  64.                 bbox_list.remove( other_bbox )
  65.                 bbox_list.append( new_bbox )
  66.                
  67.                 # Start over with the new list:
  68.                 return merge_collided_bboxes( bbox_list )
  69.    
  70.     # When there are no collions between boxes, return that list:
  71.     return bbox_list
  72.  
  73.  
  74. def detect_faces( image, haar_cascade, mem_storage ):
  75.  
  76.     faces = []
  77.     image_size = cv.GetSize( image )
  78.  
  79.     #faces = cv.HaarDetectObjects(grayscale, haar_cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, (20, 20) )
  80.     #faces = cv.HaarDetectObjects(image, haar_cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING )
  81.     #faces = cv.HaarDetectObjects(image, haar_cascade, storage )
  82.     #faces = cv.HaarDetectObjects(image, haar_cascade, mem_storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, ( 16, 16 ) )
  83.     #faces = cv.HaarDetectObjects(image, haar_cascade, mem_storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, ( 4,4 ) )
  84.     faces = cv.HaarDetectObjects(image, haar_cascade, mem_storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, ( image_size[0]/10, image_size[1]/10) )
  85.    
  86.     for face in faces:
  87.         box = face[0]
  88.         cv.Rectangle(image, ( box[0], box[1] ),
  89.             ( box[0] + box[2], box[1] + box[3]), cv.RGB(255, 0, 0), 1, 8, 0)
  90.  
  91.  
  92. class Target:
  93.     def __init__(self):
  94.        
  95.         if len( sys.argv ) > 1:
  96.             self.writer = None
  97.             self.capture = cv.CaptureFromFile( sys.argv[1] )
  98.             frame = cv.QueryFrame(self.capture)
  99.             frame_size = cv.GetSize(frame)
  100.         else:
  101.             fps=15
  102.             is_color = True
  103.  
  104.             self.capture = cv.CaptureFromCam(0)
  105.             #cv.SetCaptureProperty( self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640 );
  106.             #cv.SetCaptureProperty( self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480 );
  107.             cv.SetCaptureProperty( self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320 );
  108.             cv.SetCaptureProperty( self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240 );
  109.             frame = cv.QueryFrame(self.capture)
  110.             frame_size = cv.GetSize(frame)
  111.            
  112.             self.writer = None
  113.             #self.writer = cv.CreateVideoWriter("/dev/shm/test1.mp4", cv.CV_FOURCC('D', 'I', 'V', 'X'), fps, frame_size, is_color )
  114.             #self.writer = cv.CreateVideoWriter("test2.mpg", cv.CV_FOURCC('P', 'I', 'M', '1'), fps, frame_size, is_color )
  115.             #self.writer = cv.CreateVideoWriter("test3.mp4", cv.CV_FOURCC('D', 'I', 'V', 'X'), fps, cv.GetSize(frame), is_color )
  116.             #self.writer = cv.CreateVideoWriter("test4.mpg", cv.CV_FOURCC('P', 'I', 'M', '1'), fps, (320, 240), is_color )
  117.            
  118.             # These both gave no error message, but saved no file:
  119.             ###self.writer = cv.CreateVideoWriter("test5.h263i", cv.CV_FOURCC('I', '2', '6', '3'), fps, cv.GetSize(frame), is_color )
  120.             ###self.writer = cv.CreateVideoWriter("test6.fli",   cv.CV_FOURCC('F', 'L', 'V', '1'), fps, cv.GetSize(frame), is_color )
  121.             # Can't play this one:
  122.             ###self.writer = cv.CreateVideoWriter("test7.mp4",   cv.CV_FOURCC('D', 'I', 'V', '3'), fps, cv.GetSize(frame), is_color )
  123.  
  124.         # 320x240 15fpx in DIVX is about 4 gigs per day.
  125.  
  126.         frame = cv.QueryFrame(self.capture)
  127.         cv.NamedWindow("Target", 1)
  128.         #cv.NamedWindow("Target2", 1)
  129.        
  130.  
  131.     def run(self):
  132.         # Initialize
  133.         #log_file_name = "tracker_output.log"
  134.         #log_file = file( log_file_name, 'a' )
  135.        
  136.         frame = cv.QueryFrame( self.capture )
  137.         frame_size = cv.GetSize( frame )
  138.        
  139.         # Capture the first frame from webcam for image properties
  140.         display_image = cv.QueryFrame( self.capture )
  141.        
  142.         # Greyscale image, thresholded to create the motion mask:
  143.         grey_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_8U, 1 )
  144.        
  145.         # The RunningAvg() function requires a 32-bit or 64-bit image...
  146.         running_average_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_32F, 3 )
  147.         # ...but the AbsDiff() function requires matching image depths:
  148.         running_average_in_display_color_depth = cv.CloneImage( display_image )
  149.        
  150.         # RAM used by FindContours():
  151.         mem_storage = cv.CreateMemStorage(0)
  152.        
  153.         # The difference between the running average and the current frame:
  154.         difference = cv.CloneImage( display_image )
  155.        
  156.         target_count = 1
  157.         last_target_count = 1
  158.         last_target_change_t = 0.0
  159.         k_or_guess = 1
  160.         codebook=[]
  161.         frame_count=0
  162.         last_frame_entity_list = []
  163.        
  164.         t0 = time.time()
  165.        
  166.         # For toggling display:
  167.         image_list = [ "camera", "difference", "threshold", "display", "faces" ]
  168.         image_index = 0   # Index into image_list
  169.    
  170.    
  171.         # Prep for text drawing:
  172.         text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA )
  173.         text_coord = ( 5, 15 )
  174.         text_color = cv.CV_RGB(255,255,255)
  175.  
  176.         ###############################
  177.         ### Face detection stuff
  178.         #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
  179.         haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt.xml' )
  180.         #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
  181.         #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
  182.         #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
  183.         #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
  184.         #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
  185.         #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )
  186.        
  187.         # Set this to the max number of targets to look for (passed to k-means):
  188.         max_targets = 3
  189.        
  190.         while True:
  191.            
  192.             # Capture frame from webcam
  193.             camera_image = cv.QueryFrame( self.capture )
  194.            
  195.             frame_count += 1
  196.             frame_t0 = time.time()
  197.            
  198.             # Create an image with interactive feedback:
  199.             display_image = cv.CloneImage( camera_image )
  200.            
  201.             # Create a working "color image" to modify / blur
  202.             color_image = cv.CloneImage( display_image )
  203.  
  204.             # Smooth to get rid of false positives
  205.             cv.Smooth( color_image, color_image, cv.CV_GAUSSIAN, 19, 0 )
  206.            
  207.             # Use the Running Average as the static background         
  208.             # a = 0.020 leaves artifacts lingering way too long.
  209.             # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
  210.             cv.RunningAvg( color_image, running_average_image, 0.320, None )
  211.            
  212.             # Convert the scale of the moving average.
  213.             cv.ConvertScale( running_average_image, running_average_in_display_color_depth, 1.0, 0.0 )
  214.            
  215.             # Subtract the current frame from the moving average.
  216.             cv.AbsDiff( color_image, running_average_in_display_color_depth, difference )
  217.            
  218.             # Convert the image to greyscale.
  219.             cv.CvtColor( difference, grey_image, cv.CV_RGB2GRAY )
  220.  
  221.             # Threshold the image to a black and white motion mask:
  222.             cv.Threshold( grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY )
  223.             # Smooth and threshold again to eliminate "sparkles"
  224.             cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0 )
  225.             cv.Threshold( grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY )
  226.            
  227.             grey_image_as_array = numpy.asarray( cv.GetMat( grey_image ) )
  228.             non_black_coords_array = numpy.where( grey_image_as_array > 3 )
  229.             # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
  230.             non_black_coords_array = zip( non_black_coords_array[1], non_black_coords_array[0] )
  231.            
  232.             points = []   # Was using this to hold either pixel coords or polygon coords.
  233.             bounding_box_list = []
  234.  
  235.             # Now calculate movements using the white pixels as "motion" data
  236.             contour = cv.FindContours( grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE )
  237.            
  238.             while contour:
  239.                
  240.                 bounding_rect = cv.BoundingRect( list(contour) )
  241.                 point1 = ( bounding_rect[0], bounding_rect[1] )
  242.                 point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )
  243.                
  244.                 bounding_box_list.append( ( point1, point2 ) )
  245.                 polygon_points = cv.ApproxPoly( list(contour), mem_storage, cv.CV_POLY_APPROX_DP )
  246.                
  247.                 # To track polygon points only (instead of every pixel):
  248.                 #points += list(polygon_points)
  249.                
  250.                 # Draw the contours:
  251.                 ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
  252.                 cv.FillPoly( grey_image, [ list(polygon_points), ], cv.CV_RGB(255,255,255), 0, 0 )
  253.                 cv.PolyLine( display_image, [ polygon_points, ], 0, cv.CV_RGB(255,255,255), 1, 0, 0 )
  254.                 #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)
  255.  
  256.                 contour = contour.h_next()
  257.            
  258.            
  259.             # Find the average size of the bbox (targets), then
  260.             # remove any tiny bboxes (which are prolly just noise).
  261.             # "Tiny" is defined as any box with 1/10th the area of the average box.
  262.             # This reduces false positives on tiny "sparkles" noise.
  263.             box_areas = []
  264.             for box in bounding_box_list:
  265.                 box_width = box[right][0] - box[left][0]
  266.                 box_height = box[bottom][0] - box[top][0]
  267.                 box_areas.append( box_width * box_height )
  268.                
  269.                 #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)
  270.            
  271.             average_box_area = 0.0
  272.             if len(box_areas): average_box_area = float( sum(box_areas) ) / len(box_areas)
  273.            
  274.             trimmed_box_list = []
  275.             for box in bounding_box_list:
  276.                 box_width = box[right][0] - box[left][0]
  277.                 box_height = box[bottom][0] - box[top][0]
  278.                
  279.                 # Only keep the box if it's not a tiny noise box:
  280.                 if (box_width * box_height) > average_box_area*0.1: trimmed_box_list.append( box )
  281.            
  282.             # Draw the trimmed box list:
  283.             #for box in trimmed_box_list:
  284.             #   cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )
  285.                
  286.             bounding_box_list = merge_collided_bboxes( trimmed_box_list )
  287.  
  288.             # Draw the merged box list:
  289.             for box in bounding_box_list:
  290.                 cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 1 )
  291.            
  292.             # Here are our estimate points to track, based on merged & trimmed boxes:
  293.             estimated_target_count = len( bounding_box_list )
  294.            
  295.             # Don't allow target "jumps" from few to many or many to few.
  296.             # Only change the number of targets up to one target per n seconds.
  297.             # This fixes the "exploding number of targets" when something stops moving
  298.             # and the motion erodes to disparate little puddles all over the place.
  299.            
  300.             if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
  301.                 estimated_target_count = last_target_count
  302.             else:
  303.                 if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1
  304.                 if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1
  305.                 last_target_change_t = frame_t0
  306.            
  307.             # Clip to the user-supplied maximum:
  308.             estimated_target_count = min( estimated_target_count, max_targets )
  309.            
  310.             # The estimated_target_count at this point is the maximum number of targets
  311.             # we want to look for.  If kmeans decides that one of our candidate
  312.             # bboxes is not actually a target, we remove it from the target list below.
  313.            
  314.             # Using the numpy values directly (treating all pixels as points): 
  315.             points = non_black_coords_array
  316.             center_points = []
  317.            
  318.             if len(points):
  319.                
  320.                 # If we have all the "target_count" targets from last frame,
  321.                 # use the previously known targets (for greater accuracy).
  322.                 k_or_guess = max( estimated_target_count, 1 )  # Need at least one target to look for.
  323.                 if len(codebook) == estimated_target_count:
  324.                     k_or_guess = codebook
  325.                
  326.                 #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
  327.                 codebook, distortion = vq.kmeans( array( points ), k_or_guess )
  328.                
  329.                 # Convert to tuples (and draw it to screen)
  330.                 for center_point in codebook:
  331.                     center_point = ( int(center_point[0]), int(center_point[1]) )
  332.                     center_points.append( center_point )
  333.                     #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
  334.                     #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)
  335.            
  336.             # Now we have targets that are NOT computed from bboxes -- just
  337.             # movement weights (according to kmeans).  If any two targets are
  338.             # within the same "bbox count", average them into a single target.  
  339.             #
  340.             # (Any kmeans targets not within a bbox are also kept.)
  341.             trimmed_center_points = []
  342.             removed_center_points = []
  343.                        
  344.             for box in bounding_box_list:
  345.                 # Find the centers within this box:
  346.                 center_points_in_box = []
  347.                
  348.                 for center_point in center_points:
  349.                     if  center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
  350.                         center_point[1] < box[bottom][1] and center_point[1] > box[top][1] :
  351.                        
  352.                         # This point is within the box.
  353.                         center_points_in_box.append( center_point )
  354.                
  355.                 # Now see if there are more than one.  If so, merge them.
  356.                 if len( center_points_in_box ) > 1:
  357.                     # Merge them:
  358.                     x_list = y_list = []
  359.                     for point in center_points_in_box:
  360.                         x_list.append(point[0])
  361.                         y_list.append(point[1])
  362.                    
  363.                     average_x = int( float(sum( x_list )) / len( x_list ) )
  364.                     average_y = int( float(sum( y_list )) / len( y_list ) )
  365.                    
  366.                     trimmed_center_points.append( (average_x, average_y) )
  367.                    
  368.                     # Record that they were removed:
  369.                     removed_center_points += center_points_in_box
  370.                    
  371.                 if len( center_points_in_box ) == 1:
  372.                     trimmed_center_points.append( center_points_in_box[0] ) # Just use it.
  373.            
  374.             # If there are any center_points not within a bbox, just use them.
  375.             # (It's probably a cluster comprised of a bunch of small bboxes.)
  376.             for center_point in center_points:
  377.                 if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
  378.                     trimmed_center_points.append( center_point )
  379.            
  380.             # Draw what we found:
  381.             #for center_point in trimmed_center_points:
  382.             #   center_point = ( int(center_point[0]), int(center_point[1]) )
  383.             #   cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
  384.             #   cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
  385.             #   cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
  386.             #   cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)
  387.            
  388.             # Determine if there are any new (or lost) targets:
  389.             actual_target_count = len( trimmed_center_points )
  390.             last_target_count = actual_target_count
  391.            
  392.             # Now build the list of physical entities (objects)
  393.             this_frame_entity_list = []
  394.            
  395.             # An entity is list: [ name, color, last_time_seen, last_known_coords ]
  396.            
  397.             for target in trimmed_center_points:
  398.            
  399.                 # Is this a target near a prior entity (same physical entity)?
  400.                 entity_found = False
  401.                 entity_distance_dict = {}
  402.                
  403.                 for entity in last_frame_entity_list:
  404.                    
  405.                     entity_coords= entity[3]
  406.                     delta_x = entity_coords[0] - target[0]
  407.                     delta_y = entity_coords[1] - target[1]
  408.            
  409.                     distance = sqrt( pow(delta_x,2) + pow( delta_y,2) )
  410.                     entity_distance_dict[ distance ] = entity
  411.                
  412.                 # Did we find any non-claimed entities (nearest to furthest):
  413.                 distance_list = entity_distance_dict.keys()
  414.                 distance_list.sort()
  415.                
  416.                 for distance in distance_list:
  417.                    
  418.                     # Yes; see if we can claim the nearest one:
  419.                     nearest_possible_entity = entity_distance_dict[ distance ]
  420.                    
  421.                     # Don't consider entities that are already claimed:
  422.                     if nearest_possible_entity in this_frame_entity_list:
  423.                         #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
  424.                         continue
  425.                    
  426.                     #print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
  427.                     # Found the nearest entity to claim:
  428.                     entity_found = True
  429.                     nearest_possible_entity[2] = frame_t0  # Update last_time_seen
  430.                     nearest_possible_entity[3] = target  # Update the new location
  431.                     this_frame_entity_list.append( nearest_possible_entity )
  432.                     #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
  433.                     break
  434.                
  435.                 if entity_found == False:
  436.                     # It's a new entity.
  437.                     color = ( random.randint(0,255), random.randint(0,255), random.randint(0,255) )
  438.                     name = hashlib.md5( str(frame_t0) + str(color) ).hexdigest()[:6]
  439.                     last_time_seen = frame_t0
  440.                    
  441.                     new_entity = [ name, color, last_time_seen, target ]
  442.                     this_frame_entity_list.append( new_entity )
  443.                     #log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
  444.            
  445.             # Now "delete" any not-found entities which have expired:
  446.             entity_ttl = 1.0  # 1 sec.
  447.            
  448.             for entity in last_frame_entity_list:
  449.                 last_time_seen = entity[2]
  450.                 if frame_t0 - last_time_seen > entity_ttl:
  451.                     # It's gone.
  452.                     #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
  453.                     pass
  454.                 else:
  455.                     # Save it for next time... not expired yet:
  456.                     this_frame_entity_list.append( entity )
  457.            
  458.             # For next frame:
  459.             last_frame_entity_list = this_frame_entity_list
  460.            
  461.             # Draw the found entities to screen:
  462.             for entity in this_frame_entity_list:
  463.                 center_point = entity[3]
  464.                 c = entity[1]  # RGB color tuple
  465.                 cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
  466.                 cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
  467.                 cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
  468.                 cv.Circle(display_image, center_point,  5, cv.CV_RGB(c[0], c[1], c[2]), 3)
  469.            
  470.            
  471.             #print "min_size is: " + str(min_size)
  472.             # Listen for ESC or ENTER key
  473.             c = cv.WaitKey(7) % 0x100
  474.             if c == 27 or c == 10:
  475.                 break
  476.            
  477.             # Toggle which image to show
  478.             if chr(c) == 'd':
  479.                 image_index = ( image_index + 1 ) % len( image_list )
  480.            
  481.             image_name = image_list[ image_index ]
  482.            
  483.             # Display frame to user
  484.             if image_name == "camera":
  485.                 image = camera_image
  486.                 cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
  487.             elif image_name == "difference":
  488.                 image = difference
  489.                 cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
  490.             elif image_name == "display":
  491.                 image = display_image
  492.                 cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
  493.             elif image_name == "threshold":
  494.                 # Convert the image to color.
  495.                 cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
  496.                 image = display_image  # Re-use display image here
  497.                 cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
  498.             elif image_name == "faces":
  499.                 # Do face detection
  500.                 detect_faces( camera_image, haar_cascade, mem_storage )            
  501.                 image = camera_image  # Re-use camera image here
  502.                 cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
  503.            
  504.             cv.ShowImage( "Target", image )
  505.            
  506.             if self.writer:
  507.                 cv.WriteFrame( self.writer, image );
  508.            
  509.             #log_file.flush()
  510.            
  511.             # If only using a camera, then there is no time.sleep() needed,
  512.             # because the camera clips us to 15 fps.  But if reading from a file,
  513.             # we need this to keep the time-based target clipping correct:
  514.             frame_t1 = time.time()
  515.            
  516.  
  517.             # If reading from a file, put in a forced delay:
  518.             if not self.writer:
  519.                 delta_t = frame_t1 - frame_t0
  520.                 if delta_t < ( 1.0 / 15.0 ): time.sleep( ( 1.0 / 15.0 ) - delta_t )
  521.            
  522.         t1 = time.time()
  523.         time_delta = t1 - t0
  524.         processed_fps = float( frame_count ) / time_delta
  525.         print "Got %d frames. %.1f s. %f fps." % ( frame_count, time_delta, processed_fps )
  526.        
  527. if __name__=="__main__":
  528.     t = Target()
  529. #   import cProfile
  530. #   cProfile.run( 't.run()' )
  531.     t.run()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement