Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #!/usr/bin/env python
- #Reference: the baxter_stocking_stuffer project by students in Northwestern's MSR program - Josh Marino, Sabeen Admani, Andrew Turchina and Chu-Chu Igbokwe
- #Service provided - ObjLocation service - contains x,y,z coordinates of object in baxter's stationary body frame, whether it is ok to grasp and if objects were found in the current frame.
- import rospy
- import numpy as np
- import cv2
- import cv_bridge
- import baxter_interface
- import imutils
- import numpy
- from std_msgs.msg import String, Int32
- from sensor_msgs.msg import Image
- from geometry_msgs.msg import Point
- from cv_bridge import CvBridge, CvBridgeError
- from baxter_builder.srv import *
# HSV threshold presets kept for reference only -- callback() currently
# hard-codes its own cv2.inRange bounds and does not read these:
#   green: H 60-90,   S 85-175,  V 70-255
#   white: H 0-0,     S 0-0,     V 0-255
#   blue:  H 105-115, S 135-160, V 20-60

# True once callback() has recorded an object centroid from a camera frame.
# NOTE(review): never reset to False afterwards -- confirm that is intended.
obj_found = False
# NOTE(review): declared but never written or read elsewhere in this file.
correct_location = True
# Object color code: 0 = green, 1 = blue.
obj_color = 0
# Object centroid position in Baxter's stationary base frame, written by
# callback().  (0, 0) is the "not yet located" sentinel that
# get_obj_location() polls on.
xb = 0
yb = 0
def callback(message):
    """Locate a colored object in a left-hand-camera frame.

    Thresholds the incoming image in HSV space, cleans the mask with
    morphological opening/closing, finds contours, and stores the centroid
    of the first sufficiently large contour in the module globals
    (xb, yb), expressed in Baxter's stationary base frame.  Sets
    obj_found when a centroid is recorded.

    message: sensor_msgs/Image from /cameras/left_hand_camera/image.
    """
    global xb, yb, obj_color, obj_found
    # Reset the stored centroid; (0, 0) means "not yet located" and is the
    # sentinel that get_obj_location() polls on.
    xb = 0
    yb = 0
    # Convert the ROS image message into an OpenCV BGR image.
    bridge = CvBridge()
    cv_image = bridge.imgmsg_to_cv2(message, "bgr8")
    height, width, depth = cv_image.shape
    # HSV is more robust to lighting changes than raw BGR for color
    # thresholding.
    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    # Object color: 0 = green, 1 = blue.  Forced to green, so only the
    # branch below is ever taken.
    obj_color = 0
    if obj_color == 0:
        # In-range mask for the target color.  NOTE(review): the bounds
        # [51,81,24]..[180,255,255] are hard-coded here; the named HSV
        # presets at the top of the file are not used -- confirm which set
        # is the calibrated one.
        thresholded = cv2.inRange(hsv, np.array([51, 81, 24]),
                                  np.array([180, 255, 255]))
        # Morphological opening (erode then dilate): removes small noise
        # blobs from the foreground.
        thresholded = cv2.erode(thresholded, np.ones((2, 2), np.uint8), iterations=2)
        thresholded = cv2.dilate(thresholded, np.ones((2, 2), np.uint8), iterations=2)
        # Morphological closing (dilate then erode): fills small holes in
        # the foreground.
        thresholded = cv2.dilate(thresholded, np.ones((2, 2), np.uint8), iterations=1)
        thresholded = cv2.erode(thresholded, np.ones((2, 2), np.uint8), iterations=1)
        # Find object outlines in the binary mask.  NOTE(review): the
        # 3-value return is the OpenCV 3.x signature; OpenCV 4.x returns
        # only (contours, hierarchy) -- confirm the installed version.
        im2, contours, hierarchy = cv2.findContours(
            thresholded, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        # Annotate the original image with all detected contours (debug aid).
        cv2.drawContours(cv_image, contours, -1, (0, 255, 0), 1)
        numobj = len(contours)  # number of objects found in this frame
        print("Number of objects found in the current frame: %d" % numobj)
        if numobj > 0:
            # Image moments of the first contour give its centroid;
            # m00 (area) > 500 filters out small spurious detections.
            moms = cv2.moments(contours[0])
            if moms['m00'] > 500:
                cx = int(moms['m10'] / moms['m00'])
                cy = int(moms['m01'] / moms['m00'])
                # Map pixel offsets from the image center into Baxter's
                # stationary base frame (empirically calibrated scale and
                # offsets).  // keeps the original integer-center behavior.
                xb = (cy - (height // 2)) * .0031 * .461 + .712 - .02
                yb = (cx - (width // 2)) * .0031 * .461 + .316 - .02
                print(xb)
                print(yb)
                obj_found = True
        # Display the annotated frame and the binary mask for debugging.
        cv2.imshow("Original", cv_image)
        cv2.imshow("Thresholded", thresholded)
        cv2.waitKey(3)
def get_obj_location(request):
    """Handle one object_location_service request.

    Blocks until callback() has stored a nonzero object centroid, then
    returns an ObjLocationResponse carrying the centroid (xb, yb, zb=0)
    in Baxter's base frame, whether an object was found, and its color
    code.
    """
    global xb, yb, obj_color, obj_found
    # z is not estimated by the vision pipeline; always reported as 0.
    zb = 0
    # Poll until a camera frame has produced a centroid; (0, 0) is the
    # "not yet located" sentinel written by callback().
    while xb == 0 and yb == 0:
        rospy.sleep(1)
    return ObjLocationResponse(xb, yb, zb, obj_found, obj_color)
def main():
    """Start the left-hand-camera object-detection node.

    Subscribes to the left hand camera image feed (processed by
    callback) and advertises object_location_service (answered by
    get_obj_location), then spins until shutdown.
    """
    rospy.init_node('left_camera_node')
    # Every incoming camera frame is handed to callback().
    rospy.Subscriber("/cameras/left_hand_camera/image", Image, callback)
    # Declare the object location service with the ObjLocation service
    # type; all requests are passed to get_obj_location().
    obj_location_srv = rospy.Service("object_location_service", ObjLocation,
                                     get_obj_location)
    print("Left - Ready to find object.")
    # Keeps the node from exiting until the service is shut down.
    rospy.spin()

if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement