Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #importing some useful packages
- import matplotlib.pyplot as plt
- import matplotlib.image as mpimg
- import numpy as np
- import cv2
- from scipy import ndimage, misc
- %matplotlib inline
- import math
def grayscale(img):
    """Convert a BGR image to a single-channel grayscale image.

    NOTE: to display the result as grayscale with matplotlib, call
    plt.imshow(gray, cmap='gray').

    Parameters
    ----------
    img : numpy.ndarray
        BGR image, as produced by cv2.imread().

    Returns
    -------
    numpy.ndarray
        Single-channel grayscale image.
    """
    # Bug fix: the original re-read a hard-coded file here, silently
    # ignoring the `img` argument.  Use COLOR_BGR2GRAY because the image
    # comes from cv2.imread(), which loads in BGR channel order.
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Load the test image once at module level.  (The original never defined
# `img` here — the read was hidden inside grayscale(), so this call raised
# NameError.)
img = cv2.imread('test_images/solidWhiteRight.jpg')
gr = grayscale(img)
# A gray colormap is required for a single-channel image to render as
# grayscale rather than matplotlib's default colormap.
plt.imshow(gr, cmap=plt.get_cmap('gray'))
plt.show()
def canny(gr, low_threshold, high_threshold):
    """Apply the Canny edge-detection transform.

    Parameters
    ----------
    gr : numpy.ndarray
        Input image (ideally grayscale, and blurred beforehand).
    low_threshold : int
        Lower hysteresis threshold for cv2.Canny.
    high_threshold : int
        Upper hysteresis threshold for cv2.Canny.

    Returns
    -------
    numpy.ndarray
        Binary edge map.
    """
    # Bug fix: the original operated on the global `img` instead of the
    # `gr` argument, and overwrote the caller's thresholds with hard-coded
    # values (50 / 180), making the parameters dead.
    return cv2.Canny(gr, low_threshold, high_threshold)
# Edge-detect the grayscale image.  (The original passed the raw BGR
# image `img`; Canny should run on the single-channel grayscale `gr`.)
e = canny(gr, 50, 180)
plt.imshow(e, cmap=plt.get_cmap('gray'))
plt.show()
def gaussian_blur(e, kernel_size):
    """Apply a Gaussian noise-reduction kernel to an image.

    Parameters
    ----------
    e : numpy.ndarray
        Input image.
    kernel_size : int
        Side length of the (square, odd) Gaussian kernel.

    Returns
    -------
    numpy.ndarray
        Blurred image.
    """
    # Bug fix: the original overwrote the caller's kernel_size with a
    # hard-coded 3, so gaussian_blur(e, 5) silently used a 3x3 kernel.
    return cv2.GaussianBlur(e, (kernel_size, kernel_size), 0)
# Smooth the edge map with a 5x5 Gaussian kernel, then display it.
m = gaussian_blur(e, 5)
plt.imshow(m)
plt.show()
def region_of_interest(m, vertices):
    """
    Apply an image mask.

    Only keeps the region of the image defined by the polygon formed from
    `vertices`.  The rest of the image is set to black.

    Parameters
    ----------
    m : numpy.ndarray
        Input image (1- or 3/4-channel).
    vertices : numpy.ndarray
        Integer polygon vertices of shape (1, N, 2), as accepted by
        cv2.fillPoly.

    Returns
    -------
    numpy.ndarray
        Copy of `m` with all pixels outside the polygon zeroed.
    """
    # Bug fixes: the original overwrote the caller's `vertices` with a
    # hard-coded polygon, and computed a bitwise_and against the mask
    # while it was still all-zero (before fillPoly had run), leaving a
    # dead `masked_edges` variable.
    mask = np.zeros_like(m)

    # Fill color must match the channel count of the input image.
    if len(m.shape) > 2:
        channel_count = m.shape[2]  # e.g. 3 for RGB, 4 for RGBA
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    # Fill pixels inside the polygon defined by `vertices` with the fill color.
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    # Return the image only where mask pixels are nonzero.
    return cv2.bitwise_and(m, mask)
# Mask down to a trapezoidal region covering the lane area.
# cv2.fillPoly expects an int32 ndarray of shape (1, N, 2) — the original
# passed a plain tuple of tuples, which fillPoly rejects.
roi_vertices = np.array(
    [[(190, 470), (450, 270), (490, 270), (590, 470)]], dtype=np.int32
)
f = region_of_interest(m, roi_vertices)
plt.imshow(f)
plt.show()
def draw_lines(masked, lines, color=[255, 0, 0], thickness=2):
    """
    Draw `lines` onto `masked` in place with `color` and `thickness`.

    NOTE: this is the function you might want to use as a starting point
    once you want to average/extrapolate the detected segments into full
    lane lines: separate segments by slope ((y2-y1)/(x2-x1)) into left and
    right groups, average each group, and extrapolate to the top and bottom
    of the lane.  For semi-transparent lines, combine this function with
    weighted_img() below.

    Parameters
    ----------
    masked : numpy.ndarray
        Image to draw on (mutated in place).
    lines : iterable or None
        Hough-transform output; each entry is [[x1, y1, x2, y2]].
    color : list of int
        Line color.
    thickness : int
        Line thickness in pixels.
    """
    # Bug fixes: the original drew on an undefined global `line_image`,
    # ignored the `color`/`thickness` parameters, and then referenced an
    # undefined `edges` for blending that belongs in the caller, not here.
    if lines is None:
        # cv2.HoughLinesP returns None when no segments are found.
        return
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(masked, (x1, y1), (x2, y2), color, thickness)
def hough_lines(masked_image, rho, theta, threshold, min_line_len, max_line_gap):
    """
    Run a probabilistic Hough transform and render the detected lines.

    Parameters
    ----------
    masked_image : numpy.ndarray
        Single-channel output of a Canny transform (possibly region-masked).
    rho : float
        Distance resolution of the Hough accumulator, in pixels.
    theta : float
        Angular resolution of the accumulator, in radians.
    threshold : int
        Minimum number of accumulator votes (intersections) for a line.
    min_line_len : int
        Minimum number of pixels making up a line.
    max_line_gap : int
        Maximum gap in pixels between connectable line segments.

    Returns
    -------
    numpy.ndarray
        Black 3-channel image of the same height/width with lines drawn.
    """
    # Bug fixes: the original ran the transform on the global `img` instead
    # of `masked_image`, overwrote every caller-supplied parameter with
    # hard-coded values (after the transform had already run), sized the
    # output from an undefined global `edges`, and returned the draw_lines
    # function object instead of the rendered image.
    lines = cv2.HoughLinesP(
        masked_image, rho, theta, threshold,
        minLineLength=min_line_len, maxLineGap=max_line_gap,
    )
    line_img = np.zeros(
        (masked_image.shape[0], masked_image.shape[1], 3), dtype=np.uint8
    )
    draw_lines(line_img, lines)
    return line_img
# Render Hough lines from the masked edge image `f`.  (The original
# referenced undefined names — `masked_image`, `edges`, and parameter
# variables defined only inside hough_lines — and ran the transform twice.)
rho = 1              # distance resolution of the Hough grid, in pixels
theta = np.pi / 180  # angular resolution of the Hough grid, in radians
threshold = 1        # minimum number of votes (Hough-grid intersections)
min_line_len = 50    # minimum number of pixels making up a line
max_line_gap = 10    # maximum gap in pixels between connectable segments
ht = hough_lines(f, rho, theta, threshold, min_line_len, max_line_gap)
plt.imshow(ht)
plt.show()
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """
    Blend a rendered line image onto the original frame.

    `img` is the output of hough_lines(): an all-black image with lines
    drawn on it.  `initial_img` is the frame before any processing; both
    must have the same shape.  The result is computed as:

        initial_img * α + img * β + λ
    """
    blended = cv2.addWeighted(initial_img, α, img, β, λ)
    return blended
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement