#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline

import math

def grayscale(img):
    """Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')"""
    return np.dot(img[..., :3], [0.299, 0.587, 0.114])
    #return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

img = mpimg.imread('test_images/solidWhiteRight.jpg')
gray = grayscale(img)
plt.imshow(gray, cmap=plt.get_cmap('gray'))
plt.show()

#def canny(img, low_threshold, high_threshold):
#    """Applies the Canny transform"""
#    return cv2.Canny(img, low_threshold, high_threshold)

kernel_size = 5
blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)
low_threshold = 50
high_threshold = 180
# cv2.Canny needs an 8-bit image; the float array from np.dot is converted directly here
# (the original round-trip through scipy's imsave/imread only served to get a uint8 image back)
blurred = np.uint8(blur_gray)
edges = cv2.Canny(blurred, low_threshold, high_threshold)

plt.imshow(edges, cmap='Greys_r')
plt.show()

#def gaussian_blur(img, kernel_size):
#    """Applies a Gaussian Noise kernel"""
#    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)

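# A minimal sketch (not in the original paste) showing the same blur + Canny step written
# with the two helper functions that are commented out above; `gray`, `kernel_size` and the
# thresholds are the values defined earlier.
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel"""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)

def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    return cv2.Canny(img, low_threshold, high_threshold)

# equivalent to the blur + Canny calls above:
#edges = canny(gaussian_blur(np.uint8(gray), kernel_size), low_threshold, high_threshold)
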
def region_of_interest(edges, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(edges)

    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(edges.shape) > 2:
        channel_count = edges.shape[2] # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(edges, mask)
    return masked_image

# define the region polygon at the call site and mask the edge image
imshape = edges.shape
vertices = np.array([[(0, imshape[0]), (450, 270), (490, 270), (imshape[1], imshape[0])]], dtype=np.int32)
masked = region_of_interest(edges, vertices)
plt.imshow(masked, cmap='gray')
plt.show()

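# Hypothetical variant (an assumption, not in the original paste): derive the polygon from
# the frame size instead of hard-coding pixel coordinates, so the same mask works on frames
# that are not 960x540. The fractions roughly match the 450/490/270 values used above.
vertices_scaled = np.array([[(0, imshape[0]),
                             (int(0.47 * imshape[1]), int(0.50 * imshape[0])),
                             (int(0.51 * imshape[1]), int(0.50 * imshape[0])),
                             (imshape[1], imshape[0])]], dtype=np.int32)
#masked = region_of_interest(edges, vertices_scaled)
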
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).

    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.

    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image in place (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)

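# A rough sketch (mine, not part of the original paste) of the averaging/extrapolation the
# docstring above describes: split segments by slope sign, fit one line per side with
# np.polyfit, then draw it from the bottom of the image up to an assumed horizon y_top.
# The slope threshold 0.3 and y_top=330 are assumptions, not values from the original.
def draw_averaged_lines(img, lines, y_top=330, color=[255, 0, 0], thickness=10):
    left_pts, right_pts = [], []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # skip vertical segments to avoid division by zero
            slope = (y2 - y1) / (x2 - x1)
            if slope < -0.3:           # negative slope -> left lane line
                left_pts += [(x1, y1), (x2, y2)]
            elif slope > 0.3:          # positive slope -> right lane line
                right_pts += [(x1, y1), (x2, y2)]
    y_bottom = img.shape[0]
    for pts in (left_pts, right_pts):
        if len(pts) < 2:
            continue
        xs, ys = zip(*pts)
        m, b = np.polyfit(xs, ys, 1)   # fit y = m*x + b through all points on this side
        cv2.line(img,
                 (int((y_bottom - b) / m), y_bottom),
                 (int((y_top - b) / m), y_top),
                 color, thickness)
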
def hough_lines(edges, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `edges` should be the output of a Canny transform.

    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                            minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((edges.shape[0], edges.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img

rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 1 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 50 # minimum number of pixels making up a line
max_line_gap = 10 # maximum gap in pixels between connectable line segments

# run the Hough transform on the masked edge image and show the detected segments
line_img = hough_lines(masked, rho, theta, threshold, min_line_length, max_line_gap)
plt.imshow(line_img)
plt.show()

# overlay the detected segments on the edge image
color_edges = np.dstack((edges, edges, edges))
lines_edges = cv2.addWeighted(color_edges, 0.8, line_img, 1, 0)
plt.imshow(lines_edges)
plt.show()

# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """
    `img` is the output of hough_lines(), an image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.

    `initial_img` should be the image before any processing.

    The result image is computed as follows:

    initial_img * α + img * β + λ
    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, λ)
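
# Usage sketch (not in the original paste): blend the Hough-line image over the original
# colour frame loaded earlier; `img` and `line_img` are assumed to still be in scope and
# to have the same shape.
result = weighted_img(line_img, img)
plt.imshow(result)
plt.show()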