Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import cv2
- import matplotlib.pyplot as plt
- import numpy as np
- from mpl_toolkits.mplot3d import Axes3D
- from matplotlib import cm
- from matplotlib.ticker import LinearLocator, FormatStrFormatter
- from math import factorial
- from collections import Counter
- from sklearn.cluster import AffinityPropagation
- from itertools import count
# All sample images live in one project directory; build each path from a
# single constant instead of repeating the hard-coded prefix thirteen times.
# NOTE(review): absolute Windows path -- adjust for the local checkout.
IMG_DIR = 'E:/PythonProjects/ComponentProcessing1.0'

img_1 = cv2.imread(IMG_DIR + '/1.jpg')
img_2 = cv2.imread(IMG_DIR + '/2.jpg')
img_3 = cv2.imread(IMG_DIR + '/3.jpg')
img_4 = cv2.imread(IMG_DIR + '/4.jpg')
img_5 = cv2.imread(IMG_DIR + '/5.jpg')
img_6 = cv2.imread(IMG_DIR + '/6.jpg')
img_7 = cv2.imread(IMG_DIR + '/7.jpg')
img_8 = cv2.imread(IMG_DIR + '/8.jpg')
img_9 = cv2.imread(IMG_DIR + '/9.jpg')
img_10 = cv2.imread(IMG_DIR + '/10.jpg')
img_11 = cv2.imread(IMG_DIR + '/1_ECU.jpg')
img_12 = cv2.imread(IMG_DIR + '/2_ECU.jpg')
img_13 = cv2.imread(IMG_DIR + '/3_ECU.jpg')
def show_img(img):
    """Display *img* with the 'bone' colormap in a blocking window."""
    plt.imshow(img)
    plt.set_cmap('bone')
    plt.show()
def convert_to_LAB(img):
    """ Step 1: convert to the LAB color-space and keep only luminance.
    Argumentation: the L channel isolates brightness from color information.
    Input: BGR image
    Output: L channel as a 2-D array """
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    luminance, _a, _b = cv2.split(lab)
    return luminance
def purity_extraction(img):
    """Saturation-like 'purity' channel scaled to 0..255.

    Pixels where all channels are zero divide 0/0 and produce NaN; those are
    mapped to full purity (255), matching the original behavior.
    """
    blue, green, red = cv2.split(img)
    channel_min = np.minimum.reduce([blue, green, red])
    channel_max = np.maximum.reduce([blue, green, red])
    purity = (1 - np.divide(channel_min, channel_max)) * 255
    purity[np.isnan(purity)] = 255
    return purity.astype(np.uint8, copy=False)
def stripe_over_axis(img, axis: int, axis_coord: int):
    """Return one column of *img* when *axis* is truthy, otherwise one row."""
    if axis:
        return img[:, axis_coord]
    return img[axis_coord, :]
def std_dev_summary_over_axis(img, axis: int):
    """Standard deviation of *img* collapsed along the given axis."""
    return np.asarray(img).std(axis=axis)
def std_dev_over_axis_with_kernel(img, axis: int, axis_coord: int, kernel_size: int):
    """Sliding-window standard deviation along a single stripe of *img*.

    The stripe at *axis_coord* is cut into every contiguous window of
    *kernel_size* samples; the std dev of each window is returned
    (length = stripe length - kernel_size + 1).
    """
    stripe = stripe_over_axis(img, axis, axis_coord)
    n_windows = img.shape[not axis] - kernel_size + 1
    windows = np.stack([stripe[start:start + kernel_size] for start in range(n_windows)])
    return np.std(windows, axis=1)
def plot_arrays(img_original, img_lab, *arrays):
    # Diagnostic figure: the original image, its L channel, and up to four
    # 1-D std-dev profiles, all on one matplotlib grid.  Blocks until closed.
    titles = ['img original', 'img lab', 'std dev over columns', 'std dev over rows',
              'std dev over columns kernel', 'std dev over rows kernel']
    plt.figure(1)

    def mark_img(img, position: int, title_idx: int):
        # Place one image subplot at the given 3-digit subplot code.
        plt.subplot(position)
        plt.imshow(img)
        plt.set_cmap('bone')
        plt.title(titles[title_idx])

    # 3-digit subplot code base: row count grows with the number of extra
    # arrays (assumes len(arrays) keeps the code a valid RCN triple).
    grid_size = len(arrays)*50 + 100 + 20
    mark_img(img_original, grid_size+1, 0)
    mark_img(img_lab, grid_size + 2, 1)
    for idx, arr in enumerate(arrays):
        plt.subplot(grid_size + 3 + idx)
        plt.plot(np.arange(arr.size), arr)
        plt.title(titles[2+idx])
    plt.show()
def create_std_dev_grid(img, axis: int, kernel_size: int):
    """ Step 2: Build the per-stripe sliding-window std-dev matrix.

    Every stripe along *axis* is reduced with std_dev_over_axis_with_kernel,
    so the matrix is (kernel_size - 1) samples shorter along the scanned
    direction than the input image.
    Input: img: L channel of image
    Output: np.ndarray of std-dev values """
    trimmed = img.shape[not axis] - (kernel_size - 1)
    if axis:
        points = np.zeros((trimmed, img.shape[1]))
    else:
        points = np.zeros((img.shape[0], trimmed))
    for coord in range(img.shape[axis]):
        stripe_std = std_dev_over_axis_with_kernel(img, axis, coord, kernel_size)
        if axis:
            points[:, coord] = stripe_std
        else:
            points[coord] = stripe_std
    return points
def normalize_matrix(std_dev_matrix: np.ndarray):
    """ Step 2b: scale the matrix so its maximum value becomes 1. """
    peak = np.max(std_dev_matrix)
    return std_dev_matrix / peak
def summarize_std_dev(img, kernel_size: int):
    """ Step 2: Function is controller for create_std_dev_grid function, because create_std_dev_grid is axis dependent
    and what we need is information both from 0 and 1 axis. It also convolve extracted results.
    TODO: set operator (current is multiplication as parameter to method)
    Argumentation: Simplifying code
    Input: img - image in L channel (from LAB colorspace), kernel_size - kernel size that will be used within
    create_std_dev_grid.
    Output: np.ndarray which is convolution of results over axis.
    """
    std_over_rows = normalize_matrix(create_std_dev_grid(img, 0, kernel_size))
    std_over_columns = normalize_matrix(create_std_dev_grid(img, 1, kernel_size))
    # Each grid is shorter along its scanned axis; dim_diff is half of that
    # size difference, used to crop both grids to the common central region.
    dim_diff = int((std_over_columns.shape[1] - std_over_rows.shape[1])/2)
    # Combine by element-wise multiplication (alternative: np.maximum below).
    return std_over_rows[dim_diff:-dim_diff, :] * std_over_columns[:, dim_diff:-dim_diff], dim_diff
    # return np.maximum(std_over_rows[dim_diff:-dim_diff, :], std_over_columns[:, dim_diff:-dim_diff]), dim_diff
def plot_matrix(points: np.ndarray):
    """Render a 2-D matrix as a 3-D surface (Z = matrix value); blocks."""
    X = np.arange(points.shape[1])
    Y = np.arange(points.shape[0])
    X, Y = np.meshgrid(X, Y)
    fig = plt.figure()
    # BUG FIX: fig.gca(projection='3d') stopped accepting keyword arguments
    # and was removed in matplotlib 3.6; add_subplot is the supported API.
    ax = fig.add_subplot(projection='3d')
    surf = ax.plot_surface(X, Y, points, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    # Customize the z axis (data is normalized, so ~0..1).
    ax.set_zlim(-0.1, 1.2)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    # Set labels for axes.
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
def select_extended_std_dev_matrix(original_img, summarized_std_dev: np.ndarray, offset: int, thresh: float):
    """Zero-pad the std-dev map back to image size, threshold it into a
    0/255 uint8 mask, and apply the mask to the image.

    Low std-dev (< thresh) selects flat regions; the zero padding border is
    below any positive threshold and is therefore selected as well.
    """
    padded_shape = (summarized_std_dev.shape[0] + 2 * offset,
                    summarized_std_dev.shape[1] + 2 * offset)
    padded = np.zeros(padded_shape)
    padded[offset:-offset, offset:-offset] = summarized_std_dev
    mask = np.where(padded < thresh, 255, 0).astype(np.uint8)
    return cv2.bitwise_and(original_img, mask), mask
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (or differentiate) a 1-D signal with a Savitzky-Golay filter.

    Args:
        y: 1-D array of samples.
        window_size: odd length of the local fit window.
        order: polynomial order of the local fit (must be < window_size).
        deriv: derivative order to return (0 = plain smoothing).
        rate: sample spacing factor applied when deriv > 0.
    Returns:
        np.ndarray of the same length as *y*.
    """
    # BUG FIX: np.int was removed in NumPy 1.24 and np.mat is deprecated;
    # use the builtins and plain ndarrays (np.linalg.pinv works on arrays).
    window_size = abs(int(window_size))
    order = abs(int(order))
    half_window = (window_size - 1) // 2
    # Precompute the least-squares convolution coefficients.
    b = np.array([[k ** i for i in range(order + 1)]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * factorial(deriv)
    # Pad the signal at the extremes with values mirrored around the
    # endpoints so the 'valid' convolution keeps the original length.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def smooth_iteratively(arr: np.ndarray, iterations: int):
    """Apply the Savitzky-Golay filter (window 31, order 3) repeatedly."""
    smoothed = np.array(arr)
    for _pass in range(iterations):
        smoothed = savitzky_golay(smoothed, 31, 3)
    return smoothed
def get_stripe(mask: np.ndarray, axis_coord: int, axis: int):
    """Return a row of *mask* when *axis* is truthy, otherwise a column.

    NOTE(review): the axis convention here is the opposite of
    stripe_over_axis; kept as in the original.
    """
    return mask[axis_coord, :] if axis else mask[:, axis_coord]
def filter_points(mask_stripe: np.ndarray, points: np.ndarray, minimal_length: int = 20):
    """Split a 0/255 stripe into runs and drop short ones.

    Args:
        mask_stripe: 1-D array of 0/255 values.
        points: indices where the stripe changes value
                (np.where(stripe[:-1] != stripe[1:])[0]).
        minimal_length: runs must be strictly longer than this to survive.
    Returns:
        (runs_of_255, runs_of_0) as (N, 3) arrays of [left, right, length].
        Naming kept from the original: runs of 255 go under 'black'.
        The tail segment after the last change point is not emitted.
    """
    filtered_points = {'black': [], 'white': []}
    # BUG FIX: the original seeded left_border with mask_stripe[0] -- a pixel
    # VALUE -- which silently mis-indexes whenever the stripe starts at 255.
    left_border = 0

    def append_point(color: str, left: int, right: int):
        filtered_points[color].append(np.array([left, right, right - left]))

    def stack_array(color: str):
        pts = filtered_points[color]
        # BUG FIX: an all-one-color stripe leaves one list empty and
        # np.stack([]) raises; return an empty (0, 3) array instead.
        return np.stack(pts) if pts else np.zeros((0, 3), dtype=int)

    def remove_noise(pts: np.ndarray, min_len: int):
        return pts[np.where(pts[:, 2] > min_len)]

    for right_border in points:
        right_border += 1  # change index -> exclusive right border
        if all(mask_stripe[left_border:right_border] == 255):
            append_point('black', left_border, right_border)
        else:
            append_point('white', left_border, right_border)
        left_border = right_border
    return (remove_noise(stack_array('black'), minimal_length),
            remove_noise(stack_array('white'), minimal_length))
def get_mask_with_points(img, points: dict, axis: int, color_l=(0, 0, 255), color_r=(0, 255, 0)):
    """Draw every left/right run border in *points* onto *img* as a circle.

    Grayscale input is converted to BGR first so the colors are visible.
    """
    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    for coord, segments in points.items():
        for seg in segments:
            if not axis:
                left, right = (coord, seg[0]), (coord, seg[1])
            else:
                left, right = (seg[0], coord), (seg[1], coord)
            cv2.circle(img, left, 2, color_l, 1)
            cv2.circle(img, right, 2, color_r, 1)
    return img
def get_mask_from_dict(img_shape: tuple, points: dict, axis: int):
    """Rasterize the run borders stored in *points* into a 0/255 matrix."""
    mask = np.zeros(img_shape)
    for coord, segments in points.items():
        for seg in segments:
            if axis:
                mask[seg[0], coord] = 255
                mask[seg[1], coord] = 255
            else:
                mask[coord, seg[0]] = 255
                mask[coord, seg[1]] = 255
    return mask
def create_counters(mask: np.ndarray, axis: int):
    """Sample 200 random stripes of *mask* and collect their value runs.

    Returns two dicts keyed by the sampled stripe coordinate: runs of
    0-pixels ('white') and runs of 255-pixels ('black'), as produced by
    filter_points.  Stripes with no value change are skipped.
    """
    number_of_points = 200
    white, black = {}, {}
    # Sample without replacement so no stripe is processed twice.
    sample_points = np.random.choice(mask.shape[not axis], size=number_of_points, replace=False)
    for coord in sample_points:
        stripe = get_stripe(mask, coord, axis)
        transitions = np.where(stripe[:-1] != stripe[1:])[0]
        if transitions.size > 0:
            runs_255, runs_0 = filter_points(stripe, transitions)
            white[coord] = runs_0
            black[coord] = runs_255
    return white, black
def stack_from_dict(points: dict):
    """Concatenate all (N_i, 3) run arrays in *points* into one float array."""
    arrays = [np.asarray(v, dtype=float) for v in points.values()]
    total_rows = sum(a.shape[0] for a in arrays)
    stacked = np.zeros((total_rows, 3))
    offset = 0
    for a in arrays:
        stacked[offset:offset + a.shape[0]] = a
        offset += a.shape[0]
    return stacked
def find_lines(points: dict, axis: int):
    """Cluster the run borders along *axis* with affinity propagation."""
    coords = stack_from_dict(points)[:, axis].reshape(-1, 1)
    clustering = AffinityPropagation(max_iter=280, copy=False, convergence_iter=5)
    clustering.fit(coords)
    return clustering.labels_, clustering.cluster_centers_
def find_lines_kernel(mask: np.ndarray, kernel_size: int, axis: int):
    """Count nonzero pixels in half-overlapping slabs along *axis*.

    Args:
        mask: 2-D array.
        kernel_size: slab thickness in pixels (stride is kernel_size // 2).
        axis: 0 slides row slabs, otherwise column slabs.
    Returns:
        (n_windows, 2) array of [nonzero count, slab center coordinate].
    """
    step = int(kernel_size / 2)
    right_borders = range(kernel_size, mask.shape[axis], step)
    # BUG FIX: the original allocated int(mask.shape[not axis] / step) rows,
    # i.e. sized the output from the WRONG axis -- on non-square masks that
    # either left zero-padded tail rows or overflowed with an IndexError.
    nonzero_vals = np.zeros((len(right_borders), 2))
    left_border = 0
    for i, right_border in enumerate(right_borders):
        if axis == 0:
            window = mask[left_border:right_border, :]
        else:
            window = mask[:, left_border:right_border]
        nonzero_vals[i, :] = np.array([np.count_nonzero(window),
                                       int((left_border + right_border) / 2)])
        left_border += step
    return nonzero_vals
def select_pin_areas_from_mask(mask: np.ndarray):
    # Diagnostic view only (no return value): the per-row sum of the mask,
    # its smoothed version, and the mask itself, side by side.
    # NOTE(review): despite the name, axis=1 sums across columns, i.e. one
    # value per row.
    sum_over_columns = np.sum(mask, axis=1)
    sum_smoothed = smooth_iteratively(sum_over_columns, 5)
    plt.subplot(221)
    plt.plot(np.arange(sum_smoothed.size), sum_smoothed)
    plt.subplot(222)
    plt.plot(np.arange(sum_over_columns.size), sum_over_columns)
    plt.subplot(223)
    plt.imshow(mask)
    plt.show()
def get_two_peaks(arr: np.ndarray):
    """Return the column-1 value at the column-0 maximum of each half of *arr*.

    The array is cut at its midpoint; each half contributes the second-column
    entry of its row with the largest first-column value.
    """
    def peak_value(half: np.ndarray):
        idx = np.argpartition(half[:, 0], -1)[-1:]
        return half[idx, 1]

    cut_point = int(arr.shape[0] / 2)
    return peak_value(arr[:cut_point, :]), peak_value(arr[cut_point:, :])
# ---------------------------------------------------------------------------
# Pipeline driver: locate line structure in one ECU image.
img = img_12
img_lab = convert_to_LAB(img)
img_purity = purity_extraction(img)
# plot_arrays(img,
#             img_lab,
#             std_dev_summary_over_axis(img_lab, 0),
#             std_dev_summary_over_axis(img_lab, 1),
#             std_dev_over_axis_with_kernel(img_lab, 0, 160, 41),
#             std_dev_over_axis_with_kernel(img_lab, 1, 480, 41))
# plot3D_cut_overview(img_lab, 41)
# Step 2: combined local std-dev map (kernel 31) plus its crop offset.
summarized_std_dev, offset = summarize_std_dev(img_lab, 31)
# Threshold 0.2: low-variance pixels become the 255 regions of the mask.
masked_img, mask = select_extended_std_dev_matrix(img_lab, summarized_std_dev, offset, 0.2)
plot_matrix(summarized_std_dev)
show_img(np.hstack((img_lab, masked_img)))
show_img(mask)
## select_pin_areas_from_mask(mask)
# Sample random stripes along both axes and collect their 0/255 runs.
axis = 0
w_x, b_x = create_counters(mask, axis)
# img_with_mask_black_x = get_mask_with_points(img_lab, w_x, axis)
# img_with_both_masks_x = get_mask_with_points(img_with_mask_black_x, b_x, axis, color_l=(255, 255, 0), color_r=(0, 255, 255))
axis = 1
w_y, b_y = create_counters(mask, axis)
# img_with_mask_black_y = get_mask_with_points(img_lab, w_y, axis)
# img_with_both_masks_y = get_mask_with_points(img_with_mask_black_y, b_y, axis, color_l=(255, 255, 0), color_r=(0, 255, 255))
# show_img(np.hstack((img_with_both_masks_x, img_with_both_masks_y)))
# labels, centers = find_lines(w_x, 0)
# counter = Counter(labels).most_common(1)
# most_common_x = centers[counter[0][0]]
# cv2.line(img_with_mask_black_y, (most_common_x, 0), (most_common_x, img_with_both_masks_y.shape[0]), (255, 0, 170), 2)
# show_img(img_with_mask_black_y)
# Rasterize the collected run borders into point masks and merge them.
mask_x_wx = get_mask_from_dict(img_lab.shape, w_x, 1)
mask_x_wy = get_mask_from_dict(img_lab.shape, b_x, 1)
mask_y_wx = get_mask_from_dict(img_lab.shape, w_y, 0)
mask_y_wy = get_mask_from_dict(img_lab.shape, b_y, 0)
mask_f = cv2.bitwise_or(mask_x_wx, mask_x_wy)
mask_f = cv2.bitwise_or(mask_y_wx, mask_f)
mask_f = cv2.bitwise_or(mask_y_wy, mask_f)
show_img(mask_f)
# Sliding-window nonzero counts; the two strongest windows per direction
# give the candidate line positions.
nonzero_rows = find_lines_kernel(mask_f, 15, 0)
nonzero_cols = find_lines_kernel(mask_f, 15, 1)
vertical_line_1, vertical_line_2 = get_two_peaks(nonzero_rows)
horizontal_line_1, horizontal_line_2 = get_two_peaks(nonzero_cols)
# img_colored = cv2.cvtColor(np.array(mask_f), cv2.COLOR_GRAY2BGR)
cv2.line(mask_f, (int(horizontal_line_1), 0), (int(horizontal_line_1), mask_f.shape[0]), (127, 127, 0), 5)
cv2.line(mask_f, (int(horizontal_line_2), 0), (int(horizontal_line_2), mask_f.shape[0]), (127, 127, 0), 5)
cv2.line(mask_f, (0, int(vertical_line_1)), (mask_f.shape[1], int(vertical_line_1)), (250, 127, 127), 5)
cv2.line(mask_f, (0, int(vertical_line_2)), (mask_f.shape[1], int(vertical_line_2)), (250, 127, 127), 5)
show_img(mask_f)
plt.subplot(121)
plt.plot(nonzero_rows[:, 0])
plt.subplot(122)
plt.plot(nonzero_cols[:, 0])
plt.show()
# Original Polish TODO kept verbatim below.  Translation: tie the measure to
# the smallest distance between consecutive points in a slice (try fitting a
# best line to the slice and use the point-count / distance-to-line ratio);
# make the kernel size depend on the image size.
'''TODO: uzaleznic miare od najmniejszej odleglosci miedzy kolejnymi punktami w wycinku
(proba znalezienia prostej najlepiej pasujacej do wycinka i stosunek lba punktow/odl od prostej)
TODO: uzaleznic rozmiar jadra od rozmiaru obrazka'''
- #######################################################################################
- import numpy as np
- from sklearn import linear_model
- import matplotlib.pyplot as plt
- import cv2
def show_img(img):
    """Show *img* in a blocking matplotlib window using the 'bone' colormap."""
    plt.imshow(img)
    plt.set_cmap('bone')
    plt.show()
# ---------------------------------------------------------------------------
# Synthetic demo: scatter random white pixels on a 40x700 canvas and fit a
# line through them with RANSAC.
mtrx = np.zeros((40, 700))
amount_of_points = 200
points_y = np.random.randint(0, 40, amount_of_points)
points_x = np.random.randint(0, 700, amount_of_points)
mtrx[points_y, points_x] = 255
# (x, y) coordinates of every lit pixel, one row per point.
non_zero_idx = np.flipud(np.vstack(np.where(mtrx != 0))).T
ransac = linear_model.RANSACRegressor()
ransac.fit(non_zero_idx[:, 0].reshape(-1, 1), non_zero_idx[:, 1])
# BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int dtype.
line_x = np.arange(0, 700)[:, np.newaxis].astype(int)
line_y = ransac.predict(line_x).astype(int)
img_colored = cv2.cvtColor(mtrx.astype(np.uint8), cv2.COLOR_GRAY2BGR)
# BUG FIX: cv2.line expects plain int coordinates, not 1-element arrays.
cv2.line(img_colored, (int(line_x[0]), int(line_y[0])),
         (int(line_x[-1]), int(line_y[-1])), (255, 255, 0), 1)
show_img(img_colored)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement