Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- RNG rng(12345);
- int main() {
- sorting_contours();
- return 0;}
- void sorting_contours() {
- Mat image = imread("7.jpg");
- Mat orig = image.clone();
- Mat gray;
- cvtColor(image, gray, CV_BGR2GRAY);
- threshold(gray, gray, 229, 255, CV_THRESH_BINARY);
- gray = 255 - gray;
- Mat element = getStructuringElement(MORPH_ELLIPSE, Size(3, 3), Point(2, 2));
- morphologyEx(gray, gray, MORPH_CLOSE, element);
- Mat edged = imutils::auto_canny(gray);
- vector<Vec4i> hierarchy;
- vector<vector<Point>> contours;
- findContours(edged, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
- Mat drawing = image.clone();
- vector<Rect> boundRect;
- contours = imutils::sort_contours(contours, boundRect, imutils::SortContoursMethods::left_to_right);
- int pixelsPerMetric;
- Point2f tl, tr, br, bl, tltrX, tltrY, blbrX, blbrY;
- vector<RotatedRect> minRect(contours.size());
- for (int i = 0; i < contours.size(); i++)
- {
- if (contours[i].size() > 100)
- {
- minRect[i] = minAreaRect(Mat(contours[i]));
- Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
- Point2f rect_points[4]; minRect[i].points(rect_points);
- for (int j = 0; j < 4; j++)
- line(drawing, rect_points[j], rect_points[(j + 1) % 4], color, 1, 8);
- }
- }
- imshow("left_to_right", drawing);
- waitKey(0);}
// NOTE(review): the two statements below were pasted outside any function and
// reference `minRect[i]` / `pointss`, which only exist inside the loop in
// sorting_contours(); they belong at the end of that loop body (with `pointss`
// declared first). Commented out so the file parses.
// boxPoints(minRect[i], pointss);
// pointss = imutils::order_points(pointss);
// what I should do next?
import imageio
import numpy
import scipy
import scipy.ndimage
import skimage
import skimage.filters
import skimage.filters.rank
import skimage.io
import skimage.morphology
import skimage.segmentation
# Load the photo and promote it to float64 so the Sobel responses computed
# below can go negative and exceed 255 without wrapping.
# NOTE(review): the pasted path had lost its backslashes
# ('C:UsersJeremiahPictures...'); the separators are restored here.
image = imageio.imread(r'C:\Users\Jeremiah\Pictures\R9j7GPe.jpg')
image_array = numpy.float64(image)
# Hand-rolled color Sobel filter. It is hard-coded deliberately: the stock
# filters do not handle color images the desired way. To use a stock filter
# instead, replace everything down to the next comment (and convert the image
# to grayscale first).
# NOTE(review): scipy.ndimage.filters.* is the deprecated namespace (removed
# in recent SciPy); the calls below use scipy.ndimage.correlate directly.
# NOTE(review): the "_x" kernel actually responds to vertical gradients and
# "_y" to horizontal (names look swapped) — harmless here, since only the
# symmetric combination of both is used downstream.
sobel_a = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
sobel_b = [[1, 0, -1], [2, 0, -2], [1, 0, -1]]
R_x = scipy.ndimage.correlate(image_array[:, :, 0], sobel_a)
G_x = scipy.ndimage.correlate(image_array[:, :, 1], sobel_a)
B_x = scipy.ndimage.correlate(image_array[:, :, 2], sobel_a)
R_y = scipy.ndimage.correlate(image_array[:, :, 0], sobel_b)
G_y = scipy.ndimage.correlate(image_array[:, :, 1], sobel_b)
B_y = scipy.ndimage.correlate(image_array[:, :, 2], sobel_b)
# Di Zenzo-style structure tensor entries, summed over the color channels.
Jacobian_x = R_x**2 + G_x**2 + B_x**2
Jacobian_y = R_y**2 + G_y**2 + B_y**2
Jacobian_xy = R_x * R_y + G_x * G_y + B_x * B_y
# Discriminant of the 2x2 tensor's characteristic polynomial; algebraically
# (Jx - Jy)^2 + 4*Jxy^2 >= 0, so fabs only guards against rounding.
Determinant = numpy.sqrt(numpy.fabs((Jacobian_x**2) - (2 * Jacobian_x * Jacobian_y) + (Jacobian_y**2) + 4 * (Jacobian_xy**2)))
# Largest eigenvalue = strongest directional color change; its square root
# serves as the edge-magnitude image.
Maximum_Eigenvalue = (Jacobian_x + Jacobian_y + Determinant) / 2
Edges = numpy.sqrt(Maximum_Eigenvalue)
# Convert the edge magnitude to binary and dilate once to close any holes.
# NOTE(review): scipy.ndimage.morphology.* is the deprecated namespace; the
# same functions live directly under scipy.ndimage.
Threshold = skimage.filters.threshold_mean(Edges)
Binary_Image = Edges > Threshold
Dilated_Binary_Image = scipy.ndimage.binary_dilation(Binary_Image)
# Clear noise touching the border, fill all closed objects, then erode once
# to undo the earlier dilation and restore the objects' original dimensions.
Cleared_Borders = skimage.segmentation.clear_border(Dilated_Binary_Image, buffer_size=10)
Filled_Holes = scipy.ndimage.binary_fill_holes(Cleared_Borders)
Restored_Height = scipy.ndimage.binary_erosion(Filled_Holes)
# A pixel is an object border when its 3x3 disk neighborhood contains both
# background (local min == 0) and foreground (local max == 255).
# NOTE(review): the rank filters expect uint8 input; skimage coerces the
# boolean mask (True -> 255). If your skimage version rejects bool here,
# pass skimage.util.img_as_ubyte(Restored_Height) instead — TODO confirm.
selem = skimage.morphology.disk(1)
Object_Borders = (skimage.filters.rank.minimum(Restored_Height, selem) == 0) & (skimage.filters.rank.maximum(Restored_Height, selem) == 255)
# The rectangles are isolated, but the people already form closed objects in
# the dilated image, so the rectangles are not strictly necessary to find each
# individual's highest and lowest points. Label the border image: each label
# is one object, `features` is the number of objects found.
Labeled_Edges, features = scipy.ndimage.label(Object_Borders)
# For each object, take the y-extent of its edge pixels as its height. This
# is slightly off here because the stars on the rectangle edges occupy pixels;
# without the rectangles it would run from head to shoe. Objects shorter than
# 10 px are treated as noise and skipped.
# NOTE(review): find_objects() was previously re-run on every loop iteration
# (O(n^2)); it is now computed once and iterated directly.
Heights = []
for bounding_slice in scipy.ndimage.find_objects(Labeled_Edges):
    window = Labeled_Edges[bounding_slice]  # pixels inside this object's bbox
    [y, x] = numpy.where(window > 0)        # coordinates of the edge pixels
    Height = max(y) - min(y)
    if Height > 10:  # ignore objects too small to be people
        Heights.append(Height)
print(Heights)
# Example output: [289, 287, 285, 273]
Add Comment
Please sign in to add a comment.