Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- /**
- * @file SURF_FlannMatcher
- * @brief SURF detector + descriptor + FLANN Matcher
- * @author A. Huaman
- */
#include <stdio.h>
#include <time.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
//#include "opencv2/xfeatures2d.hpp"
//#include "ORB_source.cpp"
//#include "filtering.cpp"
- using namespace cv;
- /**
- * @function main
- * @brief Main function
- */
- int main( int argc, char** argv )
- {
- //freopen("d:\\only_ranking.txt","w",stdout);
- //The following variables are for execution time calculation using Clock
- clock_t start, end_time , det_time , descr_time , match_time , ransac_time ;
- double cpu_time_used;
- start = clock();
- printf("The Program is starting\n");
- //Mat img_1 = imread( "C://uav/img_5.bmp", CV_LOAD_IMAGE_COLOR );
- //Mat img_2 = imread( "C://uav/img_10.bmp", CV_LOAD_IMAGE_COLOR );
- // Mat img_1 = imread( "C://view_pnt_graffiti/img1.ppm", CV_LOAD_IMAGE_COLOR );
- //Mat img_2 = imread( "C://view_pnt_graffiti/img5.ppm", CV_LOAD_IMAGE_COLOR );
- //Mat img_1 = imread( "C://zoom_rotn_boat/img1.pgm", CV_LOAD_IMAGE_COLOR );
- //Mat img_2 = imread( "C://zoom_rotn_boat/img5.pgm", CV_LOAD_IMAGE_COLOR );
- // Taking the pair of 1 and 7 as the performance drastically degrades for 1 and 8 image pair
- Mat img_1 = imread( "/home/dell/Documents/Postgraduate_Application/NUS/Dropbox/NUS_Sem_1/EE5003/eclipse/ORB/Source/photo1.png", CV_LOAD_IMAGE_COLOR );
- Mat img_2 = imread( "/home/dell/Documents/Postgraduate_Application/NUS/Dropbox/NUS_Sem_1/EE5003/eclipse/ORB/Source/photo2.png", CV_LOAD_IMAGE_COLOR );
- if( !img_1.data || !img_2.data )
- {
- std::cout<< " --(!) Error reading images " << std::endl; return -1;
- }
- //-- Step 1: Detect the keypoints using SURF Detector
- //int minHessian = 400;
- //int count = 0;
- //OrbFeatureDetector detector(400,1.8f,1,0,0,2,0,31);
- //OrbFeatureDetector detector(400,1.2f,8,31,0,2,0,31);
- //OrbFeatureDetector detector(1000,1.5f,8,0,0,2,0,31);
- cv::Ptr<cv::ORB> detector = cv::ORB::create();
- std::vector<KeyPoint> keypoints_1, keypoints_2;
- //detector -> detect( img_1, keypoints_1 );
- //detector -> detect( img_2, keypoints_2 );
- Mat descriptors_1, descriptors_2;
- detector -> detectAndCompute( img_1, Mat(), keypoints_1, descriptors_1 );
- detector -> detectAndCompute( img_2, Mat(), keypoints_2, descriptors_2 );
- det_time = clock();
- int No_of_keypoints_in_Image1 = keypoints_1.size();
- int No_of_keypoints_in_Image2 = keypoints_2.size();
- //printf("No_of_keypoints_in_Image1=%d\nNo_of_keypoints_in_Image2=%d\n", No_of_keypoints_in_Image1, No_of_keypoints_in_Image2);
- //for( int q = 0 ; q< keypoints_1.size(); q++)
- //printf(" x = %f, y = %f \n", keypoints_1[q].pt.x ,keypoints_1[q].pt.y);
- //-- Step 2: Calculate descriptors (feature vectors)
- //OrbDescriptorExtractor extractor;
- cv::DescriptorExtractor extractor;
- printf("Size_of_Descriptor_1 = %d\n", descriptors_1.size());
- printf("Size_of_Descriptor_2 = %d \n", descriptors_2.size());
- descr_time = clock();
- //-- Step 3: Matching descriptor vectors using FLANN matcher
- //FlannBasedMatcher matcher;
- BFMatcher matcher(NORM_HAMMING);
- std::vector< DMatch > matches;
- matcher.match( descriptors_1, descriptors_2, matches );
- double max_dist = 0; double min_dist = 100;
- //-- Quick calculation of max and min distances between keypoints
- for( int i = 0; i < descriptors_1.rows; i++ )
- {
- double dist = matches[i].distance;
- if( dist < min_dist )
- {
- min_dist = dist;
- //printf("value of matches[i]=%d\n",matches[i]);
- // printf("min_dist=%f\n",min_dist);
- }
- if( dist > max_dist )
- {
- max_dist = dist;
- //printf("value of matches[i]=%d\n",matches[i]);
- }
- }
- // printf("-- Max dist : %f \n", max_dist );
- // printf("-- Min dist : %f \n", min_dist );
- //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
- //-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
- //-- small)
- //-- PS.- radiusMatch can also be used here.
- std::vector< DMatch > good_matches, good_matches1;
- for( int i = 0; i < descriptors_1.rows; i++ )
- {
- if( matches[i].distance <= (2*min_dist))//max(2*min_dist, 0.02) )
- {
- good_matches.push_back( matches[i]);
- }
- }
- // the code for the new matcher is adder here
- //look if the match is inside a defined area of the image
- // First part of code taken from the the internet for matching
- /* double tresholdDist = 0.25 * sqrt(double(img_1.size().height*img_1.size().height + img_1.size().width*img_1.size().width));
- good_matches.reserve(matches.size());
- for (size_t i = 0; i < matches.size(); ++i)
- {
- for (int j = 0; j < matches[i].size(); j++)
- {
- //calculate local distance for each possible match
- Point2f from = keypoints_1[matches[i][j].queryIdx].pt;
- Point2f to = keypoints_2[matches[i][j].trainIdx].pt;
- double dist = sqrt((from.x - to.x) * (from.x - to.x) + (from.y - to.y) * (from.y - to.y));
- //save as best match if local distance is in specified area
- if ((dist < tresholdDist) && (abs(from.y-to.y)<5))
- {
- good_matches.push_back(matches[i][j]);
- j = matches[i].size();
- }
- }
- }*/
- // second part of matching taken from the internet
- ransac_time = clock();
- void ransacTest(const std::vector<cv::DMatch> good_matches,const std::vector<cv::KeyPoint>&keypoints_1,
- const std::vector<cv::KeyPoint>& keypoints_2,std::vector<cv::DMatch>& good_matches1,double distance,double confidence,double minInlierRatio);
- {
- //good_matches.clear();
- // Convert keypoints into Point2f
- std::vector<cv::Point2f> points1, points2;
- for (std::vector<cv::DMatch>::const_iterator it= good_matches.begin();it!= good_matches.end(); ++it)
- {
- // Get the position of left keypoints
- float x= keypoints_1[it->queryIdx].pt.x;
- float y= keypoints_1[it->queryIdx].pt.y;
- points1.push_back(cv::Point2f(x,y));
- // Get the position of right keypoints
- x= keypoints_2[it->trainIdx].pt.x;
- y= keypoints_2[it->trainIdx].pt.y;
- points2.push_back(cv::Point2f(x,y));
- }
- // Compute F matrix using RANSAC
- std::vector<uchar> inliers(points1.size(),0);
- cv::Mat fundemental= cv::findFundamentalMat(cv::Mat(points1),cv::Mat(points2),inliers,8,3,.99); // confidence probability
- // extract the surviving (inliers) matches
- std::vector<uchar>::const_iterator
- itIn= inliers.begin();
- std::vector<cv::DMatch>::const_iterator
- itM= good_matches.begin();
- // for all matches
- for ( ;itIn!= inliers.end(); ++itIn, ++itM)
- {
- if (*itIn)
- { // it is a valid match
- good_matches1.push_back(*itM);
- }
- }
- }
- //-- Draw only "good" matches
- Mat img_matches;
- drawMatches( img_1, keypoints_1, img_2, keypoints_2,
- good_matches1, img_matches, Scalar::all(-1), Scalar::all(-1),
- std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
- match_time = clock();
- //-- Localize the object from img_1 in img_2
- std::vector<Point2f> obj;
- std::vector<Point2f> scene;
- for( size_t i = 0; i < good_matches.size(); i++ )
- {
- //-- Get the keypoints from the good matches
- obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
- scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
- }
- Mat H = findHomography(obj, scene ,CV_RANSAC);
- //-- Get the corners from the image_1 ( the object to be "detected" )
- std::vector<Point2f> obj_corners(4);
- obj_corners[0] = cvPoint(0,0);
- obj_corners[1] = cvPoint( img_1.cols, 0 );
- obj_corners[2] = cvPoint( img_1.cols, img_1.rows );
- obj_corners[3] = cvPoint( 0, img_1.rows );
- std::vector<Point2f> scene_corners(4);
- perspectiveTransform( obj_corners, scene_corners, H);
- //-- Draw lines between the corners (the mapped object in the scene - image_2 )
- Point2f offset( (float)img_1.cols, 0);
- line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
- line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
- line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
- line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
- //-- Show detected matches
- imshow( "Good Matches", img_matches );
- /*for( int i = 0; i < (int)good_matches.size(); i++ )
- {
- printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx );
- }*/
- //printf("\n");
- //The following code is for calculating the total execution time
- end_time = clock();
- cpu_time_used = ((double)(end_time - start)) / ((double)CLOCKS_PER_SEC);
- double time_for_detection = det_time - start;
- double time_for_description = descr_time - det_time;
- double time_for_matching = match_time - descr_time;
- double time_for_RANSAC = match_time - ransac_time;
- //printf("\n\n########################################################################\nDetails on Execution time\n########################################################################\n");
- printf("Time for detection = %.10f\n\nTime for description = %.10f\n\nTime for matching = %.10f\n\nTime for RANSAC = %.10f\n\n",
- time_for_detection,time_for_description,time_for_matching,time_for_RANSAC);
- printf("Total matches found=%d\n\n",matches.size());
- printf("No of good matches=%d\n\n",good_matches1.size());
- /*printf("start = %.10f\n\nend_time = %.10fs\n\ndet_time = %.10f\n\ndescr_time = %.10f\n\nmatch_time = %.10f\n\n",
- ((double) start), ((double) end_time),((double) det_time), ((double) descr_time), ((double) match_time));*/
- printf("Total_time = %.10f\n\n", ((double) (end_time - start)));
- printf("cpu_time_used = %.10fs\n\n", cpu_time_used);
- printf("CLOCKS_PER_SEC = %i\n\n", CLOCKS_PER_SEC);
- // Keep window there until user presses 'q' to quit.
- char c = ' ';
- while ((c = waitKey(0)) != 'q');
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement