linemod_cpp_layout

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iterator>
#include <set>
#include <cstdio>
#include <iostream>

// Function prototypes
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<cv::Point>& chain, double f);

std::vector<cv::Point> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                        int num_modalities, cv::Point offset, cv::Size size,
                                        cv::Mat& mask, cv::Mat& dst);

void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                        int num_modalities, cv::Point offset, cv::Size size,
                        cv::Mat& dst);

void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T);

cv::Mat displayQuantized(const cv::Mat& quantized);

// Copy of cv_mouse from cv_utilities
class Mouse
{
public:
  static void start(const std::string& a_img_name)
  {
    cv::setMouseCallback(a_img_name, Mouse::cv_on_mouse, 0);
  }
  static int event(void)
  {
    int l_event = m_event;
    m_event = -1;
    return l_event;
  }
  static int x(void)
  {
    return m_x;
  }
  static int y(void)
  {
    return m_y;
  }

private:
  static void cv_on_mouse(int a_event, int a_x, int a_y, int, void *)
  {
    m_event = a_event;
    m_x = a_x;
    m_y = a_y;
  }

  static int m_event;
  static int m_x;
  static int m_y;
};
int Mouse::m_event;
int Mouse::m_x;
int Mouse::m_y;

static void help()
{
  printf("Usage: openni_demo [templates.yml]\n\n"
         "Place your object on a planar, featureless surface. With the mouse,\n"
         "frame it in the 'color' window and right click to learn a first template.\n"
         "Then press 'l' to enter online learning mode, and move the camera around.\n"
         "When the match score falls between 90-95%% the demo will add a new template.\n\n"
         "Keys:\n"
         "\t h   -- This help page\n"
         "\t l   -- Toggle online learning\n"
         "\t m   -- Toggle printing match result\n"
         "\t t   -- Toggle printing timings\n"
         "\t w   -- Write learned templates to disk\n"
         "\t [ ] -- Adjust matching threshold: '[' down,  ']' up\n"
         "\t q   -- Quit\n\n");
}

// Adapted from cv_timer in cv_utilities
class Timer
{
public:
  Timer() : start_(0), time_(0) {}

  void start()
  {
    start_ = cv::getTickCount();
  }

  void stop()
  {
    CV_Assert(start_ != 0);
    int64 end = cv::getTickCount();
    time_ += end - start_;
    start_ = 0;
  }

  double time()
  {
    double ret = time_ / cv::getTickFrequency();
    time_ = 0;
    return ret;
  }

private:
  int64 start_, time_;
};

// Functions to store detector and templates in single XML/YAML file
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
  cv::Ptr<cv::linemod::Detector> detector = new cv::linemod::Detector;
  cv::FileStorage fs(filename, cv::FileStorage::READ);
  detector->read(fs.root());

  cv::FileNode fn = fs["classes"];
  for (cv::FileNodeIterator i = fn.begin(), iend = fn.end(); i != iend; ++i)
    detector->readClass(*i);

  return detector;
}

static void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
{
  cv::FileStorage fs(filename, cv::FileStorage::WRITE);
  detector->write(fs);

  std::vector<std::string> ids = detector->classIds();
  fs << "classes" << "[";
  for (int i = 0; i < (int)ids.size(); ++i)
  {
    fs << "{";
    detector->writeClass(ids[i], fs);
    fs << "}"; // current class
  }
  fs << "]"; // classes
}


int main(int argc, char * argv[])
{
  // Various settings and flags
  bool show_match_result = true;
  bool show_timings = false;
  bool learn_online = false;
  int num_classes = 0;
  int matching_threshold = 80;
  /// @todo Keys for changing these?
  cv::Size roi_size(200, 200);
  int learning_lower_bound = 90;
  int learning_upper_bound = 95;

  // Timers
  Timer extract_timer;
  Timer match_timer;

  // Initialize HighGUI
  help();
  cv::namedWindow("color");
  cv::namedWindow("normals");
  Mouse::start("color");

  // Initialize LINEMOD data structures
  cv::Ptr<cv::linemod::Detector> detector;
  std::string filename;
  if (argc == 1)
  {
    filename = "linemod_templates.yml";
    detector = cv::linemod::getDefaultLINEMOD();
  }
  else
  {
    detector = readLinemod(argv[1]);

    std::vector<std::string> ids = detector->classIds();
    num_classes = detector->numClasses();
    printf("Loaded %s with %d classes and %d templates\n",
           argv[1], num_classes, detector->numTemplates());
    if (!ids.empty())
    {
      printf("Class ids:\n");
      std::copy(ids.begin(), ids.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
    }
  }
  int num_modalities = (int)detector->getModalities().size();

  // Open Kinect sensor
  cv::VideoCapture capture( CV_CAP_OPENNI );
  if (!capture.isOpened())
  {
    printf("Could not open OpenNI-capable sensor\n");
    return -1;
  }
  capture.set(CV_CAP_PROP_OPENNI_REGISTRATION, 1);
  double focal_length = capture.get(CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
  //printf("Focal length = %f\n", focal_length);

  // Main loop
  cv::Mat color, depth;
  for(;;)
  {
    // Capture next color/depth pair
    capture.grab();
    capture.retrieve(depth, CV_CAP_OPENNI_DEPTH_MAP);
    capture.retrieve(color, CV_CAP_OPENNI_BGR_IMAGE);

    std::vector<cv::Mat> sources;
    sources.push_back(color);
    sources.push_back(depth);
    cv::Mat display = color.clone();

    if (!learn_online)
    {
      cv::Point mouse(Mouse::x(), Mouse::y());
      int event = Mouse::event();

      // Compute ROI centered on current mouse location
      cv::Point roi_offset(roi_size.width / 2, roi_size.height / 2);
      cv::Point pt1 = mouse - roi_offset; // top left
      cv::Point pt2 = mouse + roi_offset; // bottom right

      if (event == CV_EVENT_RBUTTONDOWN)
      {
        // Compute object mask by subtracting the plane within the ROI
        std::vector<cv::Point> chain(4);
        chain[0] = pt1;
        chain[1] = cv::Point(pt2.x, pt1.y);
        chain[2] = pt2;
        chain[3] = cv::Point(pt1.x, pt2.y);
        cv::Mat mask;
        subtractPlane(depth, mask, chain, focal_length);

        cv::imshow("mask", mask);

        // Extract template
        std::string class_id = cv::format("class%d", num_classes);
        cv::Rect bb;
        extract_timer.start();
        int template_id = detector->addTemplate(sources, class_id, mask, &bb);
        extract_timer.stop();
        if (template_id != -1)
        {
          printf("*** Added template (id %d) for new object class %d***\n",
                 template_id, num_classes);
          //printf("Extracted at (%d, %d) size %dx%d\n", bb.x, bb.y, bb.width, bb.height);
        }

        ++num_classes;
      }

      // Draw ROI for display
      cv::rectangle(display, pt1, pt2, cv::Scalar(0,0,0), 3);
      cv::rectangle(display, pt1, pt2, cv::Scalar(0,255,255), 1);
    }

    // Perform matching
    std::vector<cv::linemod::Match> matches;
    std::vector<std::string> class_ids;
    std::vector<cv::Mat> quantized_images;
    match_timer.start();
    detector->match(sources, (float)matching_threshold, matches, class_ids, quantized_images);
    match_timer.stop();

    int classes_visited = 0;
    std::set<std::string> visited;

    for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)
    {
      cv::linemod::Match m = matches[i];

      if (visited.insert(m.class_id).second)
      {
        ++classes_visited;

        if (show_match_result)
        {
          printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
                 m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
        }

        // Draw matching template
        const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
        drawResponse(templates, num_modalities, display, cv::Point(m.x, m.y), detector->getT(0));

        if (learn_online)
        {
          /// @todo Online learning possibly broken by new gradient feature extraction,
          /// which assumes an accurate object outline.

          // Compute masks based on convex hull of matched template
          cv::Mat color_mask, depth_mask;
          std::vector<cv::Point> chain = maskFromTemplate(templates, num_modalities,
                                                          cv::Point(m.x, m.y), color.size(),
                                                          color_mask, display);
          subtractPlane(depth, depth_mask, chain, focal_length);

          cv::imshow("mask", depth_mask);

          // If pretty sure (but not TOO sure), add new template
          if (learning_lower_bound < m.similarity && m.similarity < learning_upper_bound)
          {
            extract_timer.start();
            int template_id = detector->addTemplate(sources, m.class_id, depth_mask);
            extract_timer.stop();
            if (template_id != -1)
            {
              printf("*** Added template (id %d) for existing object class %s***\n",
                     template_id, m.class_id.c_str());
            }
          }
        }
      }
    }

    if (show_match_result && matches.empty())
      printf("No matches found...\n");
    if (show_timings)
    {
      printf("Training: %.2fs\n", extract_timer.time());
      printf("Matching: %.2fs\n", match_timer.time());
    }
    if (show_match_result || show_timings)
      printf("------------------------------------------------------------\n");

    cv::imshow("color", display);
    cv::imshow("normals", quantized_images[1]);
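    // Note: displayQuantized() (declared above, defined near the end of this file)
    // is never called in this paste; it maps the quantization codes to a viewable
    // image. A possible alternative to showing the raw codes -- an assumption on
    // my part, not something the original demo does -- would be:
    //   cv::imshow("normals", displayQuantized(quantized_images[1]));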

    char key = (char)cv::waitKey(10);
    if( key == 'q' )
      break;

    switch (key)
    {
      case 'h':
        help();
        break;
      case 'm':
        // toggle printing match result
        show_match_result = !show_match_result;
        printf("Show match result %s\n", show_match_result ? "ON" : "OFF");
        break;
      case 't':
        // toggle printing timings
        show_timings = !show_timings;
        printf("Show timings %s\n", show_timings ? "ON" : "OFF");
        break;
      case 'l':
        // toggle online learning
        learn_online = !learn_online;
        printf("Online learning %s\n", learn_online ? "ON" : "OFF");
        break;
      case '[':
        // decrement threshold
        matching_threshold = std::max(matching_threshold - 1, -100);
        printf("New threshold: %d\n", matching_threshold);
        break;
      case ']':
        // increment threshold
        matching_threshold = std::min(matching_threshold + 1, +100);
        printf("New threshold: %d\n", matching_threshold);
        break;
      case 'w':
        // write model to disk
        writeLinemod(detector, filename);
        printf("Wrote detector and templates to %s\n", filename.c_str());
        break;
      default:
        ;
    }
  }
  return 0;
}

static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
{
  real.resize(proj.size());
  double f_inv = 1.0 / f;

  for (int i = 0; i < (int)proj.size(); ++i)
  {
    double Z = proj[i].z;
    // Back-project pixel coordinates to camera coordinates, assuming a 640x480
    // image with the principal point at its center (320, 240).
    real[i].x = (proj[i].x - 320.) * (f_inv * Z);
    real[i].y = (proj[i].y - 240.) * (f_inv * Z);
    real[i].z = Z;
  }
}

static void filterPlane(cv::Mat & ap_depth, std::vector<cv::Mat> & a_masks, std::vector<cv::Point> & a_chain, double f)
{
  const int l_num_cost_pts = 200;

  float l_thres = 4;

  cv::Mat lp_mask = cv::Mat::zeros(ap_depth.size(), CV_8UC1);

  std::vector<cv::Point> l_chain_vector;

  float l_chain_length = 0;
  float * lp_seg_length = new float[a_chain.size()];

  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
    float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
    lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
    l_chain_length += lp_seg_length[l_i];
  }
  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    if (lp_seg_length[l_i] > 0)
    {
      int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
      float l_cur_len = lp_seg_length[l_i] / l_cur_num;

      for (int l_j = 0; l_j < l_cur_num; ++l_j)
      {
        float l_ratio = (l_cur_len * l_j / lp_seg_length[l_i]);

        cv::Point l_pts;

        l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
        l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);

        l_chain_vector.push_back(l_pts);
      }
    }
  }
  std::vector<cv::Point3d> lp_src_3Dpts(l_chain_vector.size());

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    lp_src_3Dpts[l_i].x = l_chain_vector[l_i].x;
    lp_src_3Dpts[l_i].y = l_chain_vector[l_i].y;
    // The OpenNI depth map is CV_16UC1 (depth in millimeters), so read it as
    // ushort rather than int.
    lp_src_3Dpts[l_i].z = ap_depth.at<ushort>(cvRound(lp_src_3Dpts[l_i].y), cvRound(lp_src_3Dpts[l_i].x));
    //lp_mask.at<uchar>((int)lp_src_3Dpts[l_i].Y,(int)lp_src_3Dpts[l_i].X) = 255;
  }

  reprojectPoints(lp_src_3Dpts, lp_src_3Dpts, f);

  cv::Mat lp_pts = cv::Mat((int)l_chain_vector.size(), 4, CV_32F);
  cv::Mat lp_u;
  cv::Mat lp_v = cv::Mat(4, 4, CV_32F);
  cv::Mat lp_w = cv::Mat(4, 1, CV_32F);

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    lp_pts.at<float>(l_i, 0) = (float)lp_src_3Dpts[l_i].x;
    lp_pts.at<float>(l_i, 1) = (float)lp_src_3Dpts[l_i].y;
    lp_pts.at<float>(l_i, 2) = (float)lp_src_3Dpts[l_i].z;
    lp_pts.at<float>(l_i, 3) = 1.0f;
  }
  // cv::SVD::compute() returns V transposed in its last argument, so the plane
  // coefficients (the right singular vector for the smallest singular value)
  // are the last row of lp_v, not its last column.
  cv::SVD::compute(lp_pts, lp_w, lp_u, lp_v);

  float l_n[4] = {lp_v.at<float>(3, 0),
                  lp_v.at<float>(3, 1),
                  lp_v.at<float>(3, 2),
                  lp_v.at<float>(3, 3)};

  float l_norm = sqrt(l_n[0] * l_n[0] + l_n[1] * l_n[1] + l_n[2] * l_n[2]);

  l_n[0] /= l_norm;
  l_n[1] /= l_norm;
  l_n[2] /= l_norm;
  l_n[3] /= l_norm;

  float l_max_dist = 0;

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    float l_dist = l_n[0] * lp_pts.at<float>(l_i, 0) +
                   l_n[1] * lp_pts.at<float>(l_i, 1) +
                   l_n[2] * lp_pts.at<float>(l_i, 2) +
                   l_n[3] * lp_pts.at<float>(l_i, 3);

    if (fabs(l_dist) > l_max_dist)
      l_max_dist = l_dist;
  }
  //std::cerr << "plane: " << l_n[0] << ";" << l_n[1] << ";" << l_n[2] << ";" << l_n[3] << " maxdist: " << l_max_dist << " end" << std::endl;
  int l_minx = ap_depth.cols;
  int l_miny = ap_depth.rows;
  int l_maxx = 0;
  int l_maxy = 0;

  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    l_minx = std::min(l_minx, a_chain[l_i].x);
    l_miny = std::min(l_miny, a_chain[l_i].y);
    l_maxx = std::max(l_maxx, a_chain[l_i].x);
    l_maxy = std::max(l_maxy, a_chain[l_i].y);
  }
  int l_w = l_maxx - l_minx + 1;
  int l_h = l_maxy - l_miny + 1;
  int l_nn = (int)a_chain.size();

  std::vector<cv::Point> lp_chain;

  for (int l_i = 0; l_i < l_nn; ++l_i)
    lp_chain.push_back(a_chain[l_i]);

  // Create the proper structure for the fillPoly function
  std::vector< std::vector<cv::Point> > filled;
  filled.push_back(lp_chain);
  cv::fillPoly(lp_mask, filled, cv::Scalar(255, 255, 255));

  //cv::imshow(lp_mask,"hallo1");

  std::vector<cv::Point3d> lp_dst_3Dpts(l_h * l_w);

  int l_ind = 0;

  for (int l_r = 0; l_r < l_h; ++l_r)
  {
    for (int l_c = 0; l_c < l_w; ++l_c)
    {
      lp_dst_3Dpts[l_ind].x = l_c + l_minx;
      lp_dst_3Dpts[l_ind].y = l_r + l_miny;
      // Again, read the 16-bit depth value (ushort), not a single byte.
      lp_dst_3Dpts[l_ind].z = ap_depth.at<ushort>(l_r + l_miny, l_c + l_minx);
      ++l_ind;
    }
  }
  reprojectPoints(lp_dst_3Dpts, lp_dst_3Dpts, f);

  l_ind = 0;

  for (int l_r = 0; l_r < l_h; ++l_r)
  {
    for (int l_c = 0; l_c < l_w; ++l_c)
    {
      float l_dist = (float)(l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3]);

      ++l_ind;

      if (lp_mask.at<uchar>(l_r + l_miny, l_c + l_minx) != 0)
      {
        if (fabs(l_dist) < std::max(l_thres, (l_max_dist * 2.0f)))
        {
          for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
          {
            int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
            int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

            a_masks[l_p].at<uchar>(l_row, l_col) = 0;
          }
        }
        else
        {
          for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
          {
            int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
            int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

            a_masks[l_p].at<uchar>(l_row, l_col) = 255;
          }
        }
      }
    }
  }
  // Free the segment length buffer allocated with new[] above.
  delete[] lp_seg_length;
}

void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<cv::Point>& chain, double f)
{
  mask = cv::Mat::zeros(depth.rows, depth.cols, CV_8U);
  std::vector<cv::Mat> tmp;
  tmp.push_back(mask);
  cv::Mat temp_depth = depth.clone();
  filterPlane(temp_depth, tmp, chain, f);
}

std::vector<cv::Point> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                        int num_modalities, cv::Point offset, cv::Size size,
                                        cv::Mat& mask, cv::Mat& dst)
{
  templateConvexHull(templates, num_modalities, offset, size, mask);

  const int OFFSET = 30;
  cv::dilate(mask, mask, cv::Mat(), cv::Point(-1,-1), OFFSET);

  cv::Mat mask_copy = mask.clone();
  std::vector< std::vector<cv::Point> > contours;
  cv::findContours(mask_copy, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

  std::vector<cv::Point> l_pts1 = contours[0];  // to use as input to cv_primesensor::filter_plane

  // Create the lines from the contour set, on the dst matrix
  // Add first element to the end to make the contour complete
  l_pts1.push_back(l_pts1[0]);

  for(size_t i = 0; i < l_pts1.size() - 1; i++)
  {
    cv::Point pt0 = l_pts1[i];
    cv::Point pt1 = l_pts1[i+1];
    cv::line(dst, pt0, pt1, cv::Scalar(0,255,0), 2);
  }

  return l_pts1;
}

// Adapted from cv_show_angles
cv::Mat displayQuantized(const cv::Mat& quantized)
{
  cv::Mat color(quantized.size(), CV_8UC3);
  for (int r = 0; r < quantized.rows; ++r)
  {
    const uchar* quant_r = quantized.ptr(r);
    cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);

    for (int c = 0; c < quantized.cols; ++c)
    {
      cv::Vec3b& bgr = color_r[c];
      switch (quant_r[c])
      {
        case 0:   bgr[0]=  0; bgr[1]=  0; bgr[2]=  0;    break;
        case 1:   bgr[0]= 55; bgr[1]= 55; bgr[2]= 55;    break;
        case 2:   bgr[0]= 80; bgr[1]= 80; bgr[2]= 80;    break;
        case 4:   bgr[0]=105; bgr[1]=105; bgr[2]=105;    break;
        case 8:   bgr[0]=130; bgr[1]=130; bgr[2]=130;    break;
        case 16:  bgr[0]=155; bgr[1]=155; bgr[2]=155;    break;
        case 32:  bgr[0]=180; bgr[1]=180; bgr[2]=180;    break;
        case 64:  bgr[0]=205; bgr[1]=205; bgr[2]=205;    break;
        case 128: bgr[0]=230; bgr[1]=230; bgr[2]=230;    break;
        case 255: bgr[0]=  0; bgr[1]=  0; bgr[2]=255;    break;
        default:  bgr[0]=  0; bgr[1]=255; bgr[2]=  0;    break;
      }
    }
  }

  return color;
}

// Adapted from cv_line_template::convex_hull
void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                        int num_modalities, cv::Point offset, cv::Size size,
                        cv::Mat& dst)
{
  std::vector<cv::Point> points;
  for (int m = 0; m < num_modalities; ++m)
  {
    for (int i = 0; i < (int)templates[m].features.size(); ++i)
    {
      cv::linemod::Feature f = templates[m].features[i];
      points.push_back(cv::Point(f.x, f.y) + offset);
    }
  }

  std::vector<cv::Point> hull;
  cv::convexHull(points, hull);

  dst = cv::Mat::zeros(size, CV_8U);
  const int hull_count = (int)hull.size();
  const cv::Point* hull_pts = &hull[0];
  cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
}

void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T)
{
  static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),
                                        CV_RGB(0, 255, 0),
                                        CV_RGB(255, 255, 0),
                                        CV_RGB(255, 140, 0),
                                        CV_RGB(255, 0, 0) };

  for (int m = 0; m < num_modalities; ++m)
  {
    // NOTE: Original demo recalculated max response for each feature in the TxT
    // box around it and chose the display color based on that response. Here
    // the display color just depends on the modality.
    cv::Scalar color = COLORS[m];

    for (int i = 0; i < (int)templates[m].features.size(); ++i)
    {
      cv::linemod::Feature f = templates[m].features[i];
      cv::Point pt(f.x + offset.x, f.y + offset.y);
      cv::circle(dst, pt, T / 2, color);
    }
  }
}