Advertisement
lamiastella

Almost working — about halfway through SR300Camera.cpp

May 23rd, 2017
160
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C++ 9.14 KB | None | 0 0
#include "SR300Camera.h"

#include "RealSense/SampleReader.h"
#include "RealSense/SenseManager.h"
#include "util_render.h"

#include <cstdio>
#include <iostream>
#include <vector>
  8.  
  9. /***
  10. Private constructor for the SR300 Camera depth sensor
  11. ***/
  12. using namespace std;
  13. using namespace Intel::RealSense;
  14.  
  15.  
// --- File-scope camera state shared by the SR300Camera methods below ---

// RealSense pipeline manager; created once at static-initialization time.
// NOTE(review): may be null if CreateInstance fails -- checked in the ctor.
SenseManager *pp = SenseManager::CreateInstance();
// Active capture device, assigned in the constructor (non-owning).
Capture::Device *device;
// Capture manager queried from pp in the constructor (non-owning).
CaptureManager *cm;

// Requested depth-stream configuration.
const int depth_fps = 30;
const int depth_width = 640;
const int depth_height = 480;
cv::Size bufferSize; // NOTE(review): never written in this file -- possibly unused
// Most recent sample acquired from the pipeline (non-owning, valid until ReleaseFrame).
const PXCCapture::Sample *sample;
// Camera-space (XYZ, mm) point cloud built from the latest depth frame.
std::vector<cv::Point3f> xyzBuffer;
  26.  
  27.  
  28.  
  29. SR300Camera::SR300Camera(bool use_live_sensor)
  30. {
  31.  
  32.     CONFIDENCE_THRESHHOLD = (60.0f / 255.0f*500.0f);
  33.     //INVALID_FLAG_VALUE = PMD_FLAG_INVALID;
  34.     X_DIMENSION = depth_width;
  35.     Y_DIMENSION = depth_height;
  36.  
  37.     int num_pixels = X_DIMENSION * Y_DIMENSION;
  38.     Point3DF32* vertices = new Point3DF32[num_pixels];
  39.  
  40.     if (!use_live_sensor) {
  41.         return;
  42.     }
  43.  
  44.     int res;
  45.     std::cout << "Trying to open SR300 camera\n";
  46.  
  47.    
  48.    
  49.     if (!pp) {
  50.         wprintf_s(L"Unable to create the SenseManager\n");
  51.     }
  52.  
  53.     /* Sets file recording or playback */
  54.     cm = pp->QueryCaptureManager();
  55.     //Status sts= STATUS_NO_ERROR;
  56.     Status sts = STATUS_DATA_UNAVAILABLE; //mona is this the correct initialization?
  57.     // Create stream renders
  58.     UtilRender renderc(L"Color"), renderd(L"Depth"), renderi(L"IR"), renderr(L"Right"), renderl(L"Left");
  59.    
  60.     do {
  61.         bool revert = false;
  62.         /*
  63.         DataDesc desc = {};
  64.        
  65.         if (cm->QueryCapture()) {
  66.             cm->QueryCapture()->QueryDeviceInfo(0, &desc.deviceInfo);
  67.         }
  68.         else {
  69.             //desc.deviceInfo.streams = Capture::STREAM_TYPE_COLOR | Capture::STREAM_TYPE_DEPTH;
  70.             desc.deviceInfo.streams = Capture::STREAM_TYPE_DEPTH;
  71.             revert = true;
  72.         }
  73.         */
  74.         //pp->EnableStreams(&desc);
  75.         pp->EnableStream(Capture::StreamType::STREAM_TYPE_DEPTH,
  76.             X_DIMENSION, Y_DIMENSION, depth_fps);
  77.         revert = true;
  78.         sts = pp->Init();
  79.         if (sts < Status::STATUS_NO_ERROR) {
  80.            
  81.             if (revert) {
  82.                
  83.                 pp->Close();
  84.                 pp->EnableStream(Capture::STREAM_TYPE_DEPTH);
  85.                 sts = pp->Init();
  86.                 if (sts < Status::STATUS_NO_ERROR) {
  87.                     pp->Close();
  88.                     //pp->EnableStream(Capture::STREAM_TYPE_COLOR);
  89.                     sts = pp->Init();
  90.                 }
  91.             }
  92.             if (sts < Status::STATUS_NO_ERROR) {
  93.                 wprintf_s(L"Failed to locate any video stream(s)\n");
  94.                 pp->Release();
  95.             }
  96.         }
  97.  
  98.  
  99.        
  100.         device = pp->QueryCaptureManager()->QueryDevice();
  101.  
  102.         device->ResetProperties(Capture::STREAM_TYPE_ANY);
  103.         //cout << "accuracy is: "<<device->IVCAM_ACCURACY_MEDIAN;
  104.  
  105.  
  106.  
  107.         for (int nframes = 0; ; nframes++) {
  108.             sts = pp->AcquireFrame(false);
  109.  
  110.             if (sts < Status::STATUS_NO_ERROR) {
  111.                 if (sts == Status::STATUS_STREAM_CONFIG_CHANGED) {
  112.                     wprintf_s(L"Stream configuration was changed, re-initilizing\n");
  113.                     pp->Close();
  114.                 }
  115.                 break;
  116.             }
  117.  
  118.             sample = pp->QuerySample();
  119.  
  120.             //const Capture::Sample *sample = pp->QuerySample();
  121.             if (sample) {
  122.                 cv::Mat img;
  123.                 //Copy all the vertices from Point3DF32 to the mat image
  124.                 //QueryVertices(Image *depth, Point3DF32 *vertices)
  125.                 //sample->color->QueryInfo()
  126.            
  127.                 //cv::imshow("depth from OpenARK", Visualizer::visualizeDepthMap(img))
  128.                 if (sample->depth && !renderd.RenderFrame(sample->depth)) break;
  129.                 //if (sample->color && !renderc.RenderFrame(sample->color)) break;
  130.                 //if (sample->ir && !renderi.RenderFrame(sample->ir))    break;
  131.                 Image::ImageData depthImage;
  132.                 Image *depthMap = sample->depth;
  133.                 //depthMap->AcquireAccess(Image::ACCESS_READ, &depthImage);
  134.                 depthMap->AcquireAccess(Image::ACCESS_WRITE, &depthImage);
  135.                
  136.                 Image::ImageInfo imgInfo = depthMap->QueryInfo();
  137.                 int depth_stride = depthImage.pitches[0] / sizeof(pxcU16);
  138.                 Projection * projection = device->CreateProjection();
  139.                 cout << "projection is: " << projection;
  140.                 cout << "device is: " << device;
  141.                 //int num_pixels = depth_height * depth_width;
  142.                
  143.                 //projection->QueryVertices(depthMap, vertices);
  144.                 pxcU16 *dpixels = (pxcU16*)depthImage.planes[0];
  145.                 unsigned int dpitch = depthImage.pitches[0] / sizeof(pxcU16);
  146.  
  147.                 /*for (int k = 0; k < num_pixels; k++) {
  148.                     cout << "xx is " << vertices[k].x << endl;
  149.                     cout << "yy is " << vertices[k].y << endl;
  150.                     cout << "zz is " << vertices[k].z << endl;
  151.                 }*/
  152.                 //cout << "xx is " << vertices[0].x << endl;
  153.                 //cout << "yy is " << vertices[0].y << endl;
  154.                 //cout << "zz is " << vertices[0].z << endl;
  155.                 //cout << "*******************" << endl;
  156.                
  157.                 Point3DF32* posUVZ = new Point3DF32[num_pixels];
  158.                 Point3DF32 *pos3D = new Point3DF32[num_pixels];
  159.  
  160.                 for (int y = 0; y < Y_DIMENSION; y++) {
  161.                     for (int x = 0; x < X_DIMENSION; x++) {
  162.                         // populate the point with depth image pixel coordinates and the depth value
  163.                         posUVZ[y*Y_DIMENSION + x].x = (pxcF32)x;
  164.                         posUVZ[y*Y_DIMENSION + x].y = (pxcF32)y;
  165.                         posUVZ[y*Y_DIMENSION + x].z = dpixels[nframes*X_DIMENSION*Y_DIMENSION + y*Y_DIMENSION+ x];
  166.                     }
  167.                 }
  168.  
  169.                 //projection->ProjectDepthToCamera(num_pixels, posUVZ, pos3D);
  170.  
  171.  
  172.                 /*
  173.                 for (unsigned int yy = 0, k = 0; yy < Y_DIMENSION; yy++)
  174.                 {
  175.                     for (unsigned int xx = 0; xx < X_DIMENSION; xx++, k++)
  176.                     {
  177.                         posUVZ[k].x = (pxcF32)xx;
  178.                         posUVZ[k].y = (pxcF32)yy;
  179.                         posUVZ[k].z = (pxcF32)dpixels[yy * dpitch + xx];
  180.                             //  cout << "xx is " << posUVZ[k].x << endl;
  181.                              // cout << "yy is " << posUVZ[k].y << endl;
  182.                              // cout << "zz is " << posUVZ[k].z<< endl;
  183.                     }
  184.                 }
  185.                 */
  186.  
  187.  
  188.  
  189.  
  190.                 // convert the array of depth coordinates + depth value (posUVZ) into the world coordinates (pos3D) in mm
  191.                
  192.                 //Step 1: Set up the point cloud in camera coordinates and do the projection
  193.                
  194.  
  195.  
  196.                 //projection->ProjectDepthToCamera(num_pixels, posUVZ, pos3D);
  197.                
  198.                 if (projection->ProjectDepthToCamera(num_pixels, posUVZ, pos3D) < PXC_STATUS_NO_ERROR)
  199.                 {
  200.                 delete[] posUVZ;
  201.                 delete[] pos3D;
  202.                 cout << "projection unsucessful";
  203.                 return;
  204.                 }
  205.                
  206.                 cout << "*************" << endl;
  207.                 for (unsigned int yy = 0, k = 0; yy < Y_DIMENSION; yy++)
  208.                 {
  209.                     for (unsigned int xx = 0; xx < X_DIMENSION; xx++, k++)
  210.                     {
  211.                         cout << "xx is " << pos3D[k].x << endl;
  212.                         cout << "yy is " << pos3D[k].y << endl;
  213.                         cout << "zz is " << pos3D[k].z << endl;
  214.                         xyzBuffer.push_back(cv::Point3f(pos3D[k].x, pos3D[k].y, pos3D[k].z));
  215.                     }
  216.                 }
  217.                 cout << "/////////\\\\\\\\" << endl;
  218.                 projection->Release();
  219.                 depthMap->ReleaseAccess(&depthImage); //is this in the right place mona?
  220.            
  221.             }
  222.             printf("opened sensor\n");
  223.             pp->ReleaseFrame();
  224.             printf("acquired image\n");
  225.  
  226.         }
  227.  
  228.     } while (sts == Status::STATUS_STREAM_CONFIG_CHANGED);
  229.    
  230.     //numRows = width and numCol = height? ?
  231.     //numPixels = dd.img.numRows * dd.img.numColumns; // Number of pixels in camera
  232.     numPixels = depth_width * depth_height;
  233.     dists = new float[3 * numPixels]; // Dists contains XYZ values. needs to be 3x the size of numPixels
  234.     amps = new float[numPixels];
  235.     //frame = cvCreateImage(cvSize(dd.img.numColumns, dd.img.numRows), 8, 3); // Create the frame
  236.     frame = cvCreateImage(cvSize(depth_width, depth_height), 8, 3); // Create the frame
  237.  
  238.     KF.init(6, 3, 0);
  239.     KF.transitionMatrix = (cv::Mat_<float>(6, 6) << 1, 0, 0, 1, 0, 0,
  240.                                                     0, 1, 0, 0, 1, 0,
  241.                                                     0, 0, 1, 0, 0, 1,
  242.                                                     0, 0, 0, 1, 0, 0,
  243.                                                     0, 0, 0, 0, 1, 0,
  244.                                                     0, 0, 0, 0, 0, 1);
  245.     measurement = cv::Mat_<float>::zeros(3, 1);
  246.  
  247.     //Initaite Kalman
  248.     KF.statePre.setTo(0);
  249.     setIdentity(KF.measurementMatrix);
  250.     setIdentity(KF.processNoiseCov, cv::Scalar::all(.001)); // Adjust this for faster convergence - but higher noise
  251.     setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-1));
  252.     setIdentity(KF.errorCovPost, cv::Scalar::all(.1));
  253.  
  254.     /* mona will test this
  255.     setIdentity(KF.measurementMatrix);
  256.     setIdentity(KF.processNoiseCov, Scalar::all(smoothness));
  257.     setIdentity(KF.measurementNoiseCov, Scalar::all(rapidness));
  258.     setIdentity(KF.errorCovPost, Scalar::all(.1));
  259.     */
  260. }
  261.  
  262. /***
  263. Public deconstructor for the SR300 Camera depth sensor
  264. ***/
  265. SR300Camera::~SR300Camera() {};
  266.  
  267. void SR300Camera::destroyInstance()
  268. {
  269.     printf("closing sensor\n");
  270.     pp->Release();
  271.     printf("sensor closed\n");
  272. }
  273.  
  274. /***
  275. Create xyzMap, zMap, ampMap, and flagMap from sensor input
  276. ***/
  277. void SR300Camera::update()
  278. {
  279.     initilizeImages();
  280.    
  281.     fillInAmps();
  282. }
  283.  
  284.  
  285.  
/***
Reads the amplitude data from the sensor and fills in the matrix.
Currently a stub: the PMD-based amplitude path below was disabled during
the SR300 port and has not been reimplemented yet.
***/
void SR300Camera::fillInAmps()
{
    //int res = pmdGetAmplitudes(hnd, amps, numPixels * sizeof(float));
    //float * dataPtr = amps;
    //ampMap.data = (uchar *)amps;
}
  295.  
  296. /***
  297. Returns the X value at (i, j)
  298. ***/
  299. float SR300Camera::getX(int i, int j) const
  300. {
  301.     //int flat = j * dd.img.numColumns * 3 + i * 3;
  302.     int flat = j * depth_width * 3 + i * 3;
  303.     return dists[flat];
  304. }
  305.  
  306. /***
  307. Returns the Y value at (i, j)
  308. ***/
  309. float SR300Camera::getY(int i, int j) const
  310. {
  311.     //int flat = j * dd.img.numColumns * 3 + i * 3;
  312.     int flat = j * depth_width * 3 + i * 3;
  313.     return dists[flat + 1];
  314. }
  315.  
  316. /***
  317. Returns the Z value at (i, j)
  318. ***/
  319. float SR300Camera::getZ(int i, int j) const
  320. {
  321.     //int flat = j * dd.img.numColumns * 3 + i * 3;
  322.     int flat = j * depth_width * 3 + i * 3;
  323.     return dists[flat + 2];
  324. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement