Advertisement
lamiastella

SR300Camera.cpp dirty

May 23rd, 2017
155
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C++ 9.35 KB | None | 0 0
#include "SR300Camera.h"
#include "RealSense/SenseManager.h"
#include "RealSense/SampleReader.h"
#include "util_render.h"

#include <iostream>
#include <vector>
  8.  
/***
SR300Camera.cpp — implementation of the SR300 depth-camera wrapper
(RealSense SDK capture + OpenCV post-processing). Each function is
documented at its definition below.
***/
  12. using namespace std;
  13. using namespace Intel::RealSense;
  14.  
  15.  
  16. SenseManager *pp = SenseManager::CreateInstance();
  17. Capture::Device *device;
  18. CaptureManager *cm;
  19.  
  20. const int depth_fps = 30;
  21. const int depth_width = 640;
  22. const int depth_height = 480;
  23. cv::Size bufferSize;
  24. const PXCCapture::Sample *sample;
  25. std::vector<cv::Point3f> xyzBuffer;
  26. const int NFrames = 400;
  27. float* depthFrames = new float[NFrames*depth_width*depth_height];
  28.  
  29.  
  30.  
  31.  
  32.  
  33.  
  34. SR300Camera::SR300Camera(bool use_live_sensor)
  35. {
  36.  
  37.     CONFIDENCE_THRESHHOLD = (60.0f / 255.0f*500.0f);
  38.     //INVALID_FLAG_VALUE = PMD_FLAG_INVALID;
  39.     X_DIMENSION = depth_width;
  40.     Y_DIMENSION = depth_height;
  41.  
  42.     int num_pixels = X_DIMENSION * Y_DIMENSION;
  43.     Point3DF32* vertices = new Point3DF32[num_pixels];
  44.  
  45.     if (!use_live_sensor) {
  46.         return;
  47.     }
  48.  
  49.     int res;
  50.     std::cout << "Trying to open SR300 camera\n";
  51.  
  52.    
  53.    
  54.     if (!pp) {
  55.         wprintf_s(L"Unable to create the SenseManager\n");
  56.     }
  57.  
  58.     /* Sets file recording or playback */
  59.     cm = pp->QueryCaptureManager();
  60.     //Status sts= STATUS_NO_ERROR;
  61.     Status sts = STATUS_DATA_UNAVAILABLE; //mona is this the correct initialization?
  62.     // Create stream renders
  63.     UtilRender renderc(L"Color"), renderd(L"Depth"), renderi(L"IR"), renderr(L"Right"), renderl(L"Left");
  64.    
  65.     do {
  66.         bool revert = false;
  67.         /*
  68.         DataDesc desc = {};
  69.        
  70.         if (cm->QueryCapture()) {
  71.             cm->QueryCapture()->QueryDeviceInfo(0, &desc.deviceInfo);
  72.         }
  73.         else {
  74.             //desc.deviceInfo.streams = Capture::STREAM_TYPE_COLOR | Capture::STREAM_TYPE_DEPTH;
  75.             desc.deviceInfo.streams = Capture::STREAM_TYPE_DEPTH;
  76.             revert = true;
  77.         }
  78.         */
  79.         //pp->EnableStreams(&desc);
  80.         pp->EnableStream(Capture::StreamType::STREAM_TYPE_DEPTH,
  81.             X_DIMENSION, Y_DIMENSION, depth_fps);
  82.         revert = true;
  83.         sts = pp->Init();
  84.         if (sts < Status::STATUS_NO_ERROR) {
  85.            
  86.             if (revert) {
  87.                
  88.                 pp->Close();
  89.                 pp->EnableStream(Capture::STREAM_TYPE_DEPTH);
  90.                 sts = pp->Init();
  91.                 if (sts < Status::STATUS_NO_ERROR) {
  92.                     pp->Close();
  93.                     //pp->EnableStream(Capture::STREAM_TYPE_COLOR);
  94.                     sts = pp->Init();
  95.                 }
  96.             }
  97.             if (sts < Status::STATUS_NO_ERROR) {
  98.                 wprintf_s(L"Failed to locate any video stream(s)\n");
  99.                 pp->Release();
  100.             }
  101.         }
  102.  
  103.  
  104.        
  105.         device = pp->QueryCaptureManager()->QueryDevice();
  106.  
  107.         device->ResetProperties(Capture::STREAM_TYPE_ANY);
  108.         //cout << "accuracy is: "<<device->IVCAM_ACCURACY_MEDIAN;
  109.  
  110.  
  111.  
  112.         for (int nframes = 0; ; nframes++) {
  113.             sts = pp->AcquireFrame(false);
  114.  
  115.             if (sts < Status::STATUS_NO_ERROR) {
  116.                 if (sts == Status::STATUS_STREAM_CONFIG_CHANGED) {
  117.                     wprintf_s(L"Stream configuration was changed, re-initilizing\n");
  118.                     pp->Close();
  119.                 }
  120.                 break;
  121.             }
  122.  
  123.             sample = pp->QuerySample();
  124.  
  125.             //const Capture::Sample *sample = pp->QuerySample();
  126.             if (sample) {
  127.                 cv::Mat img;
  128.                 //Copy all the vertices from Point3DF32 to the mat image
  129.                 //QueryVertices(Image *depth, Point3DF32 *vertices)
  130.                 //sample->color->QueryInfo()
  131.            
  132.                 //cv::imshow("depth from OpenARK", Visualizer::visualizeDepthMap(img))
  133.                 if (sample->depth && !renderd.RenderFrame(sample->depth)) break;
  134.                 //if (sample->color && !renderc.RenderFrame(sample->color)) break;
  135.                 //if (sample->ir && !renderi.RenderFrame(sample->ir))    break;
  136.                 Image::ImageData depthImage;
  137.                 Image *depthMap = sample->depth;
  138.                 //depthMap->AcquireAccess(Image::ACCESS_READ, &depthImage);
  139.                 depthMap->AcquireAccess(Image::ACCESS_WRITE, &depthImage);
  140.                 memcpy(&depthFrames[nframes*Y_DIMENSION*X_DIMENSION], depthImage.planes[0], sizeof(float)*X_DIMENSION*Y_DIMENSION);
  141.                 Image::ImageInfo imgInfo = depthMap->QueryInfo();
  142.                 int depth_stride = depthImage.pitches[0] / sizeof(pxcU16);
  143.                 Projection * projection = device->CreateProjection();
  144.                 cout << "projection is: " << projection;
  145.                 cout << "device is: " << device;
  146.                 //int num_pixels = depth_height * depth_width;
  147.                
  148.                 //projection->QueryVertices(depthMap, vertices);
  149.                 pxcU16 *dpixels = (pxcU16*)depthImage.planes[0];
  150.                 unsigned int dpitch = depthImage.pitches[0] / sizeof(pxcU16);
  151.  
  152.                 /*for (int k = 0; k < num_pixels; k++) {
  153.                     cout << "xx is " << vertices[k].x << endl;
  154.                     cout << "yy is " << vertices[k].y << endl;
  155.                     cout << "zz is " << vertices[k].z << endl;
  156.                 }*/
  157.                 //cout << "xx is " << vertices[0].x << endl;
  158.                 //cout << "yy is " << vertices[0].y << endl;
  159.                 //cout << "zz is " << vertices[0].z << endl;
  160.                 //cout << "*******************" << endl;
  161.                
  162.                 Point3DF32* posUVZ = new Point3DF32[num_pixels];
  163.                 Point3DF32 *pos3D = new Point3DF32[num_pixels];
  164.  
  165.                 for (int y = 0; y < Y_DIMENSION; y++) {
  166.                     for (int x = 0; x < X_DIMENSION; x++) {
  167.                         // populate the point with depth image pixel coordinates and the depth value
  168.                         posUVZ[y*Y_DIMENSION + x].x = (pxcF32)x;
  169.                         posUVZ[y*Y_DIMENSION + x].y = (pxcF32)y;
  170.                         posUVZ[y*Y_DIMENSION + x].z = depthFrames[nframes*X_DIMENSION*Y_DIMENSION + y*Y_DIMENSION+ x];
  171.                     }
  172.                 }
  173.  
  174.                 //projection->ProjectDepthToCamera(num_pixels, posUVZ, pos3D);
  175.  
  176.  
  177.                 /*
  178.                 for (unsigned int yy = 0, k = 0; yy < Y_DIMENSION; yy++)
  179.                 {
  180.                     for (unsigned int xx = 0; xx < X_DIMENSION; xx++, k++)
  181.                     {
  182.                         posUVZ[k].x = (pxcF32)xx;
  183.                         posUVZ[k].y = (pxcF32)yy;
  184.                         posUVZ[k].z = (pxcF32)dpixels[yy * dpitch + xx];
  185.                             //  cout << "xx is " << posUVZ[k].x << endl;
  186.                              // cout << "yy is " << posUVZ[k].y << endl;
  187.                              // cout << "zz is " << posUVZ[k].z<< endl;
  188.                     }
  189.                 }
  190.                 */
  191.  
  192.  
  193.  
  194.  
  195.                 // convert the array of depth coordinates + depth value (posUVZ) into the world coordinates (pos3D) in mm
  196.                
  197.                 //Step 1: Set up the point cloud in camera coordinates and do the projection
  198.                
  199.  
  200.  
  201.                 //projection->ProjectDepthToCamera(num_pixels, posUVZ, pos3D);
  202.                
  203.                 if (projection->ProjectDepthToCamera(num_pixels, posUVZ, pos3D) < PXC_STATUS_NO_ERROR)
  204.                 {
  205.                 delete[] posUVZ;
  206.                 delete[] pos3D;
  207.                 cout << "projection unsucessful";
  208.                 return;
  209.                 }
  210.                
  211.                 cout << "*************" << endl;
  212.                 for (unsigned int yy = 0, k = 0; yy < Y_DIMENSION; yy++)
  213.                 {
  214.                     for (unsigned int xx = 0; xx < X_DIMENSION; xx++, k++)
  215.                     {
  216.                         cout << "xx is " << pos3D[k].x << endl;
  217.                         cout << "yy is " << pos3D[k].y << endl;
  218.                         cout << "zz is " << pos3D[k].z << endl;
  219.                         xyzBuffer.push_back(cv::Point3f(pos3D[k].x, pos3D[k].y, pos3D[k].z));
  220.                     }
  221.                 }
  222.                 cout << "/////////\\\\\\\\" << endl;
  223.                 projection->Release();
  224.                 depthMap->ReleaseAccess(&depthImage); //is this in the right place mona?
  225.            
  226.             }
  227.             printf("opened sensor\n");
  228.             pp->ReleaseFrame();
  229.             printf("acquired image\n");
  230.  
  231.         }
  232.  
  233.     } while (sts == Status::STATUS_STREAM_CONFIG_CHANGED);
  234.    
  235.     //numRows = width and numCol = height? ?
  236.     //numPixels = dd.img.numRows * dd.img.numColumns; // Number of pixels in camera
  237.     numPixels = depth_width * depth_height;
  238.     dists = new float[3 * numPixels]; // Dists contains XYZ values. needs to be 3x the size of numPixels
  239.     amps = new float[numPixels];
  240.     //frame = cvCreateImage(cvSize(dd.img.numColumns, dd.img.numRows), 8, 3); // Create the frame
  241.     frame = cvCreateImage(cvSize(depth_width, depth_height), 8, 3); // Create the frame
  242.  
  243.     KF.init(6, 3, 0);
  244.     KF.transitionMatrix = (cv::Mat_<float>(6, 6) << 1, 0, 0, 1, 0, 0,
  245.                                                     0, 1, 0, 0, 1, 0,
  246.                                                     0, 0, 1, 0, 0, 1,
  247.                                                     0, 0, 0, 1, 0, 0,
  248.                                                     0, 0, 0, 0, 1, 0,
  249.                                                     0, 0, 0, 0, 0, 1);
  250.     measurement = cv::Mat_<float>::zeros(3, 1);
  251.  
  252.     //Initaite Kalman
  253.     KF.statePre.setTo(0);
  254.     setIdentity(KF.measurementMatrix);
  255.     setIdentity(KF.processNoiseCov, cv::Scalar::all(.001)); // Adjust this for faster convergence - but higher noise
  256.     setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-1));
  257.     setIdentity(KF.errorCovPost, cv::Scalar::all(.1));
  258.  
  259.     /* mona will test this
  260.     setIdentity(KF.measurementMatrix);
  261.     setIdentity(KF.processNoiseCov, Scalar::all(smoothness));
  262.     setIdentity(KF.measurementNoiseCov, Scalar::all(rapidness));
  263.     setIdentity(KF.errorCovPost, Scalar::all(.1));
  264.     */
  265. }
  266.  
/***
Public destructor for the SR300 Camera depth sensor.
Intentionally empty: teardown of the shared SenseManager is performed
explicitly via destroyInstance(), not here.
***/
SR300Camera::~SR300Camera() {};
  271.  
/***
Shuts the sensor down by releasing the shared SenseManager.
NOTE(review): the constructor also calls pp->Release() on an init
failure; calling this afterwards would release pp a second time —
verify against the SDK's ownership rules.
***/
void SR300Camera::destroyInstance()
{
    printf("closing sensor\n");
    pp->Release();
    printf("sensor closed\n");
}
  278.  
/***
Create xyzMap, zMap, ampMap, and flagMap from sensor input.
Delegates to initilizeImages() (declared elsewhere, presumably in the
base class — confirm) and fillInAmps(); note that fillInAmps() is
currently a stub (see below in this file).
***/
void SR300Camera::update()
{
    initilizeImages();
   
    fillInAmps();
}
  288.  
  289.  
  290.  
/***
Reads the amplitude data from the sensor and fills in the matrix.
NOTE(review): currently a stub — the PMD-based implementation was
commented out during the port to the RealSense SDK, so ampMap is
never populated here.
***/
void SR300Camera::fillInAmps()
{
    //int res = pmdGetAmplitudes(hnd, amps, numPixels * sizeof(float));
    //float * dataPtr = amps;
    //ampMap.data = (uchar *)amps;
}
  300.  
  301. /***
  302. Returns the X value at (i, j)
  303. ***/
  304. float SR300Camera::getX(int i, int j) const
  305. {
  306.     //int flat = j * dd.img.numColumns * 3 + i * 3;
  307.     int flat = j * depth_width * 3 + i * 3;
  308.     return dists[flat];
  309. }
  310.  
  311. /***
  312. Returns the Y value at (i, j)
  313. ***/
  314. float SR300Camera::getY(int i, int j) const
  315. {
  316.     //int flat = j * dd.img.numColumns * 3 + i * 3;
  317.     int flat = j * depth_width * 3 + i * 3;
  318.     return dists[flat + 1];
  319. }
  320.  
  321. /***
  322. Returns the Z value at (i, j)
  323. ***/
  324. float SR300Camera::getZ(int i, int j) const
  325. {
  326.     //int flat = j * dd.img.numColumns * 3 + i * 3;
  327.     int flat = j * depth_width * 3 + i * 3;
  328.     return dists[flat + 2];
  329. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement