Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #include "SR300Camera.h"
- #include "RealSense/SenseManager.h"
- #include "RealSense/SampleReader.h"
- #include "util_render.h"
- #include <iostream>
/***
Private constructor for the SR300 Camera depth sensor
***/
using namespace std;               // NOTE(review): using-directives at file scope leak into every includer if this moves to a header
using namespace Intel::RealSense;

// File-scope camera state shared by the SR300Camera methods below.
SenseManager *pp = SenseManager::CreateInstance(); // RSSDK pipeline; released in destroyInstance()
Capture::Device *device;           // active capture device, assigned in the constructor
CaptureManager *cm;                // capture manager queried from pp
const int depth_fps = 30;          // requested depth stream frame rate
int depth_width;                   // depth frame width, filled in from the first acquired sample
int depth_height;                  // depth frame height, filled in from the first acquired sample
cv::Size bufferSize;               // size used by the (commented-out) XYZ buffer helpers below
const PXCCapture::Sample *sample;  // most recent frame sample from pp->QuerySample()
std::vector<cv::Point3f> xyzBuffer;// scratch buffer for projected 3-D points
- SR300Camera::SR300Camera(bool use_live_sensor)
- {
- CONFIDENCE_THRESHHOLD = (60.0f / 255.0f*500.0f);
- //INVALID_FLAG_VALUE = PMD_FLAG_INVALID;
- X_DIMENSION = 176;
- Y_DIMENSION = 120;
- if (!use_live_sensor) {
- return;
- }
- int res;
- std::cout << "Trying to open SR300 camera\n";
- if (!pp) {
- wprintf_s(L"Unable to create the SenseManager\n");
- }
- /* Sets file recording or playback */
- cm = pp->QueryCaptureManager();
- Status sts= STATUS_NO_ERROR;
- // Create stream renders
- UtilRender renderc(L"Color"), renderd(L"Depth"), renderi(L"IR"), renderr(L"Right"), renderl(L"Left");
- do {
- bool revert = false;
- DataDesc desc = {};
- if (cm->QueryCapture()) {
- cm->QueryCapture()->QueryDeviceInfo(0, &desc.deviceInfo);
- }
- else {
- desc.deviceInfo.streams = Capture::STREAM_TYPE_COLOR | Capture::STREAM_TYPE_DEPTH;
- revert = true;
- }
- pp->EnableStreams(&desc);
- /*sts = pp->EnableStream(Capture::StreamType::STREAM_TYPE_DEPTH,
- X_DIMENSION, Y_DIMENSION, depth_fps);
- if (sts < PXC_STATUS_NO_ERROR) {
- throw std::runtime_error("depth stream encoutered an error!!");
- }
- VideoModule::DataDesc ddesc = {};
- ddesc.deviceInfo.streams = PXCCapture::STREAM_TYPE_COLOR | PXCCapture::STREAM_TYPE_DEPTH;
- pp->EnableStreams(&ddesc);
- */
- sts = pp->Init();
- if (sts < Status::STATUS_NO_ERROR) {
- if (revert) {
- pp->Close();
- pp->EnableStream(Capture::STREAM_TYPE_DEPTH);
- sts = pp->Init();
- if (sts < Status::STATUS_NO_ERROR) {
- pp->Close();
- //pp->EnableStream(Capture::STREAM_TYPE_COLOR);
- sts = pp->Init();
- }
- }
- if (sts < Status::STATUS_NO_ERROR) {
- wprintf_s(L"Failed to locate any video stream(s)\n");
- pp->Release();
- }
- }
- device = pp->QueryCaptureManager()->QueryDevice();
- device->ResetProperties(Capture::STREAM_TYPE_ANY);
- cout << "accuracy is: "<<device->IVCAM_ACCURACY_MEDIAN;
- for (int nframes = 0; ; nframes++) {
- sts = pp->AcquireFrame(false);
- if (sts < Status::STATUS_NO_ERROR) {
- if (sts == Status::STATUS_STREAM_CONFIG_CHANGED) {
- wprintf_s(L"Stream configuration was changed, re-initilizing\n");
- pp->Close();
- }
- break;
- }
- sample = pp->QuerySample();
- //const Capture::Sample *sample = pp->QuerySample();
- if (sample) {
- cv::Mat img;
- //Copy all the vertices from Point3DF32 to the mat image
- //QueryVertices(Image *depth, Point3DF32 *vertices)
- //sample->color->QueryInfo()
- depth_width = sample->depth->QueryInfo().width;
- depth_height = sample->depth->QueryInfo().height;
- cout << "depth width is: " << depth_width << endl;
- cout << "depth heigth is: " << depth_height << endl;
- //cv::imshow("depth from OpenARK", Visualizer::visualizeDepthMap(img))
- if (sample->depth && !renderd.RenderFrame(sample->depth)) break;
- if (sample->color && !renderc.RenderFrame(sample->color)) break;
- if (sample->ir && !renderi.RenderFrame(sample->ir)) break;
- Image::ImageData depthImage;
- Image *depthMap = sample->depth;
- depthMap->AcquireAccess(Image::ACCESS_READ, &depthImage);
- Image::ImageInfo imgInfo = depthMap->QueryInfo();
- int depth_stride = depthImage.pitches[0] / sizeof(pxcU16);
- Projection * projection = device->CreateProjection();
- cout << "projection is: " << projection;
- cout << "device is: " << device;
- //int num_pixels = depth_height * depth_width;
- int num_pixels = X_DIMENSION * Y_DIMENSION;
- Point3DF32* vertices = new Point3DF32[num_pixels];
- projection->QueryVertices(depthMap, vertices);
- pxcU16 *dpixels = (pxcU16*)depthImage.planes[0];
- unsigned int dpitch = depthImage.pitches[0] / sizeof(pxcU16);
- for (int k = 0; k < num_pixels; k++) {
- cout << "xx is " << vertices[k].x << endl;
- cout << "yy is " << vertices[k].y << endl;
- cout << "zz is " << vertices[k].z << endl;
- }
- }
- printf("opened sensor\n");
- pp->ReleaseFrame();
- printf("acquired image\n");
- }
- } while (sts == Status::STATUS_STREAM_CONFIG_CHANGED);
- /*res = pmdOpen(&hnd, SOURCE_PLUGIN, SOURCE_PARAM, PROC_PLUGIN, PROC_PARAM); //Open the PMD sensor
- if (res != PMD_OK)
- {
- pmdGetLastError(0, err, 128);
- fprintf(stderr, "Could not connect: %s\n", err);
- return;
- }
- */
- // res: Structure which contains various meta-information about the data delivered by your Nano.
- // It is advisabe to always use the data delivered by this struct (for example the width and height of the imager
- // and the image format). Please refer to the PMSDSK documentation for more information
- /*
- res = pmdGetSourceDataDescription(hnd, &dd);
- if (sts < STATUS_NO_ERROR) {
- pp->Release();
- return;
- }
- */
- /*
- if (res != PMD_OK)
- {
- pmdGetLastError(hnd, err, 128);
- fprintf(stderr, "Couldn't get data description: %s\n", err);
- pp->Release();
- //pmdClose(hnd);
- return;
- }
- printf("retrieved source data description\n");
- if (dd.subHeaderType != PMD_IMAGE_DATA)
- {
- fprintf(stderr, "Source data is not an image!\n");
- pmdClose(hnd);
- return;
- }
- */
- //numRows = width and numCol = height? ?
- //numPixels = dd.img.numRows * dd.img.numColumns; // Number of pixels in camera
- numPixels = depth_width * depth_height;
- dists = new float[3 * numPixels]; // Dists contains XYZ values. needs to be 3x the size of numPixels
- amps = new float[numPixels];
- //frame = cvCreateImage(cvSize(dd.img.numColumns, dd.img.numRows), 8, 3); // Create the frame
- frame = cvCreateImage(cvSize(depth_width, depth_height), 8, 3); // Create the frame
- KF.init(6, 3, 0);
- KF.transitionMatrix = (cv::Mat_<float>(6, 6) << 1, 0, 0, 1, 0, 0,
- 0, 1, 0, 0, 1, 0,
- 0, 0, 1, 0, 0, 1,
- 0, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 1, 0,
- 0, 0, 0, 0, 0, 1);
- measurement = cv::Mat_<float>::zeros(3, 1);
- //Initaite Kalman
- KF.statePre.setTo(0);
- setIdentity(KF.measurementMatrix);
- setIdentity(KF.processNoiseCov, cv::Scalar::all(.001)); // Adjust this for faster convergence - but higher noise
- setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-1));
- setIdentity(KF.errorCovPost, cv::Scalar::all(.1));
- /* mona will test this
- setIdentity(KF.measurementMatrix);
- setIdentity(KF.processNoiseCov, Scalar::all(smoothness));
- setIdentity(KF.measurementNoiseCov, Scalar::all(rapidness));
- setIdentity(KF.errorCovPost, Scalar::all(.1));
- */
- }
/***
Public deconstructor for the SR300 Camera depth sensor
***/
// NOTE(review): `dists` / `amps` (new[]) and `frame` (cvCreateImage) allocated in
// the constructor are never freed here, so they leak on destruction.  They are
// only allocated on the use_live_sensor path and may be uninitialized otherwise --
// confirm the members are null-initialized before adding delete[]/cvReleaseImage.
SR300Camera::~SR300Camera() {};
- void SR300Camera::destroyInstance()
- {
- printf("closing sensor\n");
- pp->Release();
- printf("sensor closed\n");
- }
/***
Create xyzMap, zMap, ampMap, and flagMap from sensor input
***/
void SR300Camera::update()
{
    initilizeImages();  // (re)allocate the per-frame image buffers
    fillInAmps();       // amplitude path -- currently a stub
    //fillInZCoords();
    // Flags. Helps with denoising.
    /*
    unsigned *flags = new unsigned[ampMap.cols*ampMap.rows];
    int res = pmdGetFlags(hnd, flags, numPixels * sizeof(unsigned));
    flagMap.data = (uchar *)flags;
    pmdUpdate(hnd);
    delete(flags);
    */
    //mona pp->ReleaseFrame(); //correct?
    //mona pxcStatus sts = pp->AcquireFrame(true);
    //if (sts < PXC_STATUS_NO_ERROR)
    //  return;
}
- /***
- Reads the depth data from the sensor and fills in the matrix
- ***/
- /*
- void SR300Camera::fillInZCoords()
- {
- Image::ImageData depthImage;
- Image *depthMap = sample->depth;
- depthMap->AcquireAccess(Image::ACCESS_READ, &depthImage);
- Image::ImageInfo imgInfo = depthMap->QueryInfo();
- int depth_stride = depthImage.pitches[0] / sizeof(pxcU16);
- unsigned int wxhDepth = depth_width * depth_height;
- auto sts = pp->EnableStream(Capture::StreamType::STREAM_TYPE_DEPTH,
- depth_width, depth_height, depth_fps);
- if (sts < PXC_STATUS_NO_ERROR) {
- throw std::runtime_error("depth stream encoutered an error!!");
- }
- pp->Init();
- device = pp->QueryCaptureManager()->QueryDevice();
- //cout<<"accuracy is: "<<device->IVCAM_ACCURACY_MEDIAN;
- Projection * projection = device->CreateProjection();
- cout << "projection is: " << projection;
- cout << "device is: " << device;
- Point3DF32* vertices = new Point3DF32[wxhDepth];
- projection->QueryVertices(depthMap, vertices);
- pxcU16 *dpixels = (pxcU16*)depthImage.planes[0];
- unsigned int dpitch = depthImage.pitches[0] / sizeof(pxcU16);
- for (int k = 0; k < wxhDepth; k++) {
- cout << "xx is " << vertices[k].x << endl;
- cout << "yy is " << vertices[k].y << endl;
- cout << "zz is " << vertices[k].z << endl;
- }
- }
- */
- /*
- for (unsigned int yy = 0, k = 0; yy < depth_height; yy++)
- {
- for (unsigned int xx = 0; xx < depth_width; xx++, k++)
- {
- cout << "xx is " << pos3D[k].x*1000.0 << endl;
- cout << "yy is " << pos3D[k].y*1000.0 << endl;
- cout << "zz is " << pos3D[k].z*1000.0 << endl;
- xyzBuffer.push_back(cv::Point3f(pos3D[k].x, pos3D[k].y, pos3D[k].z));
- }
- }
- */
- /*
- for (int idx = 0; idx < wxhDepth; idx++) {
- cout << "x is " << pos3D[idx].x*1000.0 << endl;
- cout << "y is " << pos3D[idx].y*1000.0 << endl;
- cout << "z is " << pos3D[idx].z*1000.0 << endl;
- xyzBuffer.push_back(cv::Point3f(pos3D[idx].x, pos3D[idx].y, pos3D[idx].z));
- }
- */
- //xyzMap = cv::Mat(xyzMap.size(), xyzMap.type, &pos3D);
- //xyzMap = cv::Mat(xyzBuffer);
- //cout << "xyzMap = " << endl << " " << xyzMap << endl << endl;
- //projection->Release();
- //delete[] posUVZ;
- //delete[] pos3D;
- //};
/***
Reads the amplitude data from the sensor and fills in the matrix
***/
void SR300Camera::fillInAmps()
{
    // Stub: the PMD-based amplitude path was disabled during the RSSDK port;
    // ampMap is currently left untouched by this call.
    //int res = pmdGetAmplitudes(hnd, amps, numPixels * sizeof(float));
    //float * dataPtr = amps;
    //ampMap.data = (uchar *)amps;
}
- /***
- Returns the X value at (i, j)
- ***/
- float SR300Camera::getX(int i, int j) const
- {
- //int flat = j * dd.img.numColumns * 3 + i * 3;
- int flat = j * depth_width * 3 + i * 3;
- return dists[flat];
- }
- /***
- Returns the Y value at (i, j)
- ***/
- float SR300Camera::getY(int i, int j) const
- {
- //int flat = j * dd.img.numColumns * 3 + i * 3;
- int flat = j * depth_width * 3 + i * 3;
- return dists[flat + 1];
- }
- /***
- Returns the Z value at (i, j)
- ***/
- float SR300Camera::getZ(int i, int j) const
- {
- //int flat = j * dd.img.numColumns * 3 + i * 3;
- int flat = j * depth_width * 3 + i * 3;
- return dists[flat + 2];
- }
- /***
- Convert the depth coordinates to world coordinates
- Taken from the Projection.cpp from the RSSDK samples
- ***/
- /*
- void SR300Camera::DepthToWorld(Image *depth, vector<PointF32> dcords, vector<PointF32> &wcords) const
- {
- Image::ImageData ddata;
- if (!projection) return;
- if (Status::STATUS_NO_ERROR > depth->AcquireAccess(Image::ACCESS_READ, Image::PIXEL_FORMAT_DEPTH, &ddata))
- return;
- int dsize = dcords.size();
- Point3DF32 invP3D = { -1.f, -1.f, 0.f };
- vector<Point3DF32> dcordsAbs(dsize, invP3D);
- vector<Point3DF32> wcords3D(dsize, invP3D);
- PointF32 invP = { -1.f, -1.f };
- wcords.resize(dsize, invP);
- auto dwidth = static_cast<float>(depth->QueryInfo().width), dheight = static_cast<float>(depth->QueryInfo().height);
- for (auto i = 0; i < dsize; i++) {
- if (dcords[i].x < 0) continue;
- dcordsAbs[i].x = dcords[i].x * dwidth, dcordsAbs[i].y = dcords[i].y * dheight;
- dcordsAbs[i].z = static_cast<pxcF32>(reinterpret_cast<pxcI16*>(ddata.planes[0] + static_cast<int>(dcordsAbs[i].y) * ddata.pitches[0])[static_cast<int>(dcordsAbs[i].x)]);
- }
- depth->ReleaseAccess(&ddata);
- projection->ProjectDepthToCamera(dsize, &dcordsAbs[0], &wcords3D[0]);
- for (auto i = 0; i < dsize; i++) {
- if (dcordsAbs[i].z <= 0) wcords[i].x = wcords[i].y = -1.f;
- else wcords[i].x = dcords[i].x; wcords[i].y = dcords[i].y;
- }
- }
- */
- /*
- void SR300Camera::getXYZbuffer()
- {
- Projection *projection = pp->QueryCaptureManager()->QueryDevice()->CreateProjection();
- std::vector<Point3DF32> vertices;
- vertices.resize(bufferSize.width * bufferSize.height);
- projection->QueryVertices(sample->depth, &vertices[0]);
- xyzBuffer.clear();
- for (int i = 0; i < bufferSize.width*bufferSize.height; i++) {
- Point3f p;
- p.x = vertices[i].x;
- p.y = vertices[i].y;
- p.z = vertices[i].z;
- xyzBuffer.push_back(p);
- }
- //xyzMap = cv::Mat(xyzMap.size(), xyzMap.type(), xyzBuffer);
- //xyzMap = xyzBuffer;
- cv::Mat xyzMat(xyzBuffer);
- projection->Release();
- }
- */
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement