#include "yWebcam.h"
#include "Engine/yEngineServices.h"
#include "Textures/yTexture.h"
#include "Textures/yTextureManager.h"
#include "Macros/yMacros.h"
#include "OgreStagingTexture.h"
#include "OgreTextureGpu.h"
#include "OgrePixelFormatGpuUtils.h"
#include "OgreImage2.h"
#include "OgreRoot.h"
#include <QTime>
#include <QVariant>
#include <dshow.h>
#include <iostream>
yWebcam::yWebcam()
{
    yAssert( yUtils::isLogicThread(), "new yWebcam must be called in Logic thread" );
    mCapturing = false;
    mCaptureReady = false;
    mDeviceOpen = false;
    mDeviceIndex = -1;
    mTexture = nullptr;
    mDatabase = nullptr;
    mDistanceToObect = 1.0f; // member keeps the "Obect" spelling declared in yWebcam.h
}
yWebcam::~yWebcam(){
}
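// Opens the capture device whose DirectShow name matches deviceName, or the
// first available device when the name is empty, and creates the GPU texture
// the captured frames are streamed into.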
bool yWebcam::openDevice( std::string deviceName ){
    yAssert( yUtils::isLogicThread(), "openDevice must be called in Logic thread" );
    std::vector<std::string> deviceList = getVideoDevices();
    if( deviceName.empty() && deviceList.size() > 0 ){
        mDeviceIndex = 0; // no name requested: fall back to the first device found
    }
    else{
        for( size_t i = 0; i < deviceList.size(); ++i ){
            if( deviceList.at( i ) == deviceName ){
                mDeviceIndex = static_cast<int>( i );
                break;
            }
        }
    }
    if( mDeviceIndex >= 0 ){ // the requested device was found
        if( !mCvCapture.open( mDeviceIndex ) ){
            std::cout<<yWarning<<"Could not open webcam at index "<<mDeviceIndex<<std::endl;
        }
        else{
            std::cout<<"yWebcam. Opened device: "<<deviceList.at( mDeviceIndex )<<std::endl;
            mDeviceOpen = true;
            // Optional capture tuning, kept for reference:
            // mCvCapture.set(cv::CAP_PROP_FRAME_WIDTH,1280);
            // mCvCapture.set(cv::CAP_PROP_FRAME_HEIGHT,720);
            // mCvCapture.set(cv::CAP_PROP_FPS,30);
            // mCvCapture.set(cv::CAP_PROP_WHITE_BALANCE_BLUE_U,7000);
            // mCvCapture.set(cv::CAP_PROP_WHITE_BALANCE_RED_V,7000);
            // mCvCapture.set(cv::CAP_PROP_AUTO_EXPOSURE,0.75);
            // mCvCapture.set(cv::CAP_PROP_AUTO_WB,1);
            // Grab one frame so the texture can be sized from the actual capture resolution.
            mCvCapture >> mCvFrame;
            // Created at twice the capture size; runCaptureThread() upscales every
            // frame by the same factor before upload.
            mTexture = yEngineServices::getTextureManager().createTexture(
                "WebcamCapture_" + std::to_string( mDeviceIndex ),
                mCvFrame.cols * 2, mCvFrame.rows * 2, 0,
                Ogre::PixelFormatGpu::PFG_RGBA8_UNORM_SRGB,
                Ogre::TextureFlags::ManualTexture,
                Ogre::TextureTypes::Type2DArray );
        }
    }
    return mDeviceOpen;
}
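// Reads one integer value from the CONFIG table. Assumes mDatabase was already
// opened (see processImage(), which creates the table on first use).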
int yWebcam::loadConfigParameter( QString parameterName ){
    // Bind the name instead of concatenating it into the SQL string.
    QSqlQuery query( *mDatabase );
    query.prepare( "SELECT Value FROM CONFIG WHERE ParameterName = :name;" );
    query.bindValue( ":name", parameterName );
    if( !query.exec() || !query.next() )
        return 0; // missing parameter: same fallback the old unchecked code produced
    return query.value( 0 ).toInt();
}
void yWebcam::closeDevice(){
    yAssert( yUtils::isLogicThread(), "closeDevice must be called in Logic thread" );
    stopCapturing();
    mCvCapture.release();
}
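// Producer side of a single-slot handshake with frameRenderingQueued(): this
// thread only writes mCvFrame while mCaptureReady is false, and the render
// thread only reads it while mCaptureReady is true. This is only race-free if
// mCapturing and mCaptureReady are declared std::atomic<bool> in yWebcam.h
// (assumed here; plain bools would make the handshake undefined behaviour).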
void yWebcam::runCaptureThread(){
    while( mCapturing ){
        if( !mCaptureReady ){
            mCvCapture >> mCvFrame;
            if( !mCvFrame.empty() ){
                cv::cvtColor( mCvFrame, mCvFrame, cv::COLOR_BGR2RGBA, 0 );
                // Upscale 2x to match the texture created in openDevice().
                cv::resize( mCvFrame, mCvFrame, cv::Size( mCvFrame.cols * 2, mCvFrame.rows * 2 ), 0, 0, cv::INTER_LINEAR );
                resizeStereoImage();
                //processImage(); // if re-enabled, see the channel-order notes on processImage()
                mCaptureReady = true;
            }
        }
        else{
            // Last frame not consumed yet: yield briefly instead of busy-spinning a core.
            std::this_thread::sleep_for( std::chrono::milliseconds( 1 ) );
        }
    }
}
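// Consumer side of the handshake: runs on the render thread once per frame
// and uploads the most recent captured image into the GPU texture.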
bool yWebcam::frameRenderingQueued( const Ogre::FrameEvent &evt ){
    if( mCaptureReady ){
        Ogre::Image2 image;
        size_t sizeBytes = Ogre::PixelFormatGpuUtils::calculateSizeBytes(
            mTexture->mOgreTexture->getWidth(), mTexture->mOgreTexture->getHeight(), 1u, 1u,
            mTexture->mOgreTexture->getPixelFormat(), 1u, 4u );
        // Image2 takes ownership of this buffer (autoDelete = true below) and frees
        // it when it goes out of scope at the end of this block.
        Ogre::uint8 *data = reinterpret_cast<Ogre::uint8*>(
            OGRE_MALLOC_SIMD( sizeBytes, Ogre::MEMCATEGORY_GENERAL ) );
        image.loadDynamicImage( data, mTexture->mOgreTexture->getWidth(), mTexture->mOgreTexture->getHeight(), 1u,
                                Ogre::TextureTypes::Type2DArray, mTexture->mOgreTexture->getPixelFormat(),
                                true, 1u );
        // Assumes mCvFrame is continuous and matches the texture extents exactly,
        // which holds because openDevice() sized the texture from the same 2x resize.
        memcpy( data, mCvFrame.data, sizeBytes );
        image.uploadTo( mTexture->mOgreTexture, 0, mTexture->mOgreTexture->getNumMipmaps() - 1u );
        mCaptureReady = false; // hand the slot back to the capture thread
    }
    return true;
}
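#include "OgreTextureGpuManager.h"
#include "OgreTextureBox.h"
// Sketch of an alternative upload path using the StagingTexture API whose
// header is already included above. Not wired in anywhere; shown because it
// avoids the per-frame OGRE_MALLOC_SIMD/Image2 round trip in
// frameRenderingQueued(). Assumes Ogre-next 2.2+ and a Resident texture;
// the function name is hypothetical.
static void uploadFrameViaStagingTexture( Ogre::TextureGpu *texture, const cv::Mat &frame )
{
    Ogre::TextureGpuManager *textureManager = texture->getTextureManager();
    // Grab (or create) a staging buffer big enough for one frame.
    Ogre::StagingTexture *staging = textureManager->getStagingTexture(
        texture->getWidth(), texture->getHeight(), 1u, 1u, texture->getPixelFormat() );
    staging->startMapRegion();
    Ogre::TextureBox texBox = staging->mapRegion(
        texture->getWidth(), texture->getHeight(), 1u, 1u, texture->getPixelFormat() );
    // Copy row by row, honouring the cv::Mat stride (frame.step).
    texBox.copyFrom( frame.data, texture->getWidth(), texture->getHeight(),
                     static_cast<Ogre::uint32>( frame.step ) );
    staging->stopMapRegion();
    staging->upload( texBox, texture, 0 );
    textureManager->removeStagingTexture( staging ); // returns it to the pool
}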
void yWebcam::resizeStereoImage(){
    // Split the side-by-side stereo frame into left/right eye halves.
    cv::Mat eyeImage[2];
    eyeImage[0] = mCvFrame( cv::Rect( 0, 0, mCvFrame.cols/2, mCvFrame.rows ) );
    eyeImage[1] = mCvFrame( cv::Rect( mCvFrame.cols/2, 0, mCvFrame.cols/2, mCvFrame.rows ) );
    // Build a fully transparent background image of the same size.
    cv::Mat bgImage;
    mCvFrame.copyTo( bgImage );
    std::vector<cv::Mat> rgbaChannels(4);
    cv::split( bgImage, rgbaChannels );
    rgbaChannels[3].setTo( 0 );
    cv::merge( rgbaChannels, bgImage );
    // Empirically tuned scale/offset curves driven by the distance to the object.
    float clampedDistance = Ogre::Math::Clamp( mDistanceToObect, 0.1f, 0.6f );
    float mainScale = 0.29f + clampedDistance*0.5f;
    float scaleX = mainScale;
    float scaleY = mainScale*0.65f; // 0.65 is the required aspect ratio
    float offsetX[2];
    offsetX[0] = 215.0f - clampedDistance*150;
    offsetX[1] = 230.0f - clampedDistance*150;
    float offsetY = 275.0f - clampedDistance*110;
    for( size_t i=0; i<2; ++i ){
        cv::Mat resizedEyeImage;
        cv::resize( eyeImage[i], resizedEyeImage,
                    cv::Size( cvRound( eyeImage[i].cols * scaleX ), cvRound( eyeImage[i].rows * scaleY ) ),
                    0, 0, cv::INTER_LINEAR );
        cv::Point2f src_p[4];
        cv::Point2f dst_p[4];
        float w = (float)resizedEyeImage.cols;
        float h = (float)resizedEyeImage.rows;
        // The 4 points that select a quadrilateral on the input, from top-left in clockwise order
        float perspectiveOffset = 45.0f;
        src_p[0] = cv::Point2f( -perspectiveOffset, 0.0f );
        src_p[1] = cv::Point2f( w + perspectiveOffset, 0.0f );
        src_p[2] = cv::Point2f( w, h );
        src_p[3] = cv::Point2f( 0.0f, h );
        // The 4 points where the mapping is to be done, from top-left in clockwise order
        dst_p[0] = cv::Point2f( 0.0f, 0.0f );
        dst_p[1] = cv::Point2f( w, 0.0f );
        dst_p[2] = cv::Point2f( w, h );
        dst_p[3] = cv::Point2f( 0.0f, h );
        // Squeeze the widened top edge back in, giving a slight keystone correction.
        cv::Mat trans_mat33 = cv::getPerspectiveTransform( src_p, dst_p );
        cv::warpPerspective( resizedEyeImage, resizedEyeImage, trans_mat33, resizedEyeImage.size(), cv::INTER_LINEAR );
        // Paste each eye into its half of the transparent background. The offsets
        // must keep the ROI inside bgImage or copyTo will throw.
        resizedEyeImage.copyTo( bgImage( cv::Rect( cvRound( i*mCvFrame.cols/2 + offsetX[i] ), cvRound( offsetY ),
                                                   resizedEyeImage.cols, resizedEyeImage.rows ) ) );
    }
    mCvFrame = bgImage;
}
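// Chroma-key style masking: pixels whose HSV and RGB values fall inside the
// configured ranges get their alpha zeroed. Currently disabled at the call
// site in runCaptureThread(); as written it expects a 4-channel frame, since
// the merge below writes the mask into channel 3.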
void yWebcam::processImage(){
    if( !mDatabase ){
        // Lazily open the config database on first use and make sure the table exists.
        mDatabase = new QSqlDatabase( QSqlDatabase::addDatabase("QSQLITE") );
        mDatabase->setDatabaseName( "Config.db" );
        mDatabase->open();
        mDatabase->exec("CREATE TABLE IF NOT EXISTS CONFIG( ParameterName TEXT UNIQUE, Value TEXT );");
        // The trailing 0 selects the first (and currently only) configuration set.
        mConfig.hsvMin[0] = loadConfigParameter( "hMin"+QString::number(0) );
        mConfig.hsvMax[0] = loadConfigParameter( "hMax"+QString::number(0) );
        mConfig.hsvMin[1] = loadConfigParameter( "sMin"+QString::number(0) );
        mConfig.hsvMax[1] = loadConfigParameter( "sMax"+QString::number(0) );
        mConfig.hsvMin[2] = loadConfigParameter( "vMin"+QString::number(0) );
        mConfig.hsvMax[2] = loadConfigParameter( "vMax"+QString::number(0) );
        mConfig.rgbMin[0] = loadConfigParameter( "rMin"+QString::number(0) );
        mConfig.rgbMax[0] = loadConfigParameter( "rMax"+QString::number(0) );
        mConfig.rgbMin[1] = loadConfigParameter( "gMin"+QString::number(0) );
        mConfig.rgbMax[1] = loadConfigParameter( "gMax"+QString::number(0) );
        mConfig.rgbMin[2] = loadConfigParameter( "bMin"+QString::number(0) );
        mConfig.rgbMax[2] = loadConfigParameter( "bMax"+QString::number(0) );
    }
    std::vector<cv::Mat> rgbaChannels(4);
    cv::Mat hsvImage;
    cv::Mat maskHSV;
    cv::Mat maskRGB;
    cv::Mat frameMask;
    cv::cvtColor( mCvFrame, hsvImage, cv::COLOR_BGR2HSV, 0 );
    cv::split( mCvFrame, rgbaChannels );
    cv::inRange( hsvImage, mConfig.hsvMin, mConfig.hsvMax, maskHSV );
    cv::inRange( mCvFrame, mConfig.rgbMin, mConfig.rgbMax, maskRGB );
    // Combine both masks; on 0/255 8-bit masks the saturating multiply acts as a logical AND.
    cv::multiply( maskHSV, maskRGB, frameMask );
    // Open/close to remove speckle noise before the mask becomes the alpha channel.
    cv::Mat kernel = cv::getStructuringElement( cv::MORPH_RECT, cv::Size(3, 3) );
    cv::morphologyEx( frameMask, frameMask, cv::MORPH_OPEN, kernel );
    cv::morphologyEx( frameMask, frameMask, cv::MORPH_CLOSE, kernel );
    // Invert so keyed pixels end up transparent (alpha 0) and the rest opaque.
    cv::threshold( frameMask, frameMask, 0, 255, cv::THRESH_BINARY_INV );
    rgbaChannels[3] = frameMask;
    cv::merge( rgbaChannels, mCvFrame );
}
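// Enumerates video capture devices through DirectShow; openDevice() assumes
// OpenCV indexes devices in this same order. COM must already be initialised
// on the calling thread (CoInitializeEx), otherwise CoCreateInstance fails.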
std::vector<std::string> yWebcam::getVideoDevices()
{
    std::vector<std::string> deviceList;
    IEnumMoniker *pEnum = NULL;
    // Create the System Device Enumerator.
    ICreateDevEnum *pDevEnum;
    HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL,
                                  CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pDevEnum));
    if (SUCCEEDED(hr))
    {
        // Create an enumerator for the video input category.
        hr = pDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pEnum, 0);
        if (hr == S_FALSE)
        {
            hr = VFW_E_NOT_FOUND; // The category is empty. Treat as an error.
        }
        pDevEnum->Release();
    }
    if( !pEnum ) return deviceList;
    IMoniker *pMoniker = NULL;
    while (pEnum->Next(1, &pMoniker, NULL) == S_OK)
    {
        IPropertyBag *pPropBag;
        HRESULT hr = pMoniker->BindToStorage(0, 0, IID_PPV_ARGS(&pPropBag));
        if (FAILED(hr))
        {
            pMoniker->Release();
            continue;
        }
        VARIANT var;
        VariantInit(&var);
        // Get the description or, failing that, the friendly name.
        hr = pPropBag->Read(L"Description", &var, 0);
        if (FAILED(hr))
        {
            hr = pPropBag->Read(L"FriendlyName", &var, 0);
        }
        if (SUCCEEDED(hr))
        {
            // Narrowing wchar->char conversion: assumes ASCII device names.
            std::wstring ws(var.bstrVal, SysStringLen(var.bstrVal));
            std::string str( ws.begin(), ws.end() );
            deviceList.push_back( str );
            VariantClear(&var);
        }
        pPropBag->Release();
        pMoniker->Release();
    }
    pEnum->Release();
    return deviceList;
}
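// Starts the grab thread and registers this object as an Ogre frame listener,
// so frameRenderingQueued() is driven by the render loop; stopCapturing()
// undoes both and joins the thread.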
void yWebcam::startCapturing(){
    if( !mCapturing && mDeviceOpen ){
        mCapturing = true;
        mCaptureThread = std::thread( &yWebcam::runCaptureThread, this );
        Ogre::Root::getSingletonPtr()->addFrameListener( this );
    }
}
void yWebcam::stopCapturing(){
    if( mCapturing && mDeviceOpen ){
        mCapturing = false; // signals runCaptureThread() to exit its loop
        Ogre::Root::getSingletonPtr()->removeFrameListener( this );
        mCaptureThread.join();
    }
}
void yWebcam::notifyDistanceToObject( float distance ){
    mDistanceToObect = distance;
}
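// Typical call sequence from owning code (hypothetical caller, for reference):
//   yWebcam *cam = new yWebcam();   // must run on the Logic thread
//   if( cam->openDevice( "" ) )     // empty name picks the first device found
//       cam->startCapturing();      // spawns grab thread + frame listener
//   ...
//   cam->notifyDistanceToObject( 0.4f );
//   cam->closeDevice();             // stops capturing and releases the device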