    // ResourcePointer is a smart pointer class for managing COM objects.
    // It has been used extensively for many years in other parts of our code.
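    // ResourcePointer itself is not part of this paste. The following is a sketch of the
    // interface the code below assumes, inferred from how it is used here (the real class may differ):
    //   T*     operator->() const;             // access the managed COM interface
    //          operator T*() const;            // implicit conversion to the raw interface pointer
    //   T**    internalPointer();              // address for COM out-parameters
    //   void** internalVoidPointer();          // address for IID/void** out-parameters
    //          explicit operator bool() const; // non-null test
    //   ~ResourcePointer();                    // calls Release() on the held interface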
    std::deque<ResourcePointer<IMFSample>> inputQueue;  // Queue of encoded video samples waiting to be decoded.
    std::deque<ResourcePointer<IMFSample>> nv12Queue;   // Queue of NV12 frames waiting to be converted to RGB.
    std::deque<ResourcePointer<IMFSample>> outputQueue; // Queue of RGB frames awaiting display.
    ResourcePointer<IMFTransform> decoder;   // Decodes HEVC to NV12.
    ResourcePointer<IMFTransform> processor; // Converts NV12 to RGB.
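    // Also referenced below but declared elsewhere in the class (assumed, not shown in this paste):
    //   UINT32 outputWidth, outputHeight;             // target frame dimensions
    //   IMFDXGIDeviceManager* getDXGIDeviceManager(); // shared DXGI device manager handed to both MFTs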
    // Update is called once per frame...
    void update() {
        pushSamples(processor, nv12Queue);
        pullSamples(processor, outputQueue);
        pushSamples(decoder, inputQueue);
        pullSamples(decoder, nv12Queue);
        updateTexture();
    }
    // Queues incoming HEVC data for feeding to the decoder in update()...
    void streamVideoData(const VideoDecoderPayload& p) {
        ResourcePointer<IMFSample> sample;
        ResourcePointer<IMFMediaBuffer> mediaBuffer;
        BYTE* memory;
        MFCreateMemoryBuffer(static_cast<DWORD>(p.size), mediaBuffer.internalPointer());
        mediaBuffer->Lock(&memory, nullptr, nullptr);
        std::memcpy(memory, p.data, p.size);
        mediaBuffer->Unlock();
        mediaBuffer->SetCurrentLength(static_cast<DWORD>(p.size));
        MFCreateSample(sample.internalPointer());
        sample->SetSampleTime(p.timestamp);
        sample->AddBuffer(mediaBuffer);
        inputQueue.push_back(sample);
    }
    // Initialises the decoder and video processor MF transforms...
    bool createDecoder() {
        MFT_REGISTER_TYPE_INFO inputInfo = {};
        IMFActivate** activators = nullptr;
        UINT32 activatorCount = 0;
        ResourcePointer<IMFMediaType> inputType;
        ResourcePointer<IMFAttributes> attributes;
        MFCreateMediaType(inputType.internalPointer());
        inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
        inputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_HEVC_ES);
        inputInfo.guidMajorType = MFMediaType_Video;
        inputInfo.guidSubtype = MFVideoFormat_HEVC_ES;
        MFTEnumEx(MFT_CATEGORY_VIDEO_DECODER,
                  MFT_ENUM_FLAG_SORTANDFILTER | MFT_ENUM_FLAG_SYNCMFT,
                  &inputInfo,
                  nullptr,
                  &activators,
                  &activatorCount);
        activators[0]->ActivateObject(IID_IMFTransform, decoder.internalVoidPointer());
        MFSetAttributeSize(inputType, MF_MT_FRAME_SIZE, outputWidth, outputHeight);
        decoder->SetInputType(0, inputType, 0);
        decoder->GetAttributes(attributes.internalPointer());
        decoder->ProcessMessage(MFT_MESSAGE_SET_D3D_MANAGER,
                                reinterpret_cast<ULONG_PTR>(getDXGIDeviceManager()));
        attributes->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE);
        // <Clean up and release activators>
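        // The cleanup elided above is presumably the standard MFTEnumEx pattern (the array is
        // CoTaskMemAlloc'd and each activator must be released); something along these lines:
        //     for (UINT32 i = 0; i < activatorCount; ++i) {
        //         activators[i]->Release();
        //     }
        //     CoTaskMemFree(activators);
        //     activators = nullptr;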
        inputInfo.guidSubtype = MFVideoFormat_NV12;
        MFTEnumEx(MFT_CATEGORY_VIDEO_PROCESSOR,
                  MFT_ENUM_FLAG_SORTANDFILTER | MFT_ENUM_FLAG_SYNCMFT,
                  &inputInfo,
                  nullptr,
                  &activators,
                  &activatorCount);
        activators[0]->ActivateObject(IID_IMFTransform, processor.internalVoidPointer());
        processor->ProcessMessage(MFT_MESSAGE_SET_D3D_MANAGER,
                                  reinterpret_cast<ULONG_PTR>(getDXGIDeviceManager()));
        // <Clean up and release activators>
        configureIO();
        decoder->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
        decoder->ProcessMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
        return true;
    }
    void updateTexture() {
        int64_t now = std::chrono::nanoseconds(std::chrono::steady_clock::now().time_since_epoch()).count();
        ResourcePointer<IMFSample> displaySample;
        while (!outputQueue.empty()) {
            LONGLONG sampleTime = 0;
            outputQueue.front()->GetSampleTime(&sampleTime);
            if (sampleTime <= now) {
                displaySample = outputQueue.front();
                // pop_front will call ~ResourcePointer which calls Release.
                outputQueue.pop_front();
            }
            else {
                break;
            }
        }
        if (displaySample) {
            ResourcePointer<IMFMediaBuffer> buffer;
            BYTE* data = nullptr;
            displaySample->GetBufferByIndex(0, buffer.internalPointer());
            buffer->Lock(&data, nullptr, nullptr);
            // <Update a texture with the buffer's content.>
            buffer->Unlock();
            // Calling buffer->Release() here crashes later when ~ResourcePointer calls Release again,
            // i.e. the buffer isn't being leaked. Ditto for displaySample->Release().
        }
    }
    // Configures the decoder output, and the processor input and output...
    void configureIO() {
        ResourcePointer<IMFMediaType> type;
        MFCreateMediaType(type.internalPointer());
        type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
        type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
        MFSetAttributeSize(type, MF_MT_FRAME_SIZE, outputWidth, outputHeight);
        MFSetAttributeRatio(type, MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
        decoder->SetOutputType(0, type, 0);
        processor->SetInputType(0, type, 0);
        type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32);
        type->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1);
        processor->SetOutputType(0, type, 0);
    }
    // Pushes data to the specified transform...
    void pushSamples(ResourcePointer<IMFTransform>& transform, std::deque<ResourcePointer<IMFSample>>& source) {
        while (!source.empty()) {
            if (transform->ProcessInput(0, source.front(), 0) != S_OK) {
                return;
            }
            source.pop_front();
        }
    }
    // Fetches decoded/processed samples from the specified transform...
    void pullSamples(ResourcePointer<IMFTransform>& transform, std::deque<ResourcePointer<IMFSample>>& destination) {
        while (true) {
            DWORD flags = 0;
            transform->GetOutputStatus(&flags);
            if (flags != MFT_OUTPUT_STATUS_SAMPLE_READY) {
                return;
            }
            // Code to call GetOutputStreamInfo and create samples if required was here.
            // But for DX11-connected transforms it's not necessary:
            // streamInfo.dwFlags always has MFT_OUTPUT_STREAM_PROVIDES_SAMPLES set,
            // and we have 100% control over the hardware and OS we deploy on.
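            // For reference, a minimal sketch of the removed caller-allocates path, assuming
            // system-memory samples (only needed when MFT_OUTPUT_STREAM_PROVIDES_SAMPLES is absent):
            //     MFT_OUTPUT_STREAM_INFO streamInfo = {};
            //     transform->GetOutputStreamInfo(0, &streamInfo);
            //     if (!(streamInfo.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES)) {
            //         ResourcePointer<IMFSample> out;
            //         ResourcePointer<IMFMediaBuffer> outBuffer;
            //         MFCreateSample(out.internalPointer());
            //         MFCreateMemoryBuffer(streamInfo.cbSize, outBuffer.internalPointer());
            //         out->AddBuffer(outBuffer);
            //         // ...and pass 'out' as outputDataBuffer.pSample below.
            //     }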
            DWORD status = 0;
            MFT_OUTPUT_DATA_BUFFER outputDataBuffer = {}; // dwStreamID 0, pSample/pEvents null, dwStatus 0.
            // This call hangs when made on the decoder after half a dozen frames have been processed,
            // inside the Media Foundation function CDXVAFrameManager::WaitOnSampleReturnedByRenderer.
            HRESULT result = transform->ProcessOutput(0, 1, &outputDataBuffer, &status);
            if (result == S_OK) {
                if (outputDataBuffer.dwStatus == 0) {
                    // This push_back does NOT call AddRef.
                    destination.push_back(outputDataBuffer.pSample);
                }
                else if (outputDataBuffer.dwStatus != MFT_OUTPUT_DATA_BUFFER_INCOMPLETE) {
                    return;
                }
            }
            else if (result == MF_E_TRANSFORM_STREAM_CHANGE) {
                configureIO();
            }
            else {
                return;
            }
        }
    }
}