I'm working on a C++ executable that grabs my display output using DXGI output duplication (in the form of DXGI surface textures, I think), encodes it directly to HEVC with my GPU's hardware encoder, and then downloads the bitstream to system memory so it can be written to stdout.

I would like to encode asynchronously, which the docs say is possible. As I understand it, asynchronous here would mean a single blocking method call that accepts a video frame in GPU memory and returns a compressed frame (a NAL unit, I think it's called).
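To make that concrete, this is the kind of wrapper I'm imagining ending up with (purely hypothetical, the names are mine and not anything from the SDK):

#include <d3d11.h>
#include <vector>
#include <cstdint>

// Hypothetical wrapper I'd like to end up with -- not a real NVENC type, just the shape of it.
// EncodeFrame() would block until the encoder is done with the frame and hand back a complete
// compressed frame (NAL unit(s)) ready to be written to stdout.
class HevcEncoder
{
public:
    bool Initialize(ID3D11Device* device, UINT width, UINT height);
    bool EncodeFrame(ID3D11Texture2D* frame, std::vector<uint8_t>& bitstreamOut); // GPU in, system memory out
    void Shutdown();
};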

The executable is part of a remote access implementation I'm working on, and it would be run as a subprocess by a main Go app when the client connects and authenticates.
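Since the Go parent will be reading the raw bitstream from this process's stdout, I'm assuming I also need to switch stdout to binary mode on Windows so the CRT doesn't mangle the byte stream; something like this (untested):

#include <cstdio>
#include <cstdint>
#include <io.h>
#include <fcntl.h>

// Assumption on my part: stdout must be put into binary mode on Windows, otherwise the CRT
// rewrites 0x0A bytes in the bitstream as 0x0D 0x0A and corrupts what the parent reads.
static void WriteBitstreamToStdout(const uint8_t* data, size_t size)
{
    static bool binaryMode = false;
    if (!binaryMode)
    {
        _setmode(_fileno(stdout), _O_BINARY);
        binaryMode = true;
    }
    fwrite(data, 1, size, stdout);
    fflush(stdout);
}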

Here's my code so far:



#include <iostream>

#pragma comment(lib, "d3d11")
#include <d3d11.h>

#pragma comment(lib, "dxgi")
#include <dxgi1_2.h>

//#include <nvEncodeAPI.h>

using namespace std;

int main()
{   

    //NvEncodeAPICreateInstance();
    
    // intermediate variables for casting
    IDXGIOutput* pDisplay_old;

    IDXGIFactory1* pFactory;
    IDXGIAdapter* pGPU;
    ID3D11Device* pD3D;
    IDXGIOutput1* pDisplay;

    // create DXGI factory
    if (CreateDXGIFactory1(__uuidof(IDXGIFactory1), (void**)&pFactory) != S_OK) return 1;

    // get GPU adapter
    if (pFactory -> EnumAdapters(0, &pGPU) != S_OK) return 2;

    // create D3D11 device
    D3D_FEATURE_LEVEL D3DFeatures [6]
    {
        D3D_FEATURE_LEVEL_11_0,
        D3D_FEATURE_LEVEL_10_1,
        D3D_FEATURE_LEVEL_10_0,
        D3D_FEATURE_LEVEL_9_3,
        D3D_FEATURE_LEVEL_9_2,
        D3D_FEATURE_LEVEL_9_1
    };
    if (D3D11CreateDevice(pGPU, D3D_DRIVER_TYPE_UNKNOWN, NULL, 0, D3DFeatures, ARRAYSIZE(D3DFeatures), D3D11_SDK_VERSION, &pD3D, NULL, NULL) != S_OK) return 3;

    // get display
    if (pGPU -> EnumOutputs(0, &pDisplay_old) != S_OK) return 4;
    pDisplay_old -> QueryInterface(&pDisplay);


    IDXGIOutputDuplication* pCapture;
    DXGI_OUTDUPL_DESC captureDesc;

    DXGI_OUTDUPL_FRAME_INFO frameInfo;
    IDXGIResource* pFrame;

    HRESULT captureResult;
    do
    {
        // create capture

        if (pDisplay -> DuplicateOutput(pD3D, &pCapture) != S_OK) return 5;
        pCapture -> GetDesc(&captureDesc);

        cout << captureDesc.ModeDesc.Width << ' ' << captureDesc.ModeDesc.Height;

        do
        {
            captureResult = pCapture -> AcquireNextFrame(INFINITE, &frameInfo, &pFrame);
            if (captureResult == S_OK)
            {
                // LastPresentTime == 0 means no new image was presented (e.g. only the pointer moved)
                if (frameInfo.LastPresentTime.QuadPart != 0)
                {

                    // === async blocking Encode logic and printing to stdout should be here =========

            


                    // =========================================================

                }

                pFrame -> Release();                         // drop our reference to the captured surface
                captureResult = pCapture -> ReleaseFrame();  // hand the frame back to the duplication
            }
            else if (captureResult == DXGI_ERROR_ACCESS_LOST) break;
            else return 6;
        }
        while (true);

        // the duplication interface is invalid after DXGI_ERROR_ACCESS_LOST (mode change, UAC prompt, etc.),
        // so release it and create a new one on the next pass of the outer loop
        pCapture -> Release();
    }
    while (true);

}

It successfully grabs the display framebuffer in the form of IDXGIResource objects, which I acquire through pCapture (pFrame is the pointer to each one). Now I need to figure out how to set up the NVENC session, get it to accept these strange objects as frame input, and get the output into system memory in a form that can be written to stdout (ideally asynchronously, as described above).

I had a look at the docs (https://docs.nvidia.com/video-technologies/video-codec-sdk/nvenc-video-encoder-api-prog-guide/index.html), and while they're reasonably descriptive, they don't include any code examples, and I couldn't find any complete examples online either.

I downloaded the SDK headers and tried a few things (my rough, untested attempt is below), but I feel it would be better to have someone familiar with the API walk me through it.
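For what it's worth, here is the call sequence I pieced together from nvEncodeAPI.h. It's untested: it assumes linking against the SDK's nvencodeapi.lib, I'm not sure the P4 preset / low-latency tuning choice is right, and I'm only guessing that NV_ENC_BUFFER_FORMAT_ARGB matches the BGRA8 textures duplication gives me. I also assume I first have to QueryInterface the IDXGIResource to ID3D11Texture2D before registering it. This is the synchronous path; the async path apparently involves enableEncodeAsync and a completionEvent per picture, which I haven't attempted yet.

#include <d3d11.h>
#include <vector>
#include <cstdint>
#include <nvEncodeAPI.h>

// Rough, untested sketch of one frame going through NVENC synchronously.
// Session setup (steps 1-5) would really happen once, not per frame; error
// handling and cleanup are omitted to keep the outline readable.
bool EncodeOneFrameSketch(ID3D11Device* pD3D, ID3D11Texture2D* pFrameTex,
                          UINT width, UINT height, std::vector<uint8_t>& nalOut)
{
    // 1. Load the API: NvEncodeAPICreateInstance fills a table of function pointers.
    NV_ENCODE_API_FUNCTION_LIST nvenc = { NV_ENCODE_API_FUNCTION_LIST_VER };
    if (NvEncodeAPICreateInstance(&nvenc) != NV_ENC_SUCCESS) return false;

    // 2. Open a session on the same D3D11 device that owns the captured textures.
    NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS session = { NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER };
    session.deviceType = NV_ENC_DEVICE_TYPE_DIRECTX;
    session.device     = pD3D;
    session.apiVersion = NVENCAPI_VERSION;
    void* hEncoder = nullptr;
    if (nvenc.nvEncOpenEncodeSessionEx(&session, &hEncoder) != NV_ENC_SUCCESS) return false;

    // 3. Initialize for HEVC. Leaving encodeConfig null so the driver uses the preset defaults;
    //    not sure the preset/tuning combination is the right one for this use case.
    NV_ENC_INITIALIZE_PARAMS init = { NV_ENC_INITIALIZE_PARAMS_VER };
    init.encodeGUID   = NV_ENC_CODEC_HEVC_GUID;
    init.presetGUID   = NV_ENC_PRESET_P4_GUID;
    init.tuningInfo   = NV_ENC_TUNING_INFO_LOW_LATENCY;
    init.encodeWidth  = width;
    init.encodeHeight = height;
    init.frameRateNum = 60;
    init.frameRateDen = 1;
    init.enablePTD    = 1;   // let the encoder pick picture types itself
    if (nvenc.nvEncInitializeEncoder(hEncoder, &init) != NV_ENC_SUCCESS) return false;

    // 4. Register the captured D3D11 texture so NVENC can read it straight from VRAM.
    NV_ENC_REGISTER_RESOURCE reg = { NV_ENC_REGISTER_RESOURCE_VER };
    reg.resourceType       = NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX;
    reg.resourceToRegister = pFrameTex;
    reg.width              = width;
    reg.height             = height;
    reg.bufferFormat       = NV_ENC_BUFFER_FORMAT_ARGB;   // guessing this matches the BGRA8 desktop format
    if (nvenc.nvEncRegisterResource(hEncoder, &reg) != NV_ENC_SUCCESS) return false;

    // 5. Create an output buffer for the compressed bitstream.
    NV_ENC_CREATE_BITSTREAM_BUFFER bsBuf = { NV_ENC_CREATE_BITSTREAM_BUFFER_VER };
    if (nvenc.nvEncCreateBitstreamBuffer(hEncoder, &bsBuf) != NV_ENC_SUCCESS) return false;

    // 6. Map the registered texture for this frame and submit it.
    NV_ENC_MAP_INPUT_RESOURCE map = { NV_ENC_MAP_INPUT_RESOURCE_VER };
    map.registeredResource = reg.registeredResource;
    if (nvenc.nvEncMapInputResource(hEncoder, &map) != NV_ENC_SUCCESS) return false;

    NV_ENC_PIC_PARAMS pic = { NV_ENC_PIC_PARAMS_VER };
    pic.inputBuffer     = map.mappedResource;
    pic.bufferFmt       = map.mappedBufferFmt;
    pic.inputWidth      = width;
    pic.inputHeight     = height;
    pic.outputBitstream = bsBuf.bitstreamBuffer;
    pic.pictureStruct   = NV_ENC_PIC_STRUCT_FRAME;
    if (nvenc.nvEncEncodePicture(hEncoder, &pic) != NV_ENC_SUCCESS) return false;

    // 7. Lock the bitstream to copy the compressed frame into system memory.
    NV_ENC_LOCK_BITSTREAM lock = { NV_ENC_LOCK_BITSTREAM_VER };
    lock.outputBitstream = bsBuf.bitstreamBuffer;
    if (nvenc.nvEncLockBitstream(hEncoder, &lock) != NV_ENC_SUCCESS) return false;
    const uint8_t* p = static_cast<const uint8_t*>(lock.bitstreamBufferPtr);
    nalOut.assign(p, p + lock.bitstreamSizeInBytes);
    nvenc.nvEncUnlockBitstream(hEncoder, lock.outputBitstream);
    nvenc.nvEncUnmapInputResource(hEncoder, map.mappedResource);

    return true;   // nalOut is what I'd then write to stdout
}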

Thanks!
