// Source file: video-shader-toys/apps/LoopThroughWithOpenGLCompositing/OpenGLComposite.cpp
// (extraction metadata removed: 2026-05-02 16:40:21 +10:00, 1507 lines, 43 KiB, C++)
/* -LICENSE-START-
** Copyright (c) 2012 Blackmagic Design
**
** Permission is hereby granted, free of charge, to any person or organization
** obtaining a copy of the software and accompanying documentation (the
** "Software") to use, reproduce, display, distribute, sub-license, execute,
** and transmit the Software, and to prepare derivative works of the Software,
** and to permit third-parties to whom the Software is furnished to do so, in
** accordance with:
**
** (1) if the Software is obtained from Blackmagic Design, the End User License
** Agreement for the Software Development Kit ("EULA") available at
** https://www.blackmagicdesign.com/EULA/DeckLinkSDK; or
**
** (2) if the Software is obtained from any third party, such licensing terms
** as notified by that third party,
**
** and all subject to the following:
**
** (3) the copyright notices in the Software and this entire statement,
** including the above license grant, this restriction and the following
** disclaimer, must be included in all copies of the Software, in whole or in
** part, and all derivative works of the Software, unless such copies or
** derivative works are solely in the form of machine-executable object code
** generated by a source language processor.
**
** (4) THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
** SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
** FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
** ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
** DEALINGS IN THE SOFTWARE.
**
** A copy of the Software is available free of charge at
** https://www.blackmagicdesign.com/desktopvideo_sdk under the EULA.
**
** -LICENSE-END-
*/
#include "ControlServer.h"
#include "OpenGLComposite.h"
#include "GLExtensions.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <initguid.h>
DEFINE_GUID(IID_PinnedMemoryAllocator,
0xddf921a6, 0x279d, 0x4dcd, 0x86, 0x26, 0x75, 0x7f, 0x58, 0xa8, 0xc4, 0x35);
namespace
{
// Texture unit the captured video texture is bound to while the effect draws.
constexpr GLuint kVideoTextureUnit = 1;
// Uniform-buffer binding point for the runtime shader's global parameter block.
constexpr GLuint kGlobalParamsBindingPoint = 0;
// Human-readable mode name reported with the signal status; must stay in sync
// with the bmdModeHD1080p5994 display mode selected in InitDeckLink().
const char* kDisplayModeName = "1080p59.94";
// Fullscreen-triangle vertex shader: no VBO required, gl_VertexID selects one
// of three hard-coded corners of an oversized triangle. Texture coordinates
// run 0..2 so the visible 0..1 range exactly covers the viewport after clipping.
const char* kVertexShaderSource =
"#version 430 core\n"
"out vec2 vTexCoord;\n"
"void main()\n"
"{\n"
" vec2 positions[3] = vec2[3](vec2(-1.0, -1.0), vec2(3.0, -1.0), vec2(-1.0, 3.0));\n"
" vec2 texCoords[3] = vec2[3](vec2(0.0, 0.0), vec2(2.0, 0.0), vec2(0.0, 2.0));\n"
" gl_Position = vec4(positions[gl_VertexID], 0.0, 1.0);\n"
" vTexCoord = texCoords[gl_VertexID];\n"
"}\n";
// Copy `message` into the caller-supplied buffer, truncating if necessary and
// always NUL-terminating. No-op when the buffer is null or has no capacity.
// Replaces the non-standard strncpy_s/_TRUNCATE (MSVC Annex K) with portable
// standard C++ so the helper compiles with any conforming toolchain.
void CopyErrorMessage(const std::string& message, int errorMessageSize, char* errorMessage)
{
    if (!errorMessage || errorMessageSize <= 0)
        return;
    const std::size_t maxChars = static_cast<std::size_t>(errorMessageSize) - 1;
    const std::size_t copied = message.copy(errorMessage, maxChars);
    errorMessage[copied] = '\0';
}
// Round `offset` up to the next multiple of `alignment`.
// `alignment` must be a power of two (std140 uses 4, 8 and 16).
std::size_t AlignStd140(std::size_t offset, std::size_t alignment)
{
    const std::size_t padded = offset + (alignment - 1);
    return padded & ~(alignment - 1);
}
// Append one POD value to `buffer` at the next `alignment`-aligned offset,
// zero-filling any padding bytes introduced by the alignment.
// `alignment` must be a power of two.
template <typename TValue>
void AppendStd140Value(std::vector<unsigned char>& buffer, std::size_t alignment, const TValue& value)
{
    const std::size_t mask = alignment - 1;
    const std::size_t writeOffset = (buffer.size() + mask) & ~mask;
    const std::size_t requiredSize = writeOffset + sizeof(TValue);
    if (buffer.size() < requiredSize)
        buffer.resize(requiredSize, 0);
    std::memcpy(buffer.data() + writeOffset, &value, sizeof(TValue));
}
void AppendStd140Float(std::vector<unsigned char>& buffer, float value)
{
AppendStd140Value(buffer, 4, value);
}
void AppendStd140Int(std::vector<unsigned char>& buffer, int value)
{
AppendStd140Value(buffer, 4, value);
}
// Append a vec2 at the next 8-byte boundary (std140 vec2 alignment),
// zero-filling any padding introduced by the alignment.
void AppendStd140Vec2(std::vector<unsigned char>& buffer, float x, float y)
{
    const std::size_t mask = std::size_t(8) - 1;
    const std::size_t writeOffset = (buffer.size() + mask) & ~mask;
    const float components[2] = { x, y };
    if (buffer.size() < writeOffset + sizeof(components))
        buffer.resize(writeOffset + sizeof(components), 0);
    std::memcpy(buffer.data() + writeOffset, components, sizeof(components));
}
// Append a vec4 at the next 16-byte boundary (std140 vec4 alignment),
// zero-filling any padding introduced by the alignment.
void AppendStd140Vec4(std::vector<unsigned char>& buffer, float x, float y, float z, float w)
{
    const std::size_t mask = std::size_t(16) - 1;
    const std::size_t writeOffset = (buffer.size() + mask) & ~mask;
    const float components[4] = { x, y, z, w };
    if (buffer.size() < writeOffset + sizeof(components))
        buffer.resize(writeOffset + sizeof(components), 0);
    std::memcpy(buffer.data() + writeOffset, components, sizeof(components));
}
}
// Construct the compositor around an existing window / device context / GL
// rendering context. Heavy setup (DeckLink discovery, GL objects) is deferred
// to InitDeckLink()/InitOpenGLState().
//
// FIX: the original init list left mIdFrameBuf, mIdColorBuf, mIdDepthBuf,
// mUnpinnedTextureBuffer, mViewWidth/mViewHeight, mFrameDuration,
// mFrameTimescale and mTotalPlayoutFrames uninitialized, yet the destructor
// tests the GL names against 0 (glDelete* guards) and paintGL() reads the view
// size — reading indeterminate values if InitDeckLink() never ran. All members
// are now zero-initialized here.
OpenGLComposite::OpenGLComposite(HWND hWnd, HDC hDC, HGLRC hRC) :
    hGLWnd(hWnd), hGLDC(hDC), hGLRC(hRC),
    mCaptureDelegate(NULL), mPlayoutDelegate(NULL),
    mDLInput(NULL), mDLOutput(NULL),
    mPlayoutAllocator(NULL),
    mFrameWidth(0), mFrameHeight(0),
    mFrameDuration(0), mFrameTimescale(0),
    mTotalPlayoutFrames(0),
    mViewWidth(0), mViewHeight(0),
    mHasNoInputSource(true),
    mFastTransferExtensionAvailable(false),
    mCaptureTexture(0),
    mFBOTexture(0),
    mFullscreenVAO(0),
    mGlobalParamsUBO(0),
    mProgram(0),
    mVertexShader(0),
    mFragmentShader(0),
    mGlobalParamsUBOSize(0),
    mIdFrameBuf(0),
    mIdColorBuf(0),
    mIdDepthBuf(0),
    mUnpinnedTextureBuffer(0)
{
    // Guards the shared GL context and the playout frame queue across the
    // DeckLink capture/playout threads.
    InitializeCriticalSection(&pMutex);
    mRuntimeHost = std::make_unique<RuntimeHost>();
    mControlServer = std::make_unique<ControlServer>();
}
// Destructor: detach DeckLink callbacks, release COM objects, delete GL
// resources and stop the control server.
// NOTE(review): the glDelete* calls assume hGLRC is current on this thread
// (there is no wglMakeCurrent here) — confirm destruction happens on the
// thread that owns the GL context.
OpenGLComposite::~OpenGLComposite()
{
// Cleanup for Capture
if (mDLInput != NULL)
{
// Detach the capture callback first so no further frames arrive mid-teardown.
mDLInput->SetCallback(NULL);
mDLInput->Release();
mDLInput = NULL;
}
if (mCaptureDelegate != NULL)
{
mCaptureDelegate->Release();
mCaptureDelegate = NULL;
}
// Cleanup for Playout
// Release every pre-allocated playout frame still in the recycling queue.
while (!mDLOutputVideoFrameQueue.empty())
{
IDeckLinkMutableVideoFrame* frameToRelease = mDLOutputVideoFrameQueue.front();
if (frameToRelease != NULL)
{
frameToRelease->Release();
frameToRelease = NULL;
}
mDLOutputVideoFrameQueue.pop_front();
}
if (mDLOutput != NULL)
{
// Detach the completion callback before releasing the output device.
mDLOutput->SetScheduledFrameCompletionCallback(NULL);
mDLOutput->Release();
mDLOutput = NULL;
}
if (mPlayoutDelegate != NULL)
{
mPlayoutDelegate->Release();
mPlayoutDelegate = NULL;
}
if (mPlayoutAllocator != NULL)
{
mPlayoutAllocator->Release();
mPlayoutAllocator = NULL;
}
// GL object names are deleted only when non-zero (zero = never created).
if (mFullscreenVAO != 0)
glDeleteVertexArrays(1, &mFullscreenVAO);
if (mGlobalParamsUBO != 0)
glDeleteBuffers(1, &mGlobalParamsUBO);
if (mIdFrameBuf != 0)
glDeleteFramebuffers(1, &mIdFrameBuf);
if (mIdColorBuf != 0)
glDeleteRenderbuffers(1, &mIdColorBuf);
if (mIdDepthBuf != 0)
glDeleteRenderbuffers(1, &mIdDepthBuf);
if (mCaptureTexture != 0)
glDeleteTextures(1, &mCaptureTexture);
if (mFBOTexture != 0)
glDeleteTextures(1, &mFBOTexture);
if (mUnpinnedTextureBuffer != 0)
glDeleteBuffers(1, &mUnpinnedTextureBuffer);
// Deletes the shader program and both shader stages (idempotent).
destroyShaderProgram();
if (mControlServer)
mControlServer->Stop();
DeleteCriticalSection(&pMutex);
}
// Locate DeckLink capture and playout devices, enable 1080p59.94 capture with
// a pinned-memory allocator, build the OpenGL pipeline and pre-allocate a
// queue of 10 BGRA playout frames. Returns false after releasing any
// partially-acquired devices on failure.
bool OpenGLComposite::InitDeckLink()
{
    bool bSuccess = false;
    IDeckLinkIterator* pDLIterator = NULL;
    IDeckLink* pDL = NULL;
    IDeckLinkProfileAttributes* deckLinkAttributes = NULL;
    IDeckLinkDisplayModeIterator* pDLDisplayModeIterator = NULL;
    IDeckLinkDisplayMode* pDLDisplayMode = NULL;
    BMDDisplayMode displayMode = bmdModeHD1080p5994; // mode to use for capture and playout
    int outputFrameRowBytes;
    HRESULT result;

    result = CoCreateInstance(CLSID_CDeckLinkIterator, NULL, CLSCTX_ALL, IID_IDeckLinkIterator, (void**)&pDLIterator);
    if (FAILED(result))
    {
        MessageBox(NULL, _T("Please install the Blackmagic DeckLink drivers to use the features of this application."), _T("This application requires the DeckLink drivers installed."), MB_OK);
        return false;
    }

    while (pDLIterator->Next(&pDL) == S_OK)
    {
        int64_t duplexMode;
        // BUGFIX: the assignment must be parenthesized. The original
        // `if (result = QueryInterface(...) != S_OK)` bound `!=` tighter than
        // `=`, assigning the comparison result (0 or 1) to `result`, so the
        // log below printed 1 instead of the failing HRESULT.
        if ((result = pDL->QueryInterface(IID_IDeckLinkProfileAttributes, (void**)&deckLinkAttributes)) != S_OK)
        {
            printf("Could not obtain the IDeckLinkProfileAttributes interface - result %08x\n", static_cast<unsigned int>(result));
            pDL->Release();
            pDL = NULL;
            continue;
        }
        result = deckLinkAttributes->GetInt(BMDDeckLinkDuplex, &duplexMode);
        deckLinkAttributes->Release();
        deckLinkAttributes = NULL;
        // Skip devices whose active profile can neither capture nor play out.
        if (result != S_OK || duplexMode == bmdDuplexInactive)
        {
            pDL->Release();
            pDL = NULL;
            continue;
        }
        // Use a full duplex device as capture and playback, or half-duplex device
        // as capture or playback.
        bool inputUsed = false;
        if (!mDLInput && pDL->QueryInterface(IID_IDeckLinkInput, (void**)&mDLInput) == S_OK)
            inputUsed = true;
        if (!mDLOutput && (!inputUsed || (duplexMode == bmdDuplexFull)))
        {
            if (pDL->QueryInterface(IID_IDeckLinkOutput, (void**)&mDLOutput) != S_OK)
                mDLOutput = NULL;
        }
        pDL->Release();
        pDL = NULL;
        if (mDLOutput && mDLInput)
            break;
    }

    if (! mDLOutput || ! mDLInput)
    {
        MessageBox(NULL, _T("Expected both Input and Output DeckLink devices"), _T("This application requires two DeckLink devices."), MB_OK);
        goto error;
    }

    if (mDLOutput->GetDisplayModeIterator(&pDLDisplayModeIterator) != S_OK)
    {
        MessageBox(NULL, _T("Cannot get Display Mode Iterator."), _T("DeckLink error."), MB_OK);
        goto error;
    }
    // Find the IDeckLinkDisplayMode describing the requested BMDDisplayMode.
    while (pDLDisplayModeIterator->Next(&pDLDisplayMode) == S_OK)
    {
        if (pDLDisplayMode->GetDisplayMode() == displayMode)
            break;
        pDLDisplayMode->Release();
        pDLDisplayMode = NULL;
    }
    pDLDisplayModeIterator->Release();
    pDLDisplayModeIterator = NULL;
    if (pDLDisplayMode == NULL)
    {
        MessageBox(NULL, _T("Cannot get specified BMDDisplayMode."), _T("DeckLink error."), MB_OK);
        goto error;
    }

    mFrameWidth = pDLDisplayMode->GetWidth();
    mFrameHeight = pDLDisplayMode->GetHeight();

    if (! CheckOpenGLExtensions())
        goto error;
    if (! InitOpenGLState())
        goto error;

    pDLDisplayMode->GetFrameRate(&mFrameDuration, &mFrameTimescale);

    // Resize window to match video frame, but scale large formats down by half for viewing
    if (mFrameWidth < 1920)
        resizeWindow(mFrameWidth, mFrameHeight);
    else
        resizeWindow(mFrameWidth / 2, mFrameHeight / 2);

    if (mFastTransferExtensionAvailable)
    {
        // Initialize fast video frame transfers
        if (! VideoFrameTransfer::initialize(mFrameWidth, mFrameHeight, mCaptureTexture, mFBOTexture))
        {
            MessageBox(NULL, _T("Cannot initialize video transfers."), _T("VideoFrameTransfer error."), MB_OK);
            goto error;
        }
    }

    {
        // Use custom allocators so we pin only once then recycle them
        CComPtr<IDeckLinkVideoBufferAllocatorProvider> captureAllocator(new (std::nothrow) InputAllocatorPool(hGLDC, hGLRC));
        if (mDLInput->EnableVideoInputWithAllocatorProvider(displayMode, bmdFormat8BitYUV, bmdVideoInputFlagDefault, captureAllocator) != S_OK)
            goto error;
    }

    mCaptureDelegate = new CaptureDelegate(this);
    if (mDLInput->SetCallback(mCaptureDelegate) != S_OK)
        goto error;

    if (mDLOutput->RowBytesForPixelFormat(bmdFormat8BitBGRA, mFrameWidth, &outputFrameRowBytes) != S_OK)
        goto error;

    // Use a custom allocator so we pin only once then recycle them
    mPlayoutAllocator = new PinnedMemoryAllocator(hGLDC, hGLRC, VideoFrameTransfer::GPUtoCPU, 1, outputFrameRowBytes * mFrameHeight);
    if (mDLOutput->EnableVideoOutput(displayMode, bmdVideoOutputFlagDefault) != S_OK)
        goto error;

    // Create a queue of 10 IDeckLinkMutableVideoFrame objects to use for scheduling output video frames.
    // The ScheduledFrameCompleted() callback will immediately schedule a new frame using the next video frame from this queue.
    for (int i = 0; i < 10; i++)
    {
        // The frame read back from the GPU frame buffer and used for the playout video frame is in BGRA format.
        // The BGRA frame will be converted on playout to YCbCr either in hardware on most DeckLink cards or in software
        // within the DeckLink API for DeckLink devices without this hardware conversion.
        // If you want RGB 4:4:4 format to be played out "over the wire" in SDI, turn on the "Use 4:4:4 SDI" in the control
        // panel or turn on the bmdDeckLinkConfig444SDIVideoOutput flag using the IDeckLinkConfiguration interface.
        IDeckLinkMutableVideoFrame* outputFrame;
        IDeckLinkVideoBuffer* outputFrameBuffer = NULL;
        if (mPlayoutAllocator->AllocateVideoBuffer(&outputFrameBuffer) != S_OK)
            goto error;
        if (mDLOutput->CreateVideoFrameWithBuffer(mFrameWidth, mFrameHeight, outputFrameRowBytes, bmdFormat8BitBGRA, bmdFrameFlagFlipVertical, outputFrameBuffer, &outputFrame) != S_OK)
            goto error;
        mDLOutputVideoFrameQueue.push_back(outputFrame);
    }

    mPlayoutDelegate = new PlayoutDelegate(this);
    if (mPlayoutDelegate == NULL)
        goto error;
    if (mDLOutput->SetScheduledFrameCompletionCallback(mPlayoutDelegate) != S_OK)
        goto error;

    bSuccess = true;

error:
    if (!bSuccess)
    {
        if (mDLInput != NULL)
        {
            mDLInput->Release();
            mDLInput = NULL;
        }
        if (mDLOutput != NULL)
        {
            mDLOutput->Release();
            mDLOutput = NULL;
        }
    }
    if (pDL != NULL)
    {
        pDL->Release();
        pDL = NULL;
    }
    if (pDLDisplayMode != NULL)
    {
        pDLDisplayMode->Release();
        pDLDisplayMode = NULL;
    }
    if (pDLIterator != NULL)
    {
        pDLIterator->Release();
        pDLIterator = NULL;
    }
    return bSuccess;
}
// Present the rendered off-screen frame in the application window by blitting
// mIdFrameBuf to the default framebuffer, scaled to the current view size.
void OpenGLComposite::paintGL()
{
// The DeckLink API provides IDeckLinkGLScreenPreviewHelper as a convenient way to view the playout video frames
// in a window. However, it performs a copy from host memory to the GPU which is wasteful in this case since
// we already have the rendered frame to be played out sitting in the GPU in the mIdFrameBuf frame buffer.
// Simply copy the off-screen frame buffer to on-screen frame buffer, scaling to the viewing window size.
glBindFramebuffer(GL_READ_FRAMEBUFFER, mIdFrameBuf);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glViewport(0, 0, mViewWidth, mViewHeight);
// GL_LINEAR filtering because the window may be a scaled (half-size) view.
glBlitFramebuffer(0, 0, mFrameWidth, mFrameHeight, 0, 0, mViewWidth, mViewHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
SwapBuffers(hGLDC);
// Mark the client area valid so Windows stops sending WM_PAINT for it.
ValidateRect(hGLWnd, NULL);
}
// Record the new client-area size for the blit in paintGL().
void OpenGLComposite::resizeGL(WORD width, WORD height)
{
// We don't set the projection or model matrices here since the window data is copied directly from
// an off-screen FBO in paintGL(). Just save the width and height for use in paintGL().
mViewWidth = width;
mViewHeight = height;
}
// Resize the window to the requested dimensions while keeping its current
// screen position.
//
// BUGFIX: SetWindowPos takes a width/height pair (cx, cy), not right/bottom
// coordinates. The original passed r.left + width and r.top + height, which
// inflated the window size by its screen position.
// NOTE(review): width/height here apply to the whole window including its
// frame; use AdjustWindowRectEx if an exact client area is required — TODO confirm.
void OpenGLComposite::resizeWindow(int width, int height)
{
    RECT r;
    if (GetWindowRect(hGLWnd, &r))
    {
        SetWindowPos(hGLWnd, HWND_TOP, r.left, r.top, width, height, 0);
    }
}
// One-time GL and runtime setup: resolve extensions, start the runtime host
// and control server, compile the active shader, and create the capture
// texture, off-screen FBO, fullscreen VAO and global-parameter UBO.
// Returns false (after showing a message box) on any failure.
bool OpenGLComposite::InitOpenGLState()
{
if (! ResolveGLExtensions())
return false;
std::string runtimeError;
if (!mRuntimeHost->Initialize(runtimeError))
{
MessageBoxA(NULL, runtimeError.c_str(), "Runtime host failed to initialize", MB_OK);
return false;
}
// Wire the control server's REST/WebSocket-style callbacks to this object.
// NOTE(review): these lambdas capture `this` and run on the server's thread —
// they rely on the server being stopped before this object is destroyed.
ControlServer::Callbacks callbacks;
callbacks.getStateJson = [this]() { return GetRuntimeStateJson(); };
callbacks.selectShader = [this](const std::string& shaderId, std::string& error) { return SelectShader(shaderId, error); };
callbacks.updateParameter = [this](const std::string& shaderId, const std::string& parameterId, const std::string& valueJson, std::string& error) {
return UpdateParameterJson(shaderId, parameterId, valueJson, error);
};
callbacks.setBypass = [this](bool bypassEnabled, std::string& error) { return SetBypassEnabled(bypassEnabled, error); };
callbacks.setMixAmount = [this](double mixAmount, std::string& error) { return SetMixAmount(mixAmount, error); };
callbacks.reloadShader = [this](std::string& error) {
if (!ReloadShader())
{
error = "Shader reload failed. See native app status for details.";
return false;
}
return true;
};
if (!mControlServer->Start(mRuntimeHost->GetUiRoot(), mRuntimeHost->GetServerPort(), callbacks, runtimeError))
{
MessageBoxA(NULL, runtimeError.c_str(), "Local control server failed to start", MB_OK);
return false;
}
// The server may have bound an ephemeral port; report the actual one back.
mRuntimeHost->SetServerPort(mControlServer->GetPort());
// Prepare the runtime shader program generated from the active shader package.
char compilerErrorMessage[1024];
if (! compileFragmentShader(sizeof(compilerErrorMessage), compilerErrorMessage))
{
MessageBoxA(NULL, compilerErrorMessage, "OpenGL shader failed to load or compile", MB_OK);
return false;
}
glClearColor( 0.0f, 0.0f, 0.0f, 0.5f ); // Black background
glDisable(GL_DEPTH_TEST);
if (! mFastTransferExtensionAvailable)
{
// Fallback path: a plain pixel-unpack buffer used by VideoFrameArrived().
glGenBuffers(1, &mUnpinnedTextureBuffer);
}
// Setup the texture which will hold the captured video frame pixels
glGenTextures(1, &mCaptureTexture);
glBindTexture(GL_TEXTURE_2D, mCaptureTexture);
// Parameters to control how texels are sampled from the texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Create texture with empty data, we will update it using glTexSubImage2D each frame.
// The captured video is YCbCr 4:2:2 packed into a UYVY macropixel. OpenGL has no YCbCr format
// so treat it as RGBA 4:4:4:4 by halving the width and using GL_RGBA internal format.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, mFrameWidth/2, mFrameHeight, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
glBindTexture(GL_TEXTURE_2D, 0);
// Create Frame Buffer Object (FBO) to perform off-screen rendering of scene.
// This allows the render to be done on a framebuffer with width and height exactly matching the video format.
glGenFramebuffers(1, &mIdFrameBuf);
glGenRenderbuffers(1, &mIdColorBuf);
glGenRenderbuffers(1, &mIdDepthBuf);
glGenVertexArrays(1, &mFullscreenVAO);
glGenBuffers(1, &mGlobalParamsUBO);
glBindFramebuffer(GL_FRAMEBUFFER, mIdFrameBuf);
// Texture for FBO
glGenTextures(1, &mFBOTexture);
glBindTexture(GL_TEXTURE_2D, mFBOTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, mFrameWidth, mFrameHeight, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
// Attach a depth buffer
glBindRenderbuffer(GL_RENDERBUFFER, mIdDepthBuf);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, mFrameWidth, mFrameHeight);
// NOTE(review): GL_DEPTH_ATTACHMENT_EXT mixes the EXT token with the core FBO
// API; it shares the core GL_DEPTH_ATTACHMENT value, so this works but should
// be normalized to the core enum.
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER, mIdDepthBuf);
// Attach the texture which stores the playback image
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mFBOTexture, 0);
GLenum glStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (glStatus != GL_FRAMEBUFFER_COMPLETE)
{
MessageBox(NULL, _T("Cannot initialize framebuffer."), _T("OpenGL initialization error."), MB_OK);
return false;
}
glBindTexture(GL_TEXTURE_2D, 0);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// Bind once so the VAO name is fully created (core profile requires a VAO
// to be bound when drawing; renderEffect() binds it per frame).
glBindVertexArray(mFullscreenVAO);
glBindVertexArray(0);
// Placeholder UBO allocation; updateGlobalParamsBuffer() re-sizes it to the
// exact std140 layout on first use (mGlobalParamsUBOSize starts at 0).
glBindBuffer(GL_UNIFORM_BUFFER, mGlobalParamsUBO);
glBufferData(GL_UNIFORM_BUFFER, 1024, NULL, GL_DYNAMIC_DRAW);
glBindBufferBase(GL_UNIFORM_BUFFER, kGlobalParamsBindingPoint, mGlobalParamsUBO);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
broadcastRuntimeState();
return true;
}
//
// Update the captured video frame texture
//
// Called on the DeckLink capture thread for every incoming frame. Uploads the
// UYVY pixels into mCaptureTexture via the fast pinned-DMA path when available,
// otherwise through a plain pixel-unpack buffer.
void OpenGLComposite::VideoFrameArrived(IDeckLinkVideoInputFrame* inputFrame, bool hasNoInputSource)
{
    mHasNoInputSource = hasNoInputSource;
    if (mRuntimeHost)
        mRuntimeHost->SetSignalStatus(!hasNoInputSource, mFrameWidth, mFrameHeight, kDisplayModeName);
    if (mHasNoInputSource)
        return; // don't transfer texture when there's no input

    long textureSize = inputFrame->GetRowBytes() * inputFrame->GetHeight();
    IDeckLinkVideoBuffer* inputFrameBuffer = NULL;
    void* videoPixels;
    if (inputFrame->QueryInterface(IID_IDeckLinkVideoBuffer, (void**)&inputFrameBuffer) != S_OK)
        return;
    if (inputFrameBuffer->StartAccess(bmdBufferAccessRead) != S_OK)
    {
        inputFrameBuffer->Release();
        return;
    }
    inputFrameBuffer->GetBytes(&videoPixels);

    EnterCriticalSection(&pMutex);
    wglMakeCurrent( hGLDC, hGLRC ); // make OpenGL context current in this thread
    if (mFastTransferExtensionAvailable)
    {
        CComQIPtr<PinnedMemoryAllocator, &IID_PinnedMemoryAllocator> allocator(inputFrameBuffer);
        // BUGFIX: `allocator` is null when the buffer did not come from our
        // PinnedMemoryAllocator; the original code logged the failure but then
        // dereferenced the null pointer in waitForTransferComplete().
        if (!allocator)
        {
            OutputDebugStringA("Capture: transferFrame() failed\n");
        }
        else
        {
            if (!allocator->transferFrame(videoPixels, mCaptureTexture))
                OutputDebugStringA("Capture: transferFrame() failed\n");
            allocator->waitForTransferComplete(videoPixels);
        }
    }
    else
    {
        // Use a straightforward texture buffer
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, mUnpinnedTextureBuffer);
        glBufferData(GL_PIXEL_UNPACK_BUFFER, textureSize, videoPixels, GL_DYNAMIC_DRAW);
        glBindTexture(GL_TEXTURE_2D, mCaptureTexture);
        // NULL for last arg indicates use current GL_PIXEL_UNPACK_BUFFER target as texture data
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, mFrameWidth/2, mFrameHeight, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
        glBindTexture(GL_TEXTURE_2D, 0);
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    }
    wglMakeCurrent( NULL, NULL );
    LeaveCriticalSection(&pMutex);

    inputFrameBuffer->EndAccess(bmdBufferAccessRead);
    inputFrameBuffer->Release();
}
// Render the live video texture through the runtime shader into the off-screen framebuffer.
// Read the result back from the frame buffer and schedule it for playout.
// Runs on the DeckLink playout thread each time a scheduled frame completes.
//
// BUGFIX: the two error-return paths previously left hGLRC current on this
// thread (no wglMakeCurrent(NULL, NULL)), blocking the capture thread's later
// wglMakeCurrent on the same context.
void OpenGLComposite::PlayoutFrameCompleted(IDeckLinkVideoFrame* completedFrame, BMDOutputFrameCompletionResult completionResult)
{
    EnterCriticalSection(&pMutex);

    // Recycle the frame at the front of the queue to the back.
    IDeckLinkMutableVideoFrame* outputVideoFrame = mDLOutputVideoFrameQueue.front();
    mDLOutputVideoFrameQueue.push_back(outputVideoFrame);
    mDLOutputVideoFrameQueue.pop_front();

    // make GL context current in this thread
    wglMakeCurrent( hGLDC, hGLRC );

    // Draw the effect output to the off-screen framebuffer.
    glBindFramebuffer(GL_FRAMEBUFFER, mIdFrameBuf);
    renderEffect();
    if (mRuntimeHost)
        mRuntimeHost->AdvanceFrame();

    IDeckLinkVideoBuffer* outputVideoFrameBuffer;
    if (outputVideoFrame->QueryInterface(IID_IDeckLinkVideoBuffer, (void**)&outputVideoFrameBuffer) != S_OK)
    {
        wglMakeCurrent(NULL, NULL);
        LeaveCriticalSection(&pMutex);
        return;
    }
    if (outputVideoFrameBuffer->StartAccess(bmdBufferAccessWrite) != S_OK)
    {
        outputVideoFrameBuffer->Release();
        wglMakeCurrent(NULL, NULL);
        LeaveCriticalSection(&pMutex);
        return;
    }

    void* pFrame;
    outputVideoFrameBuffer->GetBytes(&pFrame);
    if (mFastTransferExtensionAvailable)
    {
        // Finished with mCaptureTexture
        VideoFrameTransfer::endTextureInUse(VideoFrameTransfer::CPUtoGPU);
        if (! mPlayoutAllocator->transferFrame(pFrame, mFBOTexture))
            OutputDebugStringA("Playback: transferFrame() failed\n");
        // Present in the window while the async read-back is in flight.
        paintGL();
        // Wait for transfer to system memory to complete
        mPlayoutAllocator->waitForTransferComplete(pFrame);
    }
    else
    {
        // Synchronous read-back of the FBO into the playout frame buffer.
        glReadPixels(0, 0, mFrameWidth, mFrameHeight, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, pFrame);
        paintGL();
    }
    outputVideoFrameBuffer->EndAccess(bmdBufferAccessWrite);
    outputVideoFrameBuffer->Release();

    // If the last completed frame was late or dropped, bump the scheduled time further into the future
    if (completionResult == bmdOutputFrameDisplayedLate || completionResult == bmdOutputFrameDropped)
        mTotalPlayoutFrames += 2;

    // Schedule the next frame for playout
    HRESULT hr = mDLOutput->ScheduleVideoFrame(outputVideoFrame, (mTotalPlayoutFrames * mFrameDuration), mFrameDuration, mFrameTimescale);
    if (SUCCEEDED(hr))
        mTotalPlayoutFrames++;
    wglMakeCurrent( NULL, NULL );
    LeaveCriticalSection(&pMutex);
}
// Preroll five black frames into the playout schedule, then start capture
// and scheduled playback. Returns false if any frame cannot be accessed or
// scheduled.
bool OpenGLComposite::Start()
{
mTotalPlayoutFrames = 0;
// Preroll frames
for (unsigned i = 0; i < 5; i++)
{
// Take each video frame from the front of the queue and move it to the back
IDeckLinkMutableVideoFrame* outputVideoFrame = mDLOutputVideoFrameQueue.front();
mDLOutputVideoFrameQueue.push_back(outputVideoFrame);
mDLOutputVideoFrameQueue.pop_front();
// Start with a black frame for playout
IDeckLinkVideoBuffer* outputVideoFrameBuffer;
if (outputVideoFrame->QueryInterface(IID_IDeckLinkVideoBuffer, (void**)&outputVideoFrameBuffer) != S_OK)
return false;
if (outputVideoFrameBuffer->StartAccess(bmdBufferAccessWrite) != S_OK)
{
outputVideoFrameBuffer->Release();
return false;
}
void* pFrame;
outputVideoFrameBuffer->GetBytes((void**)&pFrame);
// All-zero bytes are black (and transparent) in the BGRA playout format.
memset(pFrame, 0, outputVideoFrame->GetRowBytes() * mFrameHeight); // 0 is black in RGBA format
outputVideoFrameBuffer->EndAccess(bmdBufferAccessWrite);
outputVideoFrameBuffer->Release();
if (mDLOutput->ScheduleVideoFrame(outputVideoFrame, (mTotalPlayoutFrames * mFrameDuration), mFrameDuration, mFrameTimescale) != S_OK)
return false;
mTotalPlayoutFrames++;
}
// Begin capture; playout advances via PlayoutFrameCompleted() callbacks.
mDLInput->StartStreams();
mDLOutput->StartScheduledPlayback(0, mFrameTimescale, 1.0);
return true;
}
// Stop the control server, capture streams and scheduled playback.
// Robustness fix: guard the DeckLink interface pointers — they are NULL when
// InitDeckLink() failed (its error path releases and nulls both), and the
// original code would have crashed if Stop() was called in that state.
bool OpenGLComposite::Stop()
{
    if (mControlServer)
        mControlServer->Stop();
    if (mDLInput != NULL)
    {
        mDLInput->StopStreams();
        mDLInput->DisableVideoInput();
    }
    if (mDLOutput != NULL)
    {
        // Stop immediately (no timeout) and release the video output.
        mDLOutput->StopScheduledPlayback(0, NULL, 0);
        mDLOutput->DisableVideoOutput();
    }
    return true;
}
// Recompile the active runtime shader under the GL context and mutex, then
// publish the compile status to the runtime host and control clients.
// Returns the compile result.
bool OpenGLComposite::ReloadShader()
{
char compilerErrorMessage[1024];
EnterCriticalSection(&pMutex);
// Serialize against the capture/playout threads, which also make this
// context current while holding pMutex.
wglMakeCurrent(hGLDC, hGLRC);
bool success = compileFragmentShader(sizeof(compilerErrorMessage), compilerErrorMessage);
// Clear any pending file-watcher reload request regardless of outcome.
if (mRuntimeHost)
mRuntimeHost->ClearReloadRequest();
wglMakeCurrent(NULL, NULL);
LeaveCriticalSection(&pMutex);
if (!success)
{
if (mRuntimeHost)
mRuntimeHost->SetCompileStatus(false, compilerErrorMessage);
MessageBoxA(NULL, compilerErrorMessage, "Slang shader reload failed", MB_OK);
}
else
{
if (mRuntimeHost)
mRuntimeHost->SetCompileStatus(true, "Shader compiled successfully.");
broadcastRuntimeState();
}
return success;
}
void OpenGLComposite::destroyShaderProgram()
{
if (mProgram != 0)
{
glDeleteProgram(mProgram);
mProgram = 0;
}
if (mFragmentShader != 0)
{
glDeleteShader(mFragmentShader);
mFragmentShader = 0;
}
if (mVertexShader != 0)
{
glDeleteShader(mVertexShader);
mVertexShader = 0;
}
}
// Draw one frame of the effect: the captured video texture is sampled by the
// runtime shader program over a fullscreen triangle into whatever framebuffer
// is currently bound (mIdFrameBuf in PlayoutFrameCompleted()).
void OpenGLComposite::renderEffect()
{
// Pick up shader-registry changes / reload requests before drawing.
PollRuntimeChanges();
glViewport(0, 0, mFrameWidth, mFrameHeight);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// With no input signal, leave the cleared (black) frame.
// NOTE(review): mHasNoInputSource is written by the capture thread without
// holding pMutex at this point — confirm a stale read here is acceptable.
if (mHasNoInputSource)
return;
if (mFastTransferExtensionAvailable)
{
// Signal that we're about to draw using mCaptureTexture onto mFBOTexture.
VideoFrameTransfer::beginTextureInUse(VideoFrameTransfer::CPUtoGPU);
}
glDisable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
// The shader samples the video from kVideoTextureUnit (unit 1).
glActiveTexture(GL_TEXTURE0 + kVideoTextureUnit);
glBindTexture(GL_TEXTURE_2D, mCaptureTexture);
glBindVertexArray(mFullscreenVAO);
glUseProgram(mProgram);
if (mRuntimeHost)
{
// Refresh the std140 parameter UBO with the current time/size/parameters.
const RuntimeRenderState state = mRuntimeHost->GetRenderState(mFrameWidth, mFrameHeight);
updateGlobalParamsBuffer(state);
}
// Fullscreen triangle; vertices are generated from gl_VertexID in the VS.
glDrawArrays(GL_TRIANGLES, 0, 3);
glUseProgram(0);
glBindVertexArray(0);
glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE0);
if (mFastTransferExtensionAvailable)
VideoFrameTransfer::endTextureInUse(VideoFrameTransfer::CPUtoGPU);
}
// Compile a fullscreen shader pass from the runtime Slang source into a core-profile
// GLSL program. The renderer owns the fullscreen pass and parameter UBO layout.
//
// On any compile/link failure the previous program is kept and `errorMessage`
// receives the info log (up to `errorMessageSize` bytes). Only after the new
// program links successfully is the old one destroyed and replaced.
bool OpenGLComposite::compileFragmentShader(int errorMessageSize, char* errorMessage)
{
GLsizei errorBufferSize = 0;
GLint compileResult = GL_FALSE;
GLint linkResult = GL_FALSE;
std::string fragmentShaderSource;
std::string loadError;
const char* vertexSource = kVertexShaderSource;
// Ask the runtime host to generate GLSL from the active shader package.
if (!mRuntimeHost->BuildActiveFragmentShaderSource(fragmentShaderSource, loadError))
{
mRuntimeHost->SetCompileStatus(false, loadError);
CopyErrorMessage(loadError, errorMessageSize, errorMessage);
return false;
}
const char* fragmentSource = fragmentShaderSource.c_str();
// Vertex stage: the fixed fullscreen-triangle shader.
GLuint newVertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(newVertexShader, 1, (const GLchar**)&vertexSource, NULL);
glCompileShader(newVertexShader);
glGetShaderiv(newVertexShader, GL_COMPILE_STATUS, &compileResult);
if (compileResult == GL_FALSE)
{
glGetShaderInfoLog(newVertexShader, errorMessageSize, &errorBufferSize, errorMessage);
glDeleteShader(newVertexShader);
return false;
}
// Fragment stage: the generated runtime shader.
GLuint newFragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(newFragmentShader, 1, (const GLchar**)&fragmentSource, NULL);
glCompileShader(newFragmentShader);
glGetShaderiv(newFragmentShader, GL_COMPILE_STATUS, &compileResult);
if (compileResult == GL_FALSE)
{
glGetShaderInfoLog(newFragmentShader, errorMessageSize, &errorBufferSize, errorMessage);
glDeleteShader(newVertexShader);
glDeleteShader(newFragmentShader);
return false;
}
GLuint newProgram = glCreateProgram();
glAttachShader(newProgram, newVertexShader);
glAttachShader(newProgram, newFragmentShader);
glLinkProgram(newProgram);
glGetProgramiv(newProgram, GL_LINK_STATUS, &linkResult);
if (linkResult == GL_FALSE)
{
glGetProgramInfoLog(newProgram, errorMessageSize, &errorBufferSize, errorMessage);
glDeleteProgram(newProgram);
glDeleteShader(newVertexShader);
glDeleteShader(newFragmentShader);
return false;
}
// Success: swap in the new program atomically from the renderer's viewpoint.
destroyShaderProgram();
mProgram = newProgram;
mVertexShader = newVertexShader;
mFragmentShader = newFragmentShader;
// Prime the parameter UBO so the first draw has a consistent layout.
const RuntimeRenderState state = mRuntimeHost->GetRenderState(mFrameWidth, mFrameHeight);
if (!updateGlobalParamsBuffer(state))
{
CopyErrorMessage("Failed to allocate the runtime parameter UBO.", errorMessageSize, errorMessage);
destroyShaderProgram();
return false;
}
mRuntimeHost->SetCompileStatus(true, "Shader compiled successfully.");
mRuntimeHost->ClearReloadRequest();
return true;
}
// Poll the runtime host for shader-registry file changes and pending reload
// requests, recompiling and re-broadcasting state as needed. Returns false
// when polling or recompilation fails.
bool OpenGLComposite::PollRuntimeChanges()
{
if (!mRuntimeHost)
return true;
bool registryChanged = false;
bool reloadRequested = false;
std::string runtimeError;
if (!mRuntimeHost->PollFileChanges(registryChanged, reloadRequested, runtimeError))
{
mRuntimeHost->SetCompileStatus(false, runtimeError);
broadcastRuntimeState();
return false;
}
// Registry changes (shader list, metadata) only require a UI refresh.
if (registryChanged)
broadcastRuntimeState();
if (!reloadRequested)
return true;
// A reload was requested: recompile under the already-current GL context
// (this runs from renderEffect() on the playout thread, inside pMutex).
char compilerErrorMessage[1024] = {};
if (!compileFragmentShader(sizeof(compilerErrorMessage), compilerErrorMessage))
{
mRuntimeHost->SetCompileStatus(false, compilerErrorMessage);
mRuntimeHost->ClearReloadRequest();
broadcastRuntimeState();
return false;
}
broadcastRuntimeState();
return true;
}
// Push the latest runtime state to all connected control clients.
// No-op when the control server was never created.
void OpenGLComposite::broadcastRuntimeState()
{
    if (!mControlServer)
        return;
    mControlServer->BroadcastState();
}
// Pack the runtime render state into an std140-layout byte buffer and upload
// it to the global-parameter UBO. The append order (time, input size, output
// size, frame count, mix, bypass, then each parameter in definition order)
// must match the uniform block declaration in the generated fragment shader —
// NOTE(review): that layout is produced by RuntimeHost; confirm the two stay
// in sync when the generator changes.
bool OpenGLComposite::updateGlobalParamsBuffer(const RuntimeRenderState& state)
{
std::vector<unsigned char> buffer;
buffer.reserve(512);
// Fixed header: globals every shader receives.
AppendStd140Float(buffer, static_cast<float>(state.timeSeconds));
AppendStd140Vec2(buffer, static_cast<float>(state.inputWidth), static_cast<float>(state.inputHeight));
AppendStd140Vec2(buffer, static_cast<float>(state.outputWidth), static_cast<float>(state.outputHeight));
AppendStd140Float(buffer, static_cast<float>(state.frameCount));
AppendStd140Float(buffer, static_cast<float>(state.mixAmount));
AppendStd140Float(buffer, static_cast<float>(state.bypass));
// Per-shader parameters, appended in definition order with std140 alignment.
for (const ShaderParameterDefinition& definition : state.parameterDefinitions)
{
// Missing values fall back to a default-constructed ShaderParameterValue.
auto valueIt = state.parameterValues.find(definition.id);
const ShaderParameterValue value = valueIt != state.parameterValues.end()
? valueIt->second
: ShaderParameterValue();
switch (definition.type)
{
case ShaderParameterType::Float:
AppendStd140Float(buffer, value.numberValues.empty() ? 0.0f : static_cast<float>(value.numberValues[0]));
break;
case ShaderParameterType::Vec2:
AppendStd140Vec2(buffer,
value.numberValues.size() > 0 ? static_cast<float>(value.numberValues[0]) : 0.0f,
value.numberValues.size() > 1 ? static_cast<float>(value.numberValues[1]) : 0.0f);
break;
case ShaderParameterType::Color:
// Colors default each missing channel to 1.0 (opaque white).
AppendStd140Vec4(buffer,
value.numberValues.size() > 0 ? static_cast<float>(value.numberValues[0]) : 1.0f,
value.numberValues.size() > 1 ? static_cast<float>(value.numberValues[1]) : 1.0f,
value.numberValues.size() > 2 ? static_cast<float>(value.numberValues[2]) : 1.0f,
value.numberValues.size() > 3 ? static_cast<float>(value.numberValues[3]) : 1.0f);
break;
case ShaderParameterType::Boolean:
AppendStd140Int(buffer, value.booleanValue ? 1 : 0);
break;
case ShaderParameterType::Enum:
{
// Enums are uploaded as the selected option's index (0 if not found).
int selectedIndex = 0;
for (std::size_t optionIndex = 0; optionIndex < definition.enumOptions.size(); ++optionIndex)
{
if (definition.enumOptions[optionIndex].value == value.enumValue)
{
selectedIndex = static_cast<int>(optionIndex);
break;
}
}
AppendStd140Int(buffer, selectedIndex);
break;
}
}
}
// std140 requires the block size to be a multiple of 16 bytes.
buffer.resize(AlignStd140(buffer.size(), 16), 0);
glBindBuffer(GL_UNIFORM_BUFFER, mGlobalParamsUBO);
// Re-allocate only when the layout size changed; otherwise update in place.
if (mGlobalParamsUBOSize != static_cast<GLsizeiptr>(buffer.size()))
{
glBufferData(GL_UNIFORM_BUFFER, static_cast<GLsizeiptr>(buffer.size()), buffer.data(), GL_DYNAMIC_DRAW);
mGlobalParamsUBOSize = static_cast<GLsizeiptr>(buffer.size());
}
else
{
glBufferSubData(GL_UNIFORM_BUFFER, 0, static_cast<GLsizeiptr>(buffer.size()), buffer.data());
}
glBindBufferBase(GL_UNIFORM_BUFFER, kGlobalParamsBindingPoint, mGlobalParamsUBO);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
return true;
}
// Serialize the current runtime state as JSON; "{}" when no runtime host exists.
std::string OpenGLComposite::GetRuntimeStateJson() const
{
    if (!mRuntimeHost)
        return "{}";
    return mRuntimeHost->BuildStateJson();
}
// Switch the active shader in the runtime host, recompile the GL program,
// and broadcast the new state to connected clients.
// Returns false with 'error' populated when the shader id is rejected.
bool OpenGLComposite::SelectShader(const std::string& shaderId, std::string& error)
{
	// Fix: guard against use before the runtime host exists —
	// GetRuntimeStateJson() already tolerates a null mRuntimeHost.
	if (!mRuntimeHost)
	{
		error = "Shader runtime is not initialised";
		return false;
	}
	if (!mRuntimeHost->SelectShader(shaderId, error))
		return false;
	// Compile/bind the newly selected shader before the next frame is drawn.
	ReloadShader();
	broadcastRuntimeState();
	return true;
}
// Parse 'valueJson' and apply it to one shader parameter, then broadcast the
// updated state. Returns false with 'error' populated on parse or update failure.
bool OpenGLComposite::UpdateParameterJson(const std::string& shaderId, const std::string& parameterId, const std::string& valueJson, std::string& error)
{
	// Fix: guard against use before the runtime host exists —
	// GetRuntimeStateJson() already tolerates a null mRuntimeHost.
	if (!mRuntimeHost)
	{
		error = "Shader runtime is not initialised";
		return false;
	}
	JsonValue parsedValue;
	if (!ParseJson(valueJson, parsedValue, error))
		return false;
	if (!mRuntimeHost->UpdateParameter(shaderId, parameterId, parsedValue, error))
		return false;
	broadcastRuntimeState();
	return true;
}
// Enable or disable the shader bypass and broadcast the updated state.
bool OpenGLComposite::SetBypassEnabled(bool bypassEnabled, std::string& error)
{
	// Fix: guard against use before the runtime host exists —
	// GetRuntimeStateJson() already tolerates a null mRuntimeHost.
	if (!mRuntimeHost)
	{
		error = "Shader runtime is not initialised";
		return false;
	}
	if (!mRuntimeHost->SetBypass(bypassEnabled, error))
		return false;
	broadcastRuntimeState();
	return true;
}
// Set the dry/wet mix amount and broadcast the updated state.
bool OpenGLComposite::SetMixAmount(double mixAmount, std::string& error)
{
	// Fix: guard against use before the runtime host exists —
	// GetRuntimeStateJson() already tolerates a null mRuntimeHost.
	if (!mRuntimeHost)
	{
		error = "Shader runtime is not initialised";
		return false;
	}
	if (!mRuntimeHost->SetMixAmount(mixAmount, error))
		return false;
	broadcastRuntimeState();
	return true;
}
bool OpenGLComposite::CheckOpenGLExtensions()
{
mFastTransferExtensionAvailable = VideoFrameTransfer::checkFastMemoryTransferAvailable();
if (!mFastTransferExtensionAvailable)
OutputDebugStringA("Fast memory transfer extension not available, using regular OpenGL transfer fallback instead\n");
return true;
}
////////////////////////////////////////////
// PinnedMemoryAllocator
////////////////////////////////////////////
// PinnedMemoryAllocator implements the IDeckLinkVideoBufferAllocator interface to be used instead of the
// built-in buffer allocator
//
// For this sample application a custom buffer allocator is used to ensure each address
// of buffer memory is aligned on a 4kB boundary required by the OpenGL pinned memory extension.
// If the pinned memory extension is not available, this allocator will still be used and
// demonstrates how to cache buffer allocations for efficiency.
//
// The frame cache delays the releasing of buffers until the cache fills up, thereby avoiding an
// allocate plus pin operation for every frame, followed by an unpin and deallocate on every frame.
// Construct an allocator bound to a GL context. 'direction' selects the
// transfer direction used for each buffer's VideoFrameTransfer, 'bufferSize'
// is the fixed size of every frame buffer this allocator hands out, and
// 'cacheSize' caps how many released buffers are kept for re-use.
PinnedMemoryAllocator::PinnedMemoryAllocator(HDC hdc, HGLRC hglrc, VideoFrameTransfer::Direction direction, unsigned cacheSize, unsigned bufferSize) :
	mHGLDC(hdc),
	mHGLRC(hglrc),
	mRefCount(1),	// COM convention: a freshly constructed object carries one reference
	mDirection(direction),
	mBufferSize(bufferSize),
	mFrameCacheSize(cacheSize) // large cache size will keep more memory pinned and may result in out of memory errors
{
}
// Tear down the allocator: drain the buffer cache (un-pinning and freeing each
// page-aligned allocation) and destroy any remaining transfer objects.
PinnedMemoryAllocator::~PinnedMemoryAllocator()
{
	// Cleanup any unused buffers that remain in the cache
	while (!mFrameCache.empty())
	{
		void* cachedBuffer = mFrameCache.back();
		mFrameCache.pop_back();
		unPinAddress(cachedBuffer);
		VirtualFree(cachedBuffer, 0, MEM_RELEASE);
	}
	// Destroy transfer objects that were never explicitly un-pinned
	for (auto& transferEntry : mFrameTransfer)
		delete transferEntry.second;
	mFrameTransfer.clear();
}
// Start a DMA transfer of the frame at 'address', lazily creating (and caching)
// the VideoFrameTransfer object that pins the buffer on first use.
// Returns the result of performFrameTransfer().
bool PinnedMemoryAllocator::transferFrame(void* address, GLuint gpuTexture)
{
	// Fix: single map lookup instead of count() followed by two operator[]
	// calls (the original walked the map up to three times per frame).
	auto iter = mFrameTransfer.find(address);
	if (iter == mFrameTransfer.end())
	{
		// VideoFrameTransfer prepares and pins address
		iter = mFrameTransfer.emplace(address, new VideoFrameTransfer(mBufferSize, address, mDirection)).first;
	}
	// NOTE(review): gpuTexture is unused here — presumably the texture binding
	// happens elsewhere in the transfer path; confirm before removing it.
	return iter->second->performFrameTransfer();
}
// Block until the in-flight transfer for 'address' finishes; no-op when the
// address has no associated transfer object.
void PinnedMemoryAllocator::waitForTransferComplete(void* address)
{
	// Fix: single find() instead of count() + operator[] — one lookup, and
	// operator[] can never accidentally default-insert a null entry.
	auto iter = mFrameTransfer.find(address);
	if (iter != mFrameTransfer.end())
		iter->second->waitForTransferComplete();
}
// Un-pin a buffer that was pinned by transferFrame(); no-op otherwise.
void PinnedMemoryAllocator::unPinAddress(void* address)
{
	// un-pin address only if it has been pinned for transfer
	auto iter = mFrameTransfer.find(address);
	if (iter != mFrameTransfer.end())
	{
		// The GL context must be current while the VideoFrameTransfer is
		// destroyed so it can release its GL-side resources.
		wglMakeCurrent( mHGLDC, mHGLRC );
		// Fix: the map holds owning raw pointers (see the destructor, which
		// deletes the remaining entries) — erase() alone leaked the
		// VideoFrameTransfer and its destructor (the actual un-pin) never ran.
		delete iter->second;
		mFrameTransfer.erase(iter);
		wglMakeCurrent( NULL, NULL );
	}
}
// IUnknown methods
// IUnknown::QueryInterface — hand out IUnknown, the allocator's private IID,
// or IDeckLinkVideoBufferAllocator; each successful path AddRefs before return.
HRESULT STDMETHODCALLTYPE PinnedMemoryAllocator::QueryInterface(REFIID iid, LPVOID* ppv)
{
	if (ppv == nullptr)
		return E_POINTER;
	if (iid == IID_IUnknown || iid == IID_PinnedMemoryAllocator)
	{
		*ppv = this;
		AddRef();
		return S_OK;
	}
	if (iid == IID_IDeckLinkVideoBufferAllocator)
	{
		*ppv = static_cast<IDeckLinkVideoBufferAllocator*>(this);
		AddRef();
		return S_OK;
	}
	// Unsupported interface: COM contract requires a nulled out-pointer.
	*ppv = nullptr;
	return E_NOINTERFACE;
}
// IUnknown::AddRef — increment and return the reference count.
// NOTE(review): plain pre-increment — assumes mRefCount is declared std::atomic
// in the header (or the object is only ref-counted from one thread); confirm.
ULONG STDMETHODCALLTYPE PinnedMemoryAllocator::AddRef(void)
{
	return ++mRefCount;
}
// IUnknown::Release — decrement the reference count, self-delete at zero, and
// return the post-decrement count.
// NOTE(review): same atomicity assumption as AddRef — confirm mRefCount's type.
ULONG STDMETHODCALLTYPE PinnedMemoryAllocator::Release(void)
{
	int newCount = --mRefCount;
	if (newCount == 0)
		delete this;
	return newCount;
}
// IDeckLinkVideoBufferAllocator methods
// IDeckLinkVideoBufferAllocator::AllocateVideoBuffer — hand out a buffer of
// mBufferSize bytes, re-using a cached allocation when available. Ownership of
// the memory is shared via shared_ptr; the custom deleter below decides whether
// a released buffer is cached for re-use or un-pinned and freed.
HRESULT STDMETHODCALLTYPE PinnedMemoryAllocator::AllocateVideoBuffer (IDeckLinkVideoBuffer** allocatedBuffer)
{
	std::shared_ptr<void> sharedMemBuffer;
	// Manage caching of allocated buffers via shared_ptr deleter.
	auto deleter = [this](void* buffer) mutable {
		if (mFrameCache.size() < mFrameCacheSize)
		{
			// Room in the cache: keep the (possibly still pinned) buffer for the next frame
			mFrameCache.push_back(buffer);
		}
		else
		{
			// No room left in cache, so un-pin (if it was pinned) and free this buffer
			unPinAddress(buffer);
			VirtualFree(buffer, 0, MEM_RELEASE);
		}
		// Balance the AddRef() taken below at allocation time — the buffer held
		// a reference on this allocator for its whole lifetime.
		Release();
	};
	if (mFrameCache.empty())
	{
		// Allocate memory on a page boundary
		// NOTE(review): MEM_WRITE_WATCH is requested — presumably consumed by the
		// transfer path to track dirtied pages; confirm against VideoFrameTransfer.
		void* memBuffer = VirtualAlloc(NULL, mBufferSize, MEM_COMMIT | MEM_RESERVE | MEM_WRITE_WATCH, PAGE_READWRITE);
		if (!memBuffer)
			return E_OUTOFMEMORY;
		sharedMemBuffer = std::shared_ptr<void>(memBuffer, deleter);
	}
	else
	{
		// Re-use most recently released address
		sharedMemBuffer = std::shared_ptr<void>(mFrameCache.back(), deleter);
		mFrameCache.pop_back();
	}
	// This class owns the mem so the buffer we return needs to AddRef() this, and Release() in the deleter
	AddRef();
	*allocatedBuffer = new DeckLinkVideoBuffer(sharedMemBuffer, this);
	return S_OK;
}
////////////////////////////////////////////
// InputAllocatorPool Class
////////////////////////////////////////////
// Construct the pool; the device context and GL rendering context are handed
// to every PinnedMemoryAllocator created by GetVideoBufferAllocator().
InputAllocatorPool::InputAllocatorPool(HDC hdc, HGLRC hglrc) :
	mHDC(hdc),
	mHGLRC(hglrc)
{
	// NOTE(review): mRefCount is not initialised here — presumably it has an
	// in-class initialiser in the header; confirm it starts at the intended value.
}
// IUnknown::QueryInterface — hand out IUnknown or the allocator-provider
// interface; each successful path AddRefs before return.
HRESULT InputAllocatorPool::QueryInterface(REFIID iid, void** ppv)
{
	if (ppv == nullptr)
		return E_POINTER;
	if (iid == IID_IUnknown)
	{
		*ppv = this;
		AddRef();
		return S_OK;
	}
	if (iid == IID_IDeckLinkVideoBufferAllocatorProvider)
	{
		*ppv = static_cast<IDeckLinkVideoBufferAllocatorProvider*>(this);
		AddRef();
		return S_OK;
	}
	// Unsupported interface: COM contract requires a nulled out-pointer.
	*ppv = nullptr;
	return E_NOINTERFACE;
}
// IUnknown::AddRef — increment and return the reference count.
// NOTE(review): plain pre-increment — assumes mRefCount is std::atomic in the
// header (or single-threaded use); confirm.
ULONG InputAllocatorPool::AddRef(void)
{
	return ++mRefCount;
}
// IUnknown::Release — decrement the reference count, self-delete at zero, and
// return the post-decrement count.
ULONG InputAllocatorPool::Release(void)
{
	int newCount = --mRefCount;
	if (newCount == 0)
		delete this;
	return newCount;
}
// Return a pinned-memory allocator for the requested buffer size. One
// PinnedMemoryAllocator is cached per distinct size; repeat requests hand back
// the cached instance with an extra reference. The other geometry/pixel-format
// arguments are not needed to size the allocation and are ignored.
HRESULT InputAllocatorPool::GetVideoBufferAllocator(
	/* [in] */ unsigned int bufferSize,
	/* [in] */ unsigned int,
	/* [in] */ unsigned int,
	/* [in] */ unsigned int,
	/* [in] */ BMDPixelFormat,
	/* [out] */ IDeckLinkVideoBufferAllocator **allocator)
{
	if (!allocator)
		return E_POINTER;
	auto existing = mAllocatorBySize.find(bufferSize);
	if (existing != mAllocatorBySize.end())
	{
		// &* goes through CComPtr::operator* to the raw interface pointer,
		// side-stepping CComPtr::operator& (which asserts on non-null).
		*allocator = &*existing->second;
		(*allocator)->AddRef();
		return S_OK;
	}
	// Attach() adopts the initial reference without an extra AddRef.
	CComPtr<PinnedMemoryAllocator> newAllocator;
	newAllocator.Attach(new (std::nothrow) PinnedMemoryAllocator(mHDC, mHGLRC, VideoFrameTransfer::CPUtoGPU, 3, bufferSize));
	if (!newAllocator)
		return E_OUTOFMEMORY;
	// Construct the pair in place — no need to go via std::make_pair.
	// The map takes its own reference through the CComPtr copy.
	mAllocatorBySize.emplace(bufferSize, newAllocator);
	// Detach() transfers this function's reference to the caller.
	*allocator = newAllocator.Detach();
	return S_OK;
}
////////////////////////////////////////////
// DeckLink Video Buffer Class
////////////////////////////////////////////
// Wrap a shared memory buffer as an IDeckLinkVideoBuffer. mBuffer shares
// ownership of the memory (its deleter caches or frees it), while
// mParentAllocator is a borrowed pointer — AllocateVideoBuffer() took the
// AddRef on our behalf and the shared_ptr deleter performs the matching Release.
DeckLinkVideoBuffer::DeckLinkVideoBuffer(std::shared_ptr<void>& buffer, PinnedMemoryAllocator* parent) :
	mParentAllocator(parent),
	mRefCount(1),	// COM convention: the caller owns the initial reference
	mBuffer(buffer)
{
}
// IUnknown::QueryInterface — hand out IUnknown or IDeckLinkVideoBuffer on this
// object; requests for the allocator's private IID are forwarded to the parent
// allocator, which supplies its own reference.
HRESULT STDMETHODCALLTYPE DeckLinkVideoBuffer::QueryInterface(REFIID riid, void** ppvObject)
{
	if (ppvObject == nullptr)
		return E_POINTER;
	if (riid == IID_IUnknown)
	{
		*ppvObject = this;
		AddRef();
		return S_OK;
	}
	if (riid == IID_IDeckLinkVideoBuffer)
	{
		*ppvObject = static_cast<IDeckLinkVideoBuffer*>(this);
		AddRef();
		return S_OK;
	}
	if (riid == IID_PinnedMemoryAllocator)
	{
		// Delegate to the owning allocator (it AddRefs itself on success).
		return mParentAllocator->QueryInterface(riid, ppvObject);
	}
	// Unsupported interface: COM contract requires a nulled out-pointer.
	*ppvObject = nullptr;
	return E_NOINTERFACE;
}
// IUnknown::AddRef — increment and return the reference count.
// NOTE(review): plain pre-increment — assumes mRefCount is std::atomic in the
// header (or single-threaded use); confirm.
ULONG STDMETHODCALLTYPE DeckLinkVideoBuffer::AddRef()
{
	return ++mRefCount;
}
// IUnknown::Release — decrement the reference count, self-delete at zero, and
// return the post-decrement count.
ULONG STDMETHODCALLTYPE DeckLinkVideoBuffer::Release()
{
	int newValue = --mRefCount;
	if (newValue == 0)
		delete this;
	return newValue;
}
// IDeckLinkVideoBuffer::GetBytes — expose the raw pixel buffer to the API.
// The pointer stays valid for this object's lifetime (shared ownership).
HRESULT STDMETHODCALLTYPE DeckLinkVideoBuffer::GetBytes(void** buffer)
{
	if (!buffer)
		return E_POINTER;
	*buffer = mBuffer.get();
	return S_OK;
}
// IDeckLinkVideoBuffer::GetSize — report the buffer size, which is fixed per
// parent allocator (every buffer it allocates has the same size).
HRESULT STDMETHODCALLTYPE DeckLinkVideoBuffer::GetSize(uint64_t* size)
{
	if (!size)
		return E_POINTER;
	*size = mParentAllocator->bufferSize();
	return S_OK;
}
// IDeckLinkVideoBuffer::StartAccess — no synchronisation or mapping is needed
// for this CPU-visible buffer, so this is a deliberate no-op.
HRESULT STDMETHODCALLTYPE DeckLinkVideoBuffer::StartAccess(BMDBufferAccessFlags)
{
	return S_OK;
}
// IDeckLinkVideoBuffer::EndAccess — counterpart of StartAccess; also a no-op.
HRESULT STDMETHODCALLTYPE DeckLinkVideoBuffer::EndAccess(BMDBufferAccessFlags)
{
	return S_OK;
}
////////////////////////////////////////////
// DeckLink Capture Delegate Class
////////////////////////////////////////////
// Capture callback delegate: forwards arrived frames to the owning
// OpenGLComposite (borrowed pointer — the owner must outlive this delegate).
CaptureDelegate::CaptureDelegate(OpenGLComposite* pOwner) :
	m_pOwner(pOwner),
	mRefCount(1)	// COM convention: the creator owns the initial reference
{
}
// IUnknown::QueryInterface — this delegate deliberately exposes no additional
// COM interfaces; callers obtain it directly via SetCallback().
HRESULT CaptureDelegate::QueryInterface(REFIID iid, LPVOID *ppv)
{
	// Fix: guard the out-pointer before writing through it, consistent with
	// every other QueryInterface implementation in this file.
	if (!ppv)
		return E_POINTER;
	*ppv = NULL;
	return E_NOINTERFACE;
}
// IUnknown::AddRef — thread-safe increment via InterlockedIncrement
// (the DeckLink driver may call from its own capture thread).
ULONG CaptureDelegate::AddRef()
{
	return InterlockedIncrement(&mRefCount);
}
// IUnknown::Release — thread-safe decrement; self-deletes when the count
// reaches zero and returns the post-decrement count.
ULONG CaptureDelegate::Release()
{
	int newCount = InterlockedDecrement(&mRefCount);
	if (newCount == 0)
		delete this;
	return newCount;
}
// Driver callback: a captured frame (and/or audio packet) is available.
// Forwards the video frame and a signal-present flag to the owner.
HRESULT CaptureDelegate::VideoInputFrameArrived(IDeckLinkVideoInputFrame* inputFrame, IDeckLinkAudioInputPacket* /*audioPacket*/)
{
	// It's possible to receive a NULL inputFrame, but a valid audioPacket. Ignore audio-only frame.
	if (inputFrame == nullptr)
		return S_OK;
	const bool hasNoInputSource =
		(inputFrame->GetFlags() & bmdFrameHasNoInputSource) == bmdFrameHasNoInputSource;
	m_pOwner->VideoFrameArrived(inputFrame, hasNoInputSource);
	return S_OK;
}
// Driver callback for input format changes — no action is taken here; the
// notification is acknowledged and otherwise ignored.
HRESULT CaptureDelegate::VideoInputFormatChanged(BMDVideoInputFormatChangedEvents notificationEvents, IDeckLinkDisplayMode *newDisplayMode, BMDDetectedVideoInputFormatFlags detectedSignalFlags)
{
	return S_OK;
}
////////////////////////////////////////////
// DeckLink Playout Delegate Class
////////////////////////////////////////////
// Playout callback delegate: forwards frame-completion events to the owning
// OpenGLComposite (borrowed pointer — the owner must outlive this delegate).
PlayoutDelegate::PlayoutDelegate(OpenGLComposite* pOwner) :
	m_pOwner(pOwner),
	mRefCount(1)	// COM convention: the creator owns the initial reference
{
}
// IUnknown::QueryInterface — this delegate deliberately exposes no additional
// COM interfaces; callers obtain it directly via SetScheduledFrameCompletionCallback().
HRESULT PlayoutDelegate::QueryInterface(REFIID iid, LPVOID *ppv)
{
	// Fix: guard the out-pointer before writing through it, consistent with
	// every other QueryInterface implementation in this file.
	if (!ppv)
		return E_POINTER;
	*ppv = NULL;
	return E_NOINTERFACE;
}
// IUnknown::AddRef — thread-safe increment via InterlockedIncrement
// (the DeckLink driver may call from its own playout thread).
ULONG PlayoutDelegate::AddRef()
{
	return InterlockedIncrement(&mRefCount);
}
// IUnknown::Release — thread-safe decrement; self-deletes when the count
// reaches zero and returns the post-decrement count.
ULONG PlayoutDelegate::Release()
{
	int newCount = InterlockedDecrement(&mRefCount);
	if (newCount == 0)
		delete this;
	return newCount;
}
// Driver callback: a scheduled output frame has been consumed. Logs abnormal
// completion results, then notifies the owner so it can recycle the frame and
// schedule the next one.
HRESULT PlayoutDelegate::ScheduledFrameCompleted (IDeckLinkVideoFrame* completedFrame, BMDOutputFrameCompletionResult result)
{
	switch (result)
	{
		case bmdOutputFrameDisplayedLate:
			OutputDebugStringA("ScheduledFrameCompleted() frame did not complete: Frame Displayed Late\n");
			break;
		case bmdOutputFrameDropped:
			OutputDebugStringA("ScheduledFrameCompleted() frame did not complete: Frame Dropped\n");
			break;
		case bmdOutputFrameCompleted:
		case bmdOutputFrameFlushed:
			// Don't log bmdOutputFrameFlushed result since it is expected when Stop() is called
			break;
		default:
			OutputDebugStringA("ScheduledFrameCompleted() frame did not complete: Unknown error\n");
	}
	// Notify the owner regardless of the completion result.
	m_pOwner->PlayoutFrameCompleted(completedFrame, result);
	return S_OK;
}
// Driver callback: scheduled playback has stopped — acknowledged, no action
// taken here.
HRESULT PlayoutDelegate::ScheduledPlaybackHasStopped ()
{
	return S_OK;
}