29 Commits

Author SHA1 Message Date
Aiden
2531d871e8 Doc cleanup
All checks were successful
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m49s
CI / Windows Release Package (push) Successful in 3m8s
2026-05-12 01:37:20 +10:00
Aiden
709d3d3fa4 Test works
All checks were successful
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m53s
CI / Windows Release Package (push) Successful in 3m1s
2026-05-12 01:30:30 +10:00
Aiden
ea31d0ca13 Clean 2026-05-12 01:21:42 +10:00
Aiden
f1f4e3421b Frame timing
All checks were successful
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m53s
CI / Windows Release Package (push) Successful in 3m6s
2026-05-12 01:08:32 +10:00
Aiden
ac729dc2b9 Stage 1 rewrite 2026-05-12 00:52:33 +10:00
Aiden
bf23cd880a faliure
Some checks failed
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Failing after 2m52s
CI / Windows Release Package (push) Has been skipped
2026-05-12 00:35:01 +10:00
Aiden
9e3412712c Improvement
All checks were successful
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m52s
CI / Windows Release Package (push) Successful in 3m0s
2026-05-12 00:00:23 +10:00
Aiden
a434a88108 Performance chasing
All checks were successful
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m51s
CI / Windows Release Package (push) Successful in 2m55s
2026-05-11 23:10:45 +10:00
Aiden
c5cead6003 Phase 7.5 step 2
All checks were successful
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m45s
CI / Windows Release Package (push) Successful in 2m52s
2026-05-11 21:36:17 +10:00
Aiden
f8adbbe0fe Phase 7.5 timing logs
Some checks failed
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m45s
CI / Windows Release Package (push) Has been cancelled
2026-05-11 21:32:40 +10:00
Aiden
0a7954e879 Phase 7 done
All checks were successful
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m47s
CI / Windows Release Package (push) Successful in 3m2s
2026-05-11 21:15:51 +10:00
Aiden
f288455709 Phase 7
All checks were successful
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m47s
CI / Windows Release Package (push) Successful in 3m2s
2026-05-11 21:05:11 +10:00
Aiden
50d5880835 Step 3 2026-05-11 20:49:36 +10:00
Aiden
52eaf16a8c Phase 7 step 2
All checks were successful
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m44s
CI / Windows Release Package (push) Successful in 2m57s
2026-05-11 20:45:58 +10:00
Aiden
6b0638336a Phase 7 step 1
All checks were successful
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m47s
CI / Windows Release Package (push) Successful in 2m53s
2026-05-11 20:39:01 +10:00
Aiden
0da6ad6802 Docs update
All checks were successful
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m44s
CI / Windows Release Package (push) Successful in 2m47s
2026-05-11 20:14:10 +10:00
Aiden
dd3cd6b66c Clean up
Some checks failed
CI / React UI Build (push) Successful in 10s
CI / Windows Release Package (push) Has been cancelled
CI / Native Windows Build And Tests (push) Has been cancelled
2026-05-11 20:11:20 +10:00
Aiden
1d08dec5fe step 6
Some checks failed
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m44s
CI / Windows Release Package (push) Has been cancelled
2026-05-11 20:06:14 +10:00
Aiden
0d57920bc1 step 5 2026-05-11 20:02:26 +10:00
Aiden
1629dbc77a step 4 2026-05-11 19:58:14 +10:00
Aiden
205c90e52e Step 3 2026-05-11 19:53:31 +10:00
Aiden
ab38bfad24 step 2
All checks were successful
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m41s
CI / Windows Release Package (push) Successful in 2m46s
2026-05-11 19:49:05 +10:00
Aiden
68503256dc Phase 6 step 1
All checks were successful
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m40s
CI / Windows Release Package (push) Successful in 2m47s
2026-05-11 19:44:35 +10:00
Aiden
a91cc91a21 Clean up shape
All checks were successful
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m41s
CI / Windows Release Package (push) Successful in 2m48s
2026-05-11 19:37:44 +10:00
Aiden
a530325fa1 Organisation
Some checks failed
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m42s
CI / Windows Release Package (push) Has been cancelled
2026-05-11 19:31:06 +10:00
Aiden
d332dceb5b Step 6
Some checks failed
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m43s
CI / Windows Release Package (push) Has been cancelled
2026-05-11 19:25:29 +10:00
Aiden
79855d788c Step 5 storng option
Some checks failed
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m41s
CI / Windows Release Package (push) Has been cancelled
2026-05-11 19:20:23 +10:00
Aiden
ff10b66d1d Phase 5 step 5
Some checks failed
CI / React UI Build (push) Successful in 10s
CI / Native Windows Build And Tests (push) Successful in 2m42s
CI / Windows Release Package (push) Has been cancelled
2026-05-11 19:14:59 +10:00
Aiden
fdcc38c6ae Step 4 2026-05-11 19:09:01 +10:00
87 changed files with 7764 additions and 3993 deletions

68
.vscode/launch.json vendored
View File

@@ -9,7 +9,12 @@
"args": [], "args": [],
"stopAtEntry": false, "stopAtEntry": false,
"cwd": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug", "cwd": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug",
"environment": [], "environment": [
{
"name": "VST_DISABLE_INPUT_CAPTURE",
"value": "1"
}
],
"console": "internalConsole", "console": "internalConsole",
"symbolSearchPath": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug", "symbolSearchPath": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug",
"requireExactSource": true, "requireExactSource": true,
@@ -17,6 +22,67 @@
"moduleLoad": true "moduleLoad": true
}, },
"preLaunchTask": "Build LoopThroughWithOpenGLCompositing Debug x64" "preLaunchTask": "Build LoopThroughWithOpenGLCompositing Debug x64"
},
{
"name": "Debug LoopThroughWithOpenGLCompositing - sync readback experiment",
"type": "cppvsdbg",
"request": "launch",
"program": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug\\LoopThroughWithOpenGLCompositing.exe",
"args": [],
"stopAtEntry": false,
"cwd": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug",
"environment": [
{
"name": "VST_OUTPUT_READBACK_MODE",
"value": "sync"
}
],
"console": "internalConsole",
"symbolSearchPath": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug",
"requireExactSource": true,
"logging": {
"moduleLoad": true
},
"preLaunchTask": "Build LoopThroughWithOpenGLCompositing Debug x64"
},
{
"name": "Debug LoopThroughWithOpenGLCompositing - cached output experiment",
"type": "cppvsdbg",
"request": "launch",
"program": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug\\LoopThroughWithOpenGLCompositing.exe",
"args": [],
"stopAtEntry": false,
"cwd": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug",
"environment": [
{
"name": "VST_OUTPUT_READBACK_MODE",
"value": "cached_only"
}
],
"console": "internalConsole",
"symbolSearchPath": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug",
"requireExactSource": true,
"logging": {
"moduleLoad": true
},
"preLaunchTask": "Build LoopThroughWithOpenGLCompositing Debug x64"
},
{
"name": "Debug DeckLinkRenderCadenceProbe",
"type": "cppvsdbg",
"request": "launch",
"program": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug\\DeckLinkRenderCadenceProbe.exe",
"args": [],
"stopAtEntry": false,
"cwd": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug",
"environment": [],
"console": "externalTerminal",
"symbolSearchPath": "${workspaceFolder}\\build\\vs2022-x64-debug\\Debug",
"requireExactSource": true,
"logging": {
"moduleLoad": true
},
"preLaunchTask": "Build DeckLinkRenderCadenceProbe Debug x64"
} }
] ]
} }

16
.vscode/tasks.json vendored
View File

@@ -36,6 +36,22 @@
"group": "build", "group": "build",
"problemMatcher": "$msCompile" "problemMatcher": "$msCompile"
}, },
{
"label": "Build DeckLinkRenderCadenceProbe Debug x64",
"type": "process",
"command": "C:\\Program Files\\Microsoft Visual Studio\\2022\\Community\\Common7\\IDE\\CommonExtensions\\Microsoft\\CMake\\CMake\\bin\\cmake.exe",
"args": [
"--build",
"${workspaceFolder}\\build\\vs2022-x64-debug",
"--config",
"Debug",
"--target",
"DeckLinkRenderCadenceProbe",
"--parallel"
],
"group": "build",
"problemMatcher": "$msCompile"
},
{ {
"label": "Clean LoopThroughWithOpenGLCompositing Debug x64", "label": "Clean LoopThroughWithOpenGLCompositing Debug x64",
"type": "process", "type": "process",

View File

@@ -61,18 +61,18 @@ set(APP_SOURCES
"${APP_DIR}/gl/renderer/GlScopedObjects.h" "${APP_DIR}/gl/renderer/GlScopedObjects.h"
"${APP_DIR}/gl/shader/GlShaderSources.cpp" "${APP_DIR}/gl/shader/GlShaderSources.cpp"
"${APP_DIR}/gl/shader/GlShaderSources.h" "${APP_DIR}/gl/shader/GlShaderSources.h"
"${APP_DIR}/gl/OpenGLComposite.cpp" "${APP_DIR}/gl/composite/OpenGLComposite.cpp"
"${APP_DIR}/gl/OpenGLComposite.h" "${APP_DIR}/gl/composite/OpenGLComposite.h"
"${APP_DIR}/gl/OpenGLCompositeRuntimeControls.cpp" "${APP_DIR}/gl/composite/OpenGLCompositeRuntimeControls.cpp"
"${APP_DIR}/gl/RenderCommandQueue.cpp" "${APP_DIR}/gl/threading/RenderCommandQueue.cpp"
"${APP_DIR}/gl/RenderCommandQueue.h" "${APP_DIR}/gl/threading/RenderCommandQueue.h"
"${APP_DIR}/gl/RenderEngine.cpp" "${APP_DIR}/gl/RenderEngine.cpp"
"${APP_DIR}/gl/RenderEngine.h" "${APP_DIR}/gl/RenderEngine.h"
"${APP_DIR}/gl/RenderFrameState.h" "${APP_DIR}/gl/frame/RenderFrameState.h"
"${APP_DIR}/gl/RenderFrameStateResolver.cpp" "${APP_DIR}/gl/frame/RenderFrameStateResolver.cpp"
"${APP_DIR}/gl/RenderFrameStateResolver.h" "${APP_DIR}/gl/frame/RenderFrameStateResolver.h"
"${APP_DIR}/gl/RuntimeUpdateController.cpp" "${APP_DIR}/gl/frame/RuntimeUpdateController.cpp"
"${APP_DIR}/gl/RuntimeUpdateController.h" "${APP_DIR}/gl/frame/RuntimeUpdateController.h"
"${APP_DIR}/gl/pipeline/OpenGLRenderPass.cpp" "${APP_DIR}/gl/pipeline/OpenGLRenderPass.cpp"
"${APP_DIR}/gl/pipeline/OpenGLRenderPass.h" "${APP_DIR}/gl/pipeline/OpenGLRenderPass.h"
"${APP_DIR}/gl/pipeline/OpenGLRenderPipeline.cpp" "${APP_DIR}/gl/pipeline/OpenGLRenderPipeline.cpp"
@@ -117,12 +117,17 @@ set(APP_SOURCES
"${APP_DIR}/runtime/events/RuntimeEventPayloads.h" "${APP_DIR}/runtime/events/RuntimeEventPayloads.h"
"${APP_DIR}/runtime/events/RuntimeEventQueue.h" "${APP_DIR}/runtime/events/RuntimeEventQueue.h"
"${APP_DIR}/runtime/events/RuntimeEventType.h" "${APP_DIR}/runtime/events/RuntimeEventType.h"
"${APP_DIR}/runtime/live/CommittedLiveState.cpp"
"${APP_DIR}/runtime/live/CommittedLiveState.h"
"${APP_DIR}/runtime/live/RenderStateComposer.cpp" "${APP_DIR}/runtime/live/RenderStateComposer.cpp"
"${APP_DIR}/runtime/live/RenderStateComposer.h" "${APP_DIR}/runtime/live/RenderStateComposer.h"
"${APP_DIR}/runtime/live/RuntimeStateLayerModel.cpp" "${APP_DIR}/runtime/live/RuntimeStateLayerModel.cpp"
"${APP_DIR}/runtime/live/RuntimeStateLayerModel.h" "${APP_DIR}/runtime/live/RuntimeStateLayerModel.h"
"${APP_DIR}/runtime/live/RuntimeLiveState.cpp" "${APP_DIR}/runtime/live/RuntimeLiveState.cpp"
"${APP_DIR}/runtime/live/RuntimeLiveState.h" "${APP_DIR}/runtime/live/RuntimeLiveState.h"
"${APP_DIR}/runtime/persistence/PersistenceRequest.h"
"${APP_DIR}/runtime/persistence/PersistenceWriter.cpp"
"${APP_DIR}/runtime/persistence/PersistenceWriter.h"
"${APP_DIR}/runtime/presentation/RuntimeStateJson.cpp" "${APP_DIR}/runtime/presentation/RuntimeStateJson.cpp"
"${APP_DIR}/runtime/presentation/RuntimeStateJson.h" "${APP_DIR}/runtime/presentation/RuntimeStateJson.h"
"${APP_DIR}/runtime/presentation/RuntimeStatePresenter.cpp" "${APP_DIR}/runtime/presentation/RuntimeStatePresenter.cpp"
@@ -160,7 +165,18 @@ set(APP_SOURCES
"${APP_DIR}/videoio/VideoIOFormat.h" "${APP_DIR}/videoio/VideoIOFormat.h"
"${APP_DIR}/videoio/VideoBackend.cpp" "${APP_DIR}/videoio/VideoBackend.cpp"
"${APP_DIR}/videoio/VideoBackend.h" "${APP_DIR}/videoio/VideoBackend.h"
"${APP_DIR}/videoio/VideoBackendLifecycle.cpp"
"${APP_DIR}/videoio/VideoBackendLifecycle.h"
"${APP_DIR}/videoio/VideoIOTypes.h" "${APP_DIR}/videoio/VideoIOTypes.h"
"${APP_DIR}/videoio/OutputProductionController.cpp"
"${APP_DIR}/videoio/OutputProductionController.h"
"${APP_DIR}/videoio/RenderCadenceController.cpp"
"${APP_DIR}/videoio/RenderCadenceController.h"
"${APP_DIR}/videoio/RenderOutputQueue.cpp"
"${APP_DIR}/videoio/RenderOutputQueue.h"
"${APP_DIR}/videoio/SystemOutputFramePool.cpp"
"${APP_DIR}/videoio/SystemOutputFramePool.h"
"${APP_DIR}/videoio/VideoPlayoutPolicy.h"
"${APP_DIR}/videoio/VideoPlayoutScheduler.cpp" "${APP_DIR}/videoio/VideoPlayoutScheduler.cpp"
"${APP_DIR}/videoio/VideoPlayoutScheduler.h" "${APP_DIR}/videoio/VideoPlayoutScheduler.h"
) )
@@ -171,14 +187,18 @@ target_include_directories(LoopThroughWithOpenGLCompositing PRIVATE
"${APP_DIR}" "${APP_DIR}"
"${APP_DIR}/control" "${APP_DIR}/control"
"${APP_DIR}/gl" "${APP_DIR}/gl"
"${APP_DIR}/gl/composite"
"${APP_DIR}/gl/frame"
"${APP_DIR}/gl/pipeline" "${APP_DIR}/gl/pipeline"
"${APP_DIR}/gl/renderer" "${APP_DIR}/gl/renderer"
"${APP_DIR}/gl/shader" "${APP_DIR}/gl/shader"
"${APP_DIR}/gl/threading"
"${APP_DIR}/platform" "${APP_DIR}/platform"
"${APP_DIR}/runtime" "${APP_DIR}/runtime"
"${APP_DIR}/runtime/coordination" "${APP_DIR}/runtime/coordination"
"${APP_DIR}/runtime/events" "${APP_DIR}/runtime/events"
"${APP_DIR}/runtime/live" "${APP_DIR}/runtime/live"
"${APP_DIR}/runtime/persistence"
"${APP_DIR}/runtime/presentation" "${APP_DIR}/runtime/presentation"
"${APP_DIR}/runtime/snapshot" "${APP_DIR}/runtime/snapshot"
"${APP_DIR}/runtime/store" "${APP_DIR}/runtime/store"
@@ -209,6 +229,50 @@ if(MSVC)
target_compile_options(LoopThroughWithOpenGLCompositing PRIVATE /W3) target_compile_options(LoopThroughWithOpenGLCompositing PRIVATE /W3)
endif() endif()
set(PROBE_APP_DIR "${CMAKE_CURRENT_SOURCE_DIR}/apps/DeckLinkRenderCadenceProbe")
add_executable(DeckLinkRenderCadenceProbe
"${APP_DIR}/videoio/decklink/DeckLinkAPI_i.c"
"${APP_DIR}/videoio/decklink/DeckLinkDisplayMode.cpp"
"${APP_DIR}/videoio/decklink/DeckLinkDisplayMode.h"
"${APP_DIR}/videoio/decklink/DeckLinkFrameTransfer.cpp"
"${APP_DIR}/videoio/decklink/DeckLinkFrameTransfer.h"
"${APP_DIR}/videoio/decklink/DeckLinkSession.cpp"
"${APP_DIR}/videoio/decklink/DeckLinkSession.h"
"${APP_DIR}/videoio/decklink/DeckLinkVideoIOFormat.cpp"
"${APP_DIR}/videoio/decklink/DeckLinkVideoIOFormat.h"
"${APP_DIR}/gl/renderer/GLExtensions.cpp"
"${APP_DIR}/gl/renderer/GLExtensions.h"
"${APP_DIR}/videoio/VideoIOFormat.cpp"
"${APP_DIR}/videoio/VideoIOFormat.h"
"${APP_DIR}/videoio/VideoIOTypes.h"
"${APP_DIR}/videoio/VideoPlayoutPolicy.h"
"${APP_DIR}/videoio/VideoPlayoutScheduler.cpp"
"${APP_DIR}/videoio/VideoPlayoutScheduler.h"
"${PROBE_APP_DIR}/DeckLinkRenderCadenceProbe.cpp"
)
target_include_directories(DeckLinkRenderCadenceProbe PRIVATE
"${APP_DIR}"
"${APP_DIR}/gl/renderer"
"${APP_DIR}/videoio"
"${APP_DIR}/videoio/decklink"
)
target_link_libraries(DeckLinkRenderCadenceProbe PRIVATE
opengl32
Ole32
)
target_compile_definitions(DeckLinkRenderCadenceProbe PRIVATE
_UNICODE
UNICODE
)
if(MSVC)
target_compile_options(DeckLinkRenderCadenceProbe PRIVATE /W3)
endif()
add_executable(RuntimeJsonTests add_executable(RuntimeJsonTests
"${APP_DIR}/runtime/support/RuntimeJson.cpp" "${APP_DIR}/runtime/support/RuntimeJson.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/RuntimeJsonTests.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/tests/RuntimeJsonTests.cpp"
@@ -288,6 +352,7 @@ target_include_directories(RuntimeEventTypeTests PRIVATE
"${APP_DIR}" "${APP_DIR}"
"${APP_DIR}/runtime" "${APP_DIR}/runtime"
"${APP_DIR}/runtime/events" "${APP_DIR}/runtime/events"
"${APP_DIR}/runtime/persistence"
) )
if(MSVC) if(MSVC)
@@ -335,9 +400,28 @@ endif()
add_test(NAME RuntimeStateLayerModelTests COMMAND RuntimeStateLayerModelTests) add_test(NAME RuntimeStateLayerModelTests COMMAND RuntimeStateLayerModelTests)
add_executable(PersistenceWriterTests
"${APP_DIR}/runtime/persistence/PersistenceWriter.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/PersistenceWriterTests.cpp"
)
target_include_directories(PersistenceWriterTests PRIVATE
"${APP_DIR}"
"${APP_DIR}/runtime"
"${APP_DIR}/runtime/persistence"
)
if(MSVC)
target_compile_options(PersistenceWriterTests PRIVATE /W3)
endif()
add_test(NAME PersistenceWriterTests COMMAND PersistenceWriterTests)
add_executable(RuntimeSubsystemTests add_executable(RuntimeSubsystemTests
"${APP_DIR}/runtime/coordination/RuntimeCoordinator.cpp" "${APP_DIR}/runtime/coordination/RuntimeCoordinator.cpp"
"${APP_DIR}/runtime/live/CommittedLiveState.cpp"
"${APP_DIR}/runtime/snapshot/RenderSnapshotBuilder.cpp" "${APP_DIR}/runtime/snapshot/RenderSnapshotBuilder.cpp"
"${APP_DIR}/runtime/persistence/PersistenceWriter.cpp"
"${APP_DIR}/runtime/store/LayerStackStore.cpp" "${APP_DIR}/runtime/store/LayerStackStore.cpp"
"${APP_DIR}/runtime/store/RuntimeConfigStore.cpp" "${APP_DIR}/runtime/store/RuntimeConfigStore.cpp"
"${APP_DIR}/runtime/store/RuntimeStore.cpp" "${APP_DIR}/runtime/store/RuntimeStore.cpp"
@@ -359,6 +443,8 @@ target_include_directories(RuntimeSubsystemTests PRIVATE
"${APP_DIR}/runtime" "${APP_DIR}/runtime"
"${APP_DIR}/runtime/coordination" "${APP_DIR}/runtime/coordination"
"${APP_DIR}/runtime/events" "${APP_DIR}/runtime/events"
"${APP_DIR}/runtime/live"
"${APP_DIR}/runtime/persistence"
"${APP_DIR}/runtime/presentation" "${APP_DIR}/runtime/presentation"
"${APP_DIR}/runtime/snapshot" "${APP_DIR}/runtime/snapshot"
"${APP_DIR}/runtime/store" "${APP_DIR}/runtime/store"
@@ -390,13 +476,14 @@ endif()
add_test(NAME Std140BufferTests COMMAND Std140BufferTests) add_test(NAME Std140BufferTests COMMAND Std140BufferTests)
add_executable(RenderCommandQueueTests add_executable(RenderCommandQueueTests
"${APP_DIR}/gl/RenderCommandQueue.cpp" "${APP_DIR}/gl/threading/RenderCommandQueue.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/RenderCommandQueueTests.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/tests/RenderCommandQueueTests.cpp"
) )
target_include_directories(RenderCommandQueueTests PRIVATE target_include_directories(RenderCommandQueueTests PRIVATE
"${APP_DIR}" "${APP_DIR}"
"${APP_DIR}/gl" "${APP_DIR}/gl"
"${APP_DIR}/gl/threading"
"${APP_DIR}/videoio" "${APP_DIR}/videoio"
"${APP_DIR}/videoio/decklink" "${APP_DIR}/videoio/decklink"
) )
@@ -506,6 +593,89 @@ endif()
add_test(NAME VideoPlayoutSchedulerTests COMMAND VideoPlayoutSchedulerTests) add_test(NAME VideoPlayoutSchedulerTests COMMAND VideoPlayoutSchedulerTests)
add_executable(OutputProductionControllerTests
"${APP_DIR}/videoio/OutputProductionController.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/OutputProductionControllerTests.cpp"
)
target_include_directories(OutputProductionControllerTests PRIVATE
"${APP_DIR}"
"${APP_DIR}/videoio"
)
if(MSVC)
target_compile_options(OutputProductionControllerTests PRIVATE /W3)
endif()
add_test(NAME OutputProductionControllerTests COMMAND OutputProductionControllerTests)
add_executable(RenderOutputQueueTests
"${APP_DIR}/videoio/RenderOutputQueue.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/RenderOutputQueueTests.cpp"
)
target_include_directories(RenderOutputQueueTests PRIVATE
"${APP_DIR}"
"${APP_DIR}/videoio"
"${APP_DIR}/videoio/decklink"
)
if(MSVC)
target_compile_options(RenderOutputQueueTests PRIVATE /W3)
endif()
add_test(NAME RenderOutputQueueTests COMMAND RenderOutputQueueTests)
add_executable(RenderCadenceControllerTests
"${APP_DIR}/videoio/RenderCadenceController.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/RenderCadenceControllerTests.cpp"
)
target_include_directories(RenderCadenceControllerTests PRIVATE
"${APP_DIR}"
"${APP_DIR}/videoio"
)
if(MSVC)
target_compile_options(RenderCadenceControllerTests PRIVATE /W3)
endif()
add_test(NAME RenderCadenceControllerTests COMMAND RenderCadenceControllerTests)
add_executable(SystemOutputFramePoolTests
"${APP_DIR}/videoio/SystemOutputFramePool.cpp"
"${APP_DIR}/videoio/VideoIOFormat.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/SystemOutputFramePoolTests.cpp"
)
target_include_directories(SystemOutputFramePoolTests PRIVATE
"${APP_DIR}"
"${APP_DIR}/videoio"
"${APP_DIR}/videoio/decklink"
)
if(MSVC)
target_compile_options(SystemOutputFramePoolTests PRIVATE /W3)
endif()
add_test(NAME SystemOutputFramePoolTests COMMAND SystemOutputFramePoolTests)
add_executable(VideoBackendLifecycleTests
"${APP_DIR}/videoio/VideoBackendLifecycle.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/VideoBackendLifecycleTests.cpp"
)
target_include_directories(VideoBackendLifecycleTests PRIVATE
"${APP_DIR}"
"${APP_DIR}/videoio"
)
if(MSVC)
target_compile_options(VideoBackendLifecycleTests PRIVATE /W3)
endif()
add_test(NAME VideoBackendLifecycleTests COMMAND VideoBackendLifecycleTests)
add_executable(VideoIODeviceFakeTests add_executable(VideoIODeviceFakeTests
"${APP_DIR}/videoio/VideoIOFormat.cpp" "${APP_DIR}/videoio/VideoIOFormat.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/VideoIODeviceFakeTests.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/tests/VideoIODeviceFakeTests.cpp"

View File

@@ -0,0 +1,920 @@
#include "DeckLinkSession.h"
#include "GLExtensions.h"
#include "VideoIOFormat.h"
#include "VideoPlayoutPolicy.h"
#include <windows.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <iomanip>
#include <iostream>
#include <mutex>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
namespace
{
// Probe geometry and queue depths (sized for 1080p BGRA readback).
constexpr unsigned kDefaultWidth = 1920;
constexpr unsigned kDefaultHeight = 1080;
// CPU-side frame slots shared between the render and scheduling paths.
constexpr std::size_t kSystemFrameSlots = 12;
// In-flight pixel-pack buffers used for asynchronous GPU->CPU readback.
constexpr std::size_t kPboDepth = 6;
// NOTE(review): consumers of these two constants are outside this view —
// presumably warm-up frame count and DeckLink pre-roll depth; confirm usage.
constexpr std::size_t kWarmupFrames = 4;
constexpr std::size_t kDeckLinkTargetBufferedFrames = 4;

// Lifecycle of one shared frame slot, driven by LatestFrameStore:
// Free -> Rendering (AcquireForRender) -> Completed (PublishCompleted)
// -> Scheduled (ConsumeCompleted) -> Free (ReleaseByBytes).
enum class ProbeSlotState
{
    Free,
    Rendering,
    Completed,
    Scheduled
};
// Non-owning view of one frame slot handed between the renderer and the
// scheduler. `bytes` points into LatestFrameStore's slot storage;
// `generation` is a stale-handle guard checked on publish/validation.
struct ProbeFrame
{
    void* bytes = nullptr;      // pixel storage owned by LatestFrameStore
    long rowBytes = 0;          // bytes per row (BGRA8 pitch)
    unsigned width = 0;
    unsigned height = 0;
    VideoIOPixelFormat pixelFormat = VideoIOPixelFormat::Bgra8;
    std::size_t index = 0;      // slot index inside LatestFrameStore
    uint64_t generation = 0;    // slot generation at the time of acquisition
    uint64_t frameIndex = 0;    // producer frame counter for this payload
};
// Counters reported by LatestFrameStore::Metrics(). The uint64_t fields
// are monotonic event totals; the std::size_t fields are a point-in-time
// census of slot states filled in by Metrics() itself.
struct ProbeMetrics
{
    uint64_t renderedFrames = 0;    // CountRenderedFrame() calls
    uint64_t completedFrames = 0;   // frames published via PublishCompleted
    uint64_t scheduledFrames = 0;   // frames handed out by ConsumeCompleted
    uint64_t completedDrops = 0;    // completed frames discarded to free a slot
    uint64_t acquireMisses = 0;     // AcquireForRender failures
    uint64_t scheduleUnderruns = 0; // ConsumeCompleted calls with nothing ready
    uint64_t pboQueueMisses = 0;    // CountPboQueueMiss() calls
    std::size_t freeCount = 0;
    std::size_t renderingCount = 0;
    std::size_t completedCount = 0;
    std::size_t scheduledCount = 0;
};
// Fixed-capacity pool of CPU-side BGRA8 frame buffers shared between the
// render thread (producer) and the playout/scheduling side (consumer).
// Slot lifecycle: Free -> Rendering -> Completed -> Scheduled -> Free.
// Every public method takes mMutex; per-slot `generation` counters guard
// against callers publishing through a stale ProbeFrame handle.
class LatestFrameStore
{
public:
    // Pre-allocates `capacity` buffers sized for width x height BGRA8.
    LatestFrameStore(unsigned width, unsigned height, std::size_t capacity) :
        mWidth(width),
        mHeight(height),
        mRowBytes(VideoIORowBytes(VideoIOPixelFormat::Bgra8, width))
    {
        mSlots.resize(capacity);
        const std::size_t byteCount = static_cast<std::size_t>(mRowBytes) * static_cast<std::size_t>(mHeight);
        for (Slot& slot : mSlots)
        {
            slot.bytes.resize(byteCount);
            slot.generation = 1;
        }
    }

    // Claims a Free slot for rendering. If none is free, the oldest
    // Completed-but-unscheduled frame is dropped to make room. Returns
    // false (and counts an acquire miss) only when that also fails.
    bool AcquireForRender(ProbeFrame& frame)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        if (!AcquireFreeLocked(frame))
        {
            if (!DropOldestCompletedLocked() || !AcquireFreeLocked(frame))
            {
                ++mMetrics.acquireMisses;
                return false;
            }
        }
        return true;
    }

    // Marks a previously acquired slot Completed and queues it for
    // scheduling. Rejects stale handles (generation mismatch) and slots
    // not currently in the Rendering state.
    bool PublishCompleted(const ProbeFrame& frame)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        if (!IsValidLocked(frame))
            return false;
        Slot& slot = mSlots[frame.index];
        if (slot.state != ProbeSlotState::Rendering)
            return false;
        slot.state = ProbeSlotState::Completed;
        slot.frameIndex = frame.frameIndex;
        mCompletedIndices.push_back(frame.index);
        ++mMetrics.completedFrames;
        mCondition.notify_all();
        return true;
    }

    // Pops the oldest Completed frame (FIFO) and transitions it to
    // Scheduled. Returns false (counting an underrun) when none is ready.
    bool ConsumeCompleted(ProbeFrame& frame)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        while (!mCompletedIndices.empty())
        {
            const std::size_t index = mCompletedIndices.front();
            mCompletedIndices.pop_front();
            // Queue entries can go stale after a slot is reclaimed; skip them.
            if (index >= mSlots.size() || mSlots[index].state != ProbeSlotState::Completed)
                continue;
            mSlots[index].state = ProbeSlotState::Scheduled;
            FillFrameLocked(index, frame);
            ++mMetrics.scheduledFrames;
            return true;
        }
        ++mMetrics.scheduleUnderruns;
        return false;
    }

    // Returns a slot to the Free state, keyed by its buffer pointer (the
    // only identity the downstream consumer retains). Bumps the slot
    // generation so any outstanding ProbeFrame handles become invalid.
    bool ReleaseByBytes(void* bytes)
    {
        if (bytes == nullptr)
            return false;
        std::lock_guard<std::mutex> lock(mMutex);
        for (std::size_t index = 0; index < mSlots.size(); ++index)
        {
            if (mSlots[index].bytes.data() != bytes)
                continue;
            mSlots[index].state = ProbeSlotState::Free;
            ++mSlots[index].generation;
            RemoveCompletedIndexLocked(index);
            mCondition.notify_all();
            return true;
        }
        return false;
    }

    // Blocks until at least `targetDepth` slots are Completed (Scheduled
    // slots do not count) or the timeout elapses.
    bool WaitForCompletedDepth(std::size_t targetDepth, std::chrono::milliseconds timeout)
    {
        std::unique_lock<std::mutex> lock(mMutex);
        return mCondition.wait_for(lock, timeout, [&]() {
            return CompletedCountLocked() >= targetDepth;
        });
    }

    // Snapshot of the event counters plus a census of current slot states.
    ProbeMetrics Metrics() const
    {
        std::lock_guard<std::mutex> lock(mMutex);
        ProbeMetrics metrics = mMetrics;
        for (const Slot& slot : mSlots)
        {
            switch (slot.state)
            {
            case ProbeSlotState::Free:
                ++metrics.freeCount;
                break;
            case ProbeSlotState::Rendering:
                ++metrics.renderingCount;
                break;
            case ProbeSlotState::Completed:
                ++metrics.completedCount;
                break;
            case ProbeSlotState::Scheduled:
                ++metrics.scheduledCount;
                break;
            }
        }
        return metrics;
    }

    // Event-counter helpers called from the render thread.
    void CountRenderedFrame()
    {
        std::lock_guard<std::mutex> lock(mMutex);
        ++mMetrics.renderedFrames;
    }

    void CountPboQueueMiss()
    {
        std::lock_guard<std::mutex> lock(mMutex);
        ++mMetrics.pboQueueMisses;
    }

private:
    // Backing storage and bookkeeping for one frame buffer.
    struct Slot
    {
        std::vector<unsigned char> bytes;
        ProbeSlotState state = ProbeSlotState::Free;
        uint64_t generation = 1; // bumped whenever slot ownership changes
        uint64_t frameIndex = 0; // producer frame counter, set on publish
    };

    // Claims the first Free slot, bumping its generation.
    bool AcquireFreeLocked(ProbeFrame& frame)
    {
        for (std::size_t index = 0; index < mSlots.size(); ++index)
        {
            if (mSlots[index].state != ProbeSlotState::Free)
                continue;
            mSlots[index].state = ProbeSlotState::Rendering;
            ++mSlots[index].generation;
            FillFrameLocked(index, frame);
            return true;
        }
        return false;
    }

    // Frees the oldest queued Completed slot and counts it as a drop.
    bool DropOldestCompletedLocked()
    {
        while (!mCompletedIndices.empty())
        {
            const std::size_t index = mCompletedIndices.front();
            mCompletedIndices.pop_front();
            if (index >= mSlots.size() || mSlots[index].state != ProbeSlotState::Completed)
                continue;
            mSlots[index].state = ProbeSlotState::Free;
            ++mSlots[index].generation;
            ++mMetrics.completedDrops;
            return true;
        }
        return false;
    }

    // Populates a ProbeFrame view of slot `index`.
    void FillFrameLocked(std::size_t index, ProbeFrame& frame) const
    {
        const Slot& slot = mSlots[index];
        frame.bytes = const_cast<unsigned char*>(slot.bytes.data());
        frame.rowBytes = static_cast<long>(mRowBytes);
        frame.width = mWidth;
        frame.height = mHeight;
        frame.pixelFormat = VideoIOPixelFormat::Bgra8;
        frame.index = index;
        frame.generation = slot.generation;
        frame.frameIndex = slot.frameIndex;
    }

    // A handle is valid only while its generation matches the slot's.
    bool IsValidLocked(const ProbeFrame& frame) const
    {
        return frame.index < mSlots.size() && mSlots[frame.index].generation == frame.generation;
    }

    void RemoveCompletedIndexLocked(std::size_t index)
    {
        mCompletedIndices.erase(std::remove(mCompletedIndices.begin(), mCompletedIndices.end(), index), mCompletedIndices.end());
    }

    std::size_t CompletedCountLocked() const
    {
        std::size_t count = 0;
        for (const Slot& slot : mSlots)
        {
            if (slot.state == ProbeSlotState::Completed)
                ++count;
        }
        return count;
    }

    unsigned mWidth = 0;
    unsigned mHeight = 0;
    unsigned mRowBytes = 0;
    std::vector<Slot> mSlots;
    std::deque<std::size_t> mCompletedIndices; // FIFO of Completed slot indices
    mutable std::mutex mMutex;                 // mutable: Metrics() is const
    std::condition_variable mCondition;
    ProbeMetrics mMetrics;
};
// Minimal window procedure: the probe window is never shown or
// interacted with, so every message goes to the default handler.
LRESULT CALLBACK ProbeWindowProc(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
    return DefWindowProc(hwnd, message, wParam, lParam);
}
class HiddenOpenGLContext
{
public:
~HiddenOpenGLContext()
{
Destroy();
}
bool Create(unsigned width, unsigned height, std::string& error)
{
mInstance = GetModuleHandle(nullptr);
WNDCLASSA wc = {};
wc.style = CS_OWNDC;
wc.lpfnWndProc = ProbeWindowProc;
wc.hInstance = mInstance;
wc.lpszClassName = "DeckLinkRenderCadenceProbeWindow";
RegisterClassA(&wc);
mWindow = CreateWindowA(
wc.lpszClassName,
"DeckLink Render Cadence Probe",
WS_OVERLAPPEDWINDOW,
CW_USEDEFAULT,
CW_USEDEFAULT,
static_cast<int>(width),
static_cast<int>(height),
nullptr,
nullptr,
mInstance,
nullptr);
if (!mWindow)
{
error = "CreateWindowA failed.";
return false;
}
mDc = GetDC(mWindow);
if (!mDc)
{
error = "GetDC failed.";
return false;
}
PIXELFORMATDESCRIPTOR pfd = {};
pfd.nSize = sizeof(pfd);
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 32;
pfd.cDepthBits = 0;
pfd.iLayerType = PFD_MAIN_PLANE;
const int pixelFormat = ChoosePixelFormat(mDc, &pfd);
if (pixelFormat == 0 || !SetPixelFormat(mDc, pixelFormat, &pfd))
{
error = "Could not choose/set a pixel format.";
return false;
}
mGlrc = wglCreateContext(mDc);
if (!mGlrc)
{
error = "wglCreateContext failed.";
return false;
}
return true;
}
bool MakeCurrent()
{
return mDc && mGlrc && wglMakeCurrent(mDc, mGlrc);
}
void ClearCurrent()
{
wglMakeCurrent(nullptr, nullptr);
}
void Destroy()
{
ClearCurrent();
if (mGlrc)
{
wglDeleteContext(mGlrc);
mGlrc = nullptr;
}
if (mWindow && mDc)
{
ReleaseDC(mWindow, mDc);
mDc = nullptr;
}
if (mWindow)
{
DestroyWindow(mWindow);
mWindow = nullptr;
}
}
private:
HINSTANCE mInstance = nullptr;
HWND mWindow = nullptr;
HDC mDc = nullptr;
HGLRC mGlrc = nullptr;
};
class RenderCadenceProbe
{
public:
// Converts the requested per-frame period (milliseconds, fractional
// allowed) into the steady clock's native duration; non-positive
// requests fall back to 16 ms (~60 Hz).
RenderCadenceProbe(LatestFrameStore& frameStore, unsigned width, unsigned height, double frameDurationMs) :
    mFrameStore(frameStore),
    mWidth(width),
    mHeight(height),
    mFrameDuration(std::chrono::duration_cast<Clock::duration>(std::chrono::duration<double, std::milli>(frameDurationMs)))
{
    if (mFrameDuration <= Clock::duration::zero())
        mFrameDuration = std::chrono::milliseconds(16);
}
bool Start(std::string& error)
{
mStopping = false;
mThread = std::thread([this]() { ThreadMain(); });
std::unique_lock<std::mutex> lock(mStartupMutex);
if (!mStartupCondition.wait_for(lock, std::chrono::seconds(3), [this]() { return mStarted || !mStartupError.empty(); }))
{
error = "Timed out starting render thread.";
return false;
}
if (!mStartupError.empty())
{
error = mStartupError;
return false;
}
return true;
}
// Requests the render loop to exit and joins the thread. Safe to call
// when the thread was never started or has already been joined.
void Stop()
{
    mStopping = true;
    if (mThread.joinable())
        mThread.join();
}
private:
// One asynchronous-readback slot: a pixel pack buffer plus the fence
// that signals when the GPU has finished writing into it.
// NOTE(review): inFlight/fence are managed by QueueReadback /
// ConsumeCompletedPbos, which are defined past this view — confirm there.
struct PboSlot
{
    GLuint pbo = 0;
    GLsync fence = nullptr;
    bool inFlight = false;
    uint64_t frameIndex = 0; // render counter of the frame in this PBO
};
using Clock = std::chrono::steady_clock;
// Render-thread entry point. Owns the GL context for its entire
// lifetime (WGL contexts are thread-affine): builds context, FBO and
// PBOs, signals startup success/failure back to Start(), then renders
// on a fixed cadence until Stop() raises mStopping.
void ThreadMain()
{
    std::string error;
    HiddenOpenGLContext context;
    if (!context.Create(mWidth, mHeight, error) || !context.MakeCurrent())
    {
        SignalStartupFailure(error.empty() ? "OpenGL context creation failed." : error);
        return;
    }
    if (!ResolveGLExtensions())
    {
        SignalStartupFailure("OpenGL extension resolution failed.");
        return;
    }
    if (!CreateRenderTargets())
    {
        SignalStartupFailure("OpenGL render target creation failed.");
        return;
    }
    CreatePbos();
    SignalStarted();

    auto nextRenderTime = Clock::now();
    while (!mStopping)
    {
        // Let pending PBO readbacks make progress each pass
        // (ConsumeCompletedPbos is defined below, outside this view).
        ConsumeCompletedPbos();
        const auto now = Clock::now();
        if (now < nextRenderTime)
        {
            // Sleep in <= 1 ms slices so shutdown and readback completion
            // stay responsive while pacing toward the next frame time.
            std::this_thread::sleep_for((std::min)(std::chrono::milliseconds(1), std::chrono::duration_cast<std::chrono::milliseconds>(nextRenderTime - now)));
            continue;
        }
        RenderPattern(mFrameIndex);
        if (!QueueReadback(mFrameIndex))
            mFrameStore.CountPboQueueMiss();
        mFrameStore.CountRenderedFrame();
        ++mFrameIndex;
        nextRenderTime += mFrameDuration;
        // If we fell far behind (e.g. debugger pause), resynchronise the
        // cadence rather than rendering a burst of catch-up frames.
        if (Clock::now() - nextRenderTime > mFrameDuration * 4)
            nextRenderTime = Clock::now() + mFrameDuration;
    }
    // Teardown order matters: drain/destroy GL objects while the context
    // is still current, then unbind it.
    FlushPbos();
    DestroyPbos();
    DestroyRenderTargets();
    context.ClearCurrent();
}
// Builds the offscreen render target: an RGBA8 texture attached as
// color attachment 0 of a new FBO. Returns false when the FBO is not
// framebuffer-complete. Leaves texture/FBO bindings restored to 0.
bool CreateRenderTargets()
{
    glGenFramebuffers(1, &mFramebuffer);
    glBindFramebuffer(GL_FRAMEBUFFER, mFramebuffer);
    glGenTextures(1, &mTexture);
    glBindTexture(GL_TEXTURE_2D, mTexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // GL_BGRA + UNSIGNED_INT_8_8_8_8_REV matches the Bgra8 frame format
    // used for readback elsewhere in this file.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, static_cast<GLsizei>(mWidth), static_cast<GLsizei>(mHeight), 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mTexture, 0);
    const bool complete = glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE;
    glBindTexture(GL_TEXTURE_2D, 0);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    return complete;
}
// Releases the offscreen framebuffer and its color texture. Safe to call
// when they were never created (names stay 0).
void DestroyRenderTargets()
{
    if (mFramebuffer != 0)
    {
        glDeleteFramebuffers(1, &mFramebuffer);
        mFramebuffer = 0;
    }
    if (mTexture != 0)
    {
        glDeleteTextures(1, &mTexture);
        mTexture = 0;
    }
}
void CreatePbos()
{
mPbos.resize(kPboDepth);
const std::size_t byteCount = static_cast<std::size_t>(VideoIORowBytes(VideoIOPixelFormat::Bgra8, mWidth)) * mHeight;
for (PboSlot& slot : mPbos)
{
glGenBuffers(1, &slot.pbo);
glBindBuffer(GL_PIXEL_PACK_BUFFER, slot.pbo);
glBufferData(GL_PIXEL_PACK_BUFFER, static_cast<GLsizeiptr>(byteCount), nullptr, GL_STREAM_READ);
}
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}
void DestroyPbos()
{
for (PboSlot& slot : mPbos)
{
if (slot.fence)
glDeleteSync(slot.fence);
if (slot.pbo != 0)
glDeleteBuffers(1, &slot.pbo);
slot = {};
}
mPbos.clear();
}
void FlushPbos()
{
for (std::size_t i = 0; i < mPbos.size() * 2; ++i)
ConsumeCompletedPbos();
}
// Draws the probe's animated pattern using nothing but scissored clears
// (no shaders): a slowly color-cycling background plus a bouncing
// high-contrast box driven by out-of-phase sine waves.
void RenderPattern(uint64_t frameIndex)
{
// Nominal animation time in seconds, assuming ~60 fps cadence.
const float t = static_cast<float>(frameIndex) / 60.0f;
// Channels drift at different rates so the overall color cycle is long.
const float red = 0.1f + 0.4f * (0.5f + 0.5f * std::sin(t));
const float green = 0.1f + 0.4f * (0.5f + 0.5f * std::sin(t * 0.73f + 1.0f));
const float blue = 0.15f + 0.3f * (0.5f + 0.5f * std::sin(t * 0.41f + 2.0f));
glBindFramebuffer(GL_FRAMEBUFFER, mFramebuffer);
glViewport(0, 0, static_cast<GLsizei>(mWidth), static_cast<GLsizei>(mHeight));
glDisable(GL_SCISSOR_TEST);
glClearColor(red, green, blue, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Bouncing box: position follows two independent sine waves.
const int boxWidth = static_cast<int>(mWidth / 6);
const int boxHeight = static_cast<int>(mHeight / 5);
const float phase = 0.5f + 0.5f * std::sin(t * 1.7f);
const int x = static_cast<int>(phase * static_cast<float>(mWidth - boxWidth));
const int y = static_cast<int>((0.5f + 0.5f * std::sin(t * 1.1f + 0.8f)) * static_cast<float>(mHeight - boxHeight));
glEnable(GL_SCISSOR_TEST);
glScissor(x, y, boxWidth, boxHeight);
// Box color is derived from the background so it always contrasts.
glClearColor(1.0f - red, 0.85f, 0.15f + blue, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glDisable(GL_SCISSOR_TEST);
}
// Issues an asynchronous glReadPixels of the rendered frame into the next
// ring PBO and fences it. Returns false (counted as a PBO queue miss by the
// caller) when the slot is still in flight or fence creation failed.
bool QueueReadback(uint64_t frameIndex)
{
if (mPbos.empty())
return false;
PboSlot& slot = mPbos[mWriteIndex];
if (slot.inFlight)
return false;
const std::size_t byteCount = static_cast<std::size_t>(VideoIORowBytes(VideoIOPixelFormat::Bgra8, mWidth)) * mHeight;
glBindFramebuffer(GL_READ_FRAMEBUFFER, mFramebuffer);
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glBindBuffer(GL_PIXEL_PACK_BUFFER, slot.pbo);
// Orphan the PBO storage so the driver need not stall on prior users.
glBufferData(GL_PIXEL_PACK_BUFFER, static_cast<GLsizeiptr>(byteCount), nullptr, GL_STREAM_READ);
// With a PIXEL_PACK buffer bound, the nullptr is a byte offset into the PBO.
glReadPixels(0, 0, static_cast<GLsizei>(mWidth), static_cast<GLsizei>(mHeight), GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
slot.fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
slot.inFlight = slot.fence != nullptr;
slot.frameIndex = frameIndex;
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
// NOTE(review): the write index advances even when fence creation failed,
// so an un-fenced slot is simply skipped by the consumer — confirm this is
// the intended miss-handling policy.
mWriteIndex = (mWriteIndex + 1) % mPbos.size();
return slot.inFlight;
}
// Walks the PBO ring from mReadIndex and publishes every readback whose GPU
// fence has signaled, copying the pixels into a frame-store slot. Stops at
// the first still-pending fence so frames are consumed in order. If no store
// slot is available the readback is discarded (latest-N policy).
void ConsumeCompletedPbos()
{
    for (std::size_t checked = 0; checked < mPbos.size(); ++checked)
    {
        PboSlot& slot = mPbos[mReadIndex];
        if (!slot.inFlight || slot.fence == nullptr)
        {
            mReadIndex = (mReadIndex + 1) % mPbos.size();
            continue;
        }
        // Non-blocking poll (0 ns timeout): bail on the first unsignaled fence.
        const GLenum waitResult = glClientWaitSync(slot.fence, 0, 0);
        if (waitResult != GL_ALREADY_SIGNALED && waitResult != GL_CONDITION_SATISFIED)
            return;
        ProbeFrame frame;
        if (mFrameStore.AcquireForRender(frame))
        {
            glBindBuffer(GL_PIXEL_PACK_BUFFER, slot.pbo);
            void* mapped = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
            if (mapped)
            {
                const std::size_t byteCount = static_cast<std::size_t>(frame.rowBytes) * frame.height;
                std::memcpy(frame.bytes, mapped, byteCount);
                glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
                frame.frameIndex = slot.frameIndex;
                mFrameStore.PublishCompleted(frame);
            }
            else
            {
                // Bug fix: previously a failed map leaked the acquired slot
                // (neither published nor released), slowly draining the free
                // list. Return it to the store instead.
                mFrameStore.ReleaseByBytes(frame.bytes);
            }
            glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
        }
        // Retire the ring slot whether or not the frame was published.
        glDeleteSync(slot.fence);
        slot.fence = nullptr;
        slot.inFlight = false;
        mReadIndex = (mReadIndex + 1) % mPbos.size();
    }
}
// Marks startup as successful and wakes any thread blocked waiting on the
// startup condition (the flag is published under the lock; the notify may
// safely happen after the lock is dropped).
void SignalStarted()
{
    {
        std::lock_guard<std::mutex> guard(mStartupMutex);
        mStarted = true;
    }
    mStartupCondition.notify_all();
}
// Records a startup error message and wakes any thread blocked waiting on
// the startup condition.
void SignalStartupFailure(const std::string& error)
{
    {
        std::lock_guard<std::mutex> guard(mStartupMutex);
        mStartupError = error;
    }
    mStartupCondition.notify_all();
}
LatestFrameStore& mFrameStore; // shared latest-N system-memory frame store
unsigned mWidth = 0; // output frame width in pixels
unsigned mHeight = 0; // output frame height in pixels
Clock::duration mFrameDuration; // target time between rendered frames
std::thread mThread; // render thread running ThreadMain
std::atomic<bool> mStopping{ false }; // set by Stop() to end the render loop
std::mutex mStartupMutex; // guards mStarted / mStartupError
std::condition_variable mStartupCondition; // signals startup success or failure
bool mStarted = false; // true once the GL pipeline is ready
std::string mStartupError; // non-empty when startup failed
GLuint mFramebuffer = 0; // offscreen FBO rendered into
GLuint mTexture = 0; // color attachment of mFramebuffer
std::vector<PboSlot> mPbos; // asynchronous readback ring
std::size_t mWriteIndex = 0; // next ring slot to queue a readback into
std::size_t mReadIndex = 0; // next ring slot to poll for completion
uint64_t mFrameIndex = 0; // monotonically increasing rendered-frame count
};
// Feeds completed system-memory frames into DeckLink scheduled playback.
// Pure consumer: never renders, only keeps a small scheduled buffer topped
// up from the frame store.
class DeckLinkProbePlayout
{
public:
    DeckLinkProbePlayout(DeckLinkSession& session, LatestFrameStore& frameStore) :
        mSession(session),
        mFrameStore(frameStore)
    {
    }
    // Bug fix: joins the playout thread if the caller never reached Stop()
    // (e.g. an early-return error path); a joinable std::thread reaching its
    // destructor would otherwise call std::terminate.
    ~DeckLinkProbePlayout()
    {
        Stop();
    }
    // Launches the playout thread. Always succeeds.
    bool Start()
    {
        mStopping = false;
        mThread = std::thread([this]() { ThreadMain(); });
        return true;
    }
    // Requests shutdown and joins the playout thread. Idempotent.
    void Stop()
    {
        mStopping = true;
        if (mThread.joinable())
            mThread.join();
    }
    // Playout loop: consumes completed frames and schedules them until the
    // DeckLink-side buffer reaches the target depth.
    void ThreadMain()
    {
        while (!mStopping)
        {
            const ProbeMetrics metrics = mFrameStore.Metrics();
            if (metrics.scheduledCount >= kDeckLinkTargetBufferedFrames)
            {
                // Target buffer depth reached; back off briefly.
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
                continue;
            }
            ProbeFrame frame;
            if (!mFrameStore.ConsumeCompleted(frame))
            {
                // No completed frame available yet.
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
                continue;
            }
            VideoIOOutputFrame outputFrame;
            outputFrame.bytes = frame.bytes;
            outputFrame.nativeBuffer = frame.bytes;
            outputFrame.rowBytes = frame.rowBytes;
            outputFrame.width = frame.width;
            outputFrame.height = frame.height;
            outputFrame.pixelFormat = frame.pixelFormat;
            if (!mSession.ScheduleOutputFrame(outputFrame))
            {
                // Scheduling failed: return the slot to the store so it is
                // not stranded, then back off before retrying.
                mFrameStore.ReleaseByBytes(frame.bytes);
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
            }
        }
    }
private:
    DeckLinkSession& mSession;
    LatestFrameStore& mFrameStore;
    std::thread mThread;
    std::atomic<bool> mStopping{ false };
};
// Maps a DeckLink completion result to the short label printed in the
// telemetry line; unrecognized values fall through to "unknown".
std::string CompletionResultToString(VideoIOCompletionResult result)
{
    if (result == VideoIOCompletionResult::Completed)
        return "completed";
    if (result == VideoIOCompletionResult::DisplayedLate)
        return "late";
    if (result == VideoIOCompletionResult::Dropped)
        return "dropped";
    if (result == VideoIOCompletionResult::Flushed)
        return "flushed";
    return "unknown";
}
// Prints the one-screen description of what the probe does and how to stop
// it (adjacent string literals concatenate; output bytes are unchanged).
void PrintUsage()
{
    static const char kUsage[] =
        "DeckLinkRenderCadenceProbe\n"
        " Renders a simple OpenGL BGRA8 motion pattern on one GL thread,\n"
        " copies completed PBO readbacks into latest-N system memory slots,\n"
        " warms up rendered frames, then feeds DeckLink scheduled playback.\n\n"
        "Press Enter to stop.\n";
    std::cout << kUsage;
}
// RAII guard for COM initialization: balances a successful CoInitialize
// with CoUninitialize at destruction.
class ComInitGuard
{
public:
    ComInitGuard() = default;
    // Bug fix: the guard was copyable; a copy would run CoUninitialize twice
    // for a single CoInitialize. Delete copy construction/assignment.
    ComInitGuard(const ComInitGuard&) = delete;
    ComInitGuard& operator=(const ComInitGuard&) = delete;
    ~ComInitGuard()
    {
        if (mInitialized)
            CoUninitialize();
    }
    // Initializes COM on this thread. Returns true when CoInitialize
    // succeeded (SUCCEEDED covers S_FALSE — already initialized — which
    // must still be balanced with CoUninitialize).
    bool Initialize()
    {
        const HRESULT result = CoInitialize(nullptr);
        mInitialized = SUCCEEDED(result);
        mResult = result;
        return mInitialized;
    }
    // Raw HRESULT from the CoInitialize call, for diagnostics.
    HRESULT Result() const { return mResult; }
private:
    bool mInitialized = false; // true when a CoUninitialize is owed
    HRESULT mResult = S_OK;    // last CoInitialize result
};
}
// Probe entry point: wires the frame store, DeckLink session, render thread,
// playout thread, and a once-per-second telemetry printer, then runs until
// the user presses Enter. Returns 1 on any setup failure.
int main()
{
PrintUsage();
ComInitGuard com;
if (!com.Initialize())
{
std::cerr << "COM initialization failed: 0x" << std::hex << com.Result() << std::dec << "\n";
return 1;
}
LatestFrameStore frameStore(kDefaultWidth, kDefaultHeight, kSystemFrameSlots);
DeckLinkSession deckLink;
// Telemetry counters incremented from the DeckLink completion callback.
std::atomic<uint64_t> completions{ 0 };
std::atomic<uint64_t> late{ 0 };
std::atomic<uint64_t> dropped{ 0 };
VideoFormatSelection formats;
std::string error;
if (!deckLink.DiscoverDevicesAndModes(formats, error))
{
std::cerr << "DeckLink discovery failed: " << error << "\n";
return 1;
}
if (!deckLink.SelectPreferredFormats(formats, false, error))
{
std::cerr << "DeckLink format selection failed: " << error << "\n";
return 1;
}
// The completion callback returns the frame slot to the store and tallies
// late/dropped completions for telemetry.
if (!deckLink.ConfigureOutput(
[&](const VideoIOCompletion& completion) {
frameStore.ReleaseByBytes(completion.outputFrameBuffer);
++completions;
if (completion.result == VideoIOCompletionResult::DisplayedLate)
++late;
else if (completion.result == VideoIOCompletionResult::Dropped)
++dropped;
},
formats.output,
false,
error))
{
std::cerr << "DeckLink output configuration failed: " << error << "\n";
return 1;
}
if (!deckLink.PrepareOutputSchedule())
{
std::cerr << "DeckLink schedule preparation failed.\n";
return 1;
}
// The probe is hard-coded for the default (1920x1080) frame size; bail out
// on any other selected mode rather than render at the wrong size.
const VideoIOState& state = deckLink.State();
if (state.outputFrameSize.width != kDefaultWidth || state.outputFrameSize.height != kDefaultHeight)
{
std::cerr << "This probe currently expects 1920x1080 output. Selected mode is "
<< state.outputFrameSize.width << "x" << state.outputFrameSize.height << ".\n";
return 1;
}
RenderCadenceProbe renderer(frameStore, state.outputFrameSize.width, state.outputFrameSize.height, state.frameBudgetMilliseconds);
if (!renderer.Start(error))
{
std::cerr << "Render thread start failed: " << error << "\n";
return 1;
}
// Warm-up: wait until enough rendered frames are buffered before starting
// DeckLink scheduled playback.
std::cout << "Warming up " << kWarmupFrames << " rendered frames at cadence...\n";
if (!frameStore.WaitForCompletedDepth(kWarmupFrames, std::chrono::seconds(3)))
{
std::cerr << "Timed out waiting for rendered warmup frames.\n";
renderer.Stop();
return 1;
}
DeckLinkProbePlayout playout(deckLink, frameStore);
playout.Start();
// Preroll: give the playout thread up to 3 seconds to fill the scheduled
// buffer to the target depth before playback starts.
const auto prerollDeadline = std::chrono::steady_clock::now() + std::chrono::seconds(3);
while (std::chrono::steady_clock::now() < prerollDeadline)
{
if (frameStore.Metrics().scheduledCount >= kDeckLinkTargetBufferedFrames)
break;
std::this_thread::sleep_for(std::chrono::milliseconds(2));
}
if (!deckLink.StartScheduledPlayback())
{
std::cerr << "DeckLink scheduled playback failed to start.\n";
playout.Stop();
renderer.Stop();
return 1;
}
// Telemetry thread: prints one line per second with render/schedule rates
// and buffer occupancy (rates are computed from counter deltas).
std::atomic<bool> metricsStopping{ false };
std::thread metricsThread([&]() {
uint64_t lastRendered = 0;
uint64_t lastScheduled = 0;
auto lastTime = std::chrono::steady_clock::now();
while (!metricsStopping)
{
std::this_thread::sleep_for(std::chrono::seconds(1));
const auto now = std::chrono::steady_clock::now();
const double seconds = std::chrono::duration_cast<std::chrono::duration<double>>(now - lastTime).count();
const ProbeMetrics metrics = frameStore.Metrics();
const double renderFps = seconds > 0.0 ? static_cast<double>(metrics.renderedFrames - lastRendered) / seconds : 0.0;
const double scheduleFps = seconds > 0.0 ? static_cast<double>(metrics.scheduledFrames - lastScheduled) / seconds : 0.0;
lastRendered = metrics.renderedFrames;
lastScheduled = metrics.scheduledFrames;
lastTime = now;
std::cout << std::fixed << std::setprecision(1)
<< "renderFps=" << renderFps
<< " scheduleFps=" << scheduleFps
<< " free=" << metrics.freeCount
<< " completed=" << metrics.completedCount
<< " scheduled=" << metrics.scheduledCount
<< " drops=" << metrics.completedDrops
<< " pboMiss=" << metrics.pboQueueMisses
<< " completions=" << completions.load()
<< " late=" << late.load()
<< " dropped=" << dropped.load()
<< " decklinkBuffered=" << deckLink.State().actualDeckLinkBufferedFrames
<< "\n";
}
});
// Block until the user presses Enter, then tear down in dependency order:
// metrics, playout (stop scheduling), DeckLink, render thread, resources.
std::string line;
std::getline(std::cin, line);
metricsStopping = true;
if (metricsThread.joinable())
metricsThread.join();
playout.Stop();
deckLink.Stop();
renderer.Stop();
deckLink.ReleaseResources();
return 0;
}

View File

@@ -0,0 +1,113 @@
# DeckLink Render Cadence Probe
This is a deliberately small architecture probe for the Phase 7.7 playout model.
It is not the main app and does not use the main runtime, shader stack, preview path, input upload path, or render engine.
## What It Tests
The probe validates the clean playout spine:
```text
single OpenGL render thread
owns its own hidden GL context
renders a simple moving BGRA8 pattern at output cadence
queues GPU readback through a PBO ring
copies completed readbacks into latest-N system-memory slots
system-memory frame store
owns free / rendering / completed / scheduled slots
drops old completed unscheduled frames when render cadence needs space
protects scheduled frames until DeckLink completion
DeckLink playout thread
consumes completed system-memory frames
keeps a small scheduled buffer filled
does not render
```
Startup warms up rendered frames before starting DeckLink scheduled playback.
## How To Build
```powershell
cmake --build --preset build-debug --target DeckLinkRenderCadenceProbe -- /m:1
```
The executable is:
```text
build\vs2022-x64-debug\Debug\DeckLinkRenderCadenceProbe.exe
```
## How To Run
Run it from a terminal so you can see the telemetry:
```powershell
build\vs2022-x64-debug\Debug\DeckLinkRenderCadenceProbe.exe
```
Press Enter to stop.
The first version assumes `1080p59.94` / `1920x1080` output and BGRA8 system-memory frames.
## What To Watch
The probe prints one line per second:
- `renderFps`: cadence render throughput
- `scheduleFps`: DeckLink scheduling throughput
- `free`: free system-memory slots
- `completed`: rendered, unscheduled slots
- `scheduled`: slots currently owned by DeckLink
- `drops`: old completed unscheduled frames recycled by the latest-N cache
- `pboMiss`: PBO ring was full when trying to queue readback
- `late`: DeckLink displayed-late completions
- `dropped`: DeckLink dropped completions
- `decklinkBuffered`: actual DeckLink buffered-frame count when available
For a healthy architecture proof, expect:
- `renderFps` close to the selected output cadence
- `scheduleFps` close to the selected output cadence after warmup
- `scheduled` hovering near the target buffer depth
- `late` and `dropped` not increasing continuously
- visible motion that is smooth on the DeckLink output
## Interpretation
If this probe is smooth at 59.94/60, the broad architecture is viable and the main app's remaining stutters are likely caused by integration details such as input upload, shared render-thread work, preview/screenshot work, or runtime/render-state coupling.
If this probe is not smooth, the problem is lower level: DeckLink scheduling, OpenGL readback, Windows scheduling, or hardware/driver behavior.
## Initial Result
Date: 2026-05-12
User-visible result:
- output looked smooth
Representative telemetry:
```text
renderFps=59.9 scheduleFps=59.9 free=7 completed=1 scheduled=4 drops=0 pboMiss=0 completions=119 late=0 dropped=0 decklinkBuffered=4
renderFps=59.9 scheduleFps=59.9 free=7 completed=1 scheduled=4 drops=0 pboMiss=0 completions=179 late=0 dropped=0 decklinkBuffered=4
renderFps=59.8 scheduleFps=59.8 free=7 completed=1 scheduled=4 drops=0 pboMiss=0 completions=239 late=0 dropped=0 decklinkBuffered=4
renderFps=60.8 scheduleFps=59.8 free=7 completed=1 scheduled=4 drops=0 pboMiss=0 completions=299 late=0 dropped=0 decklinkBuffered=4
renderFps=59.9 scheduleFps=59.9 free=7 completed=1 scheduled=4 drops=0 pboMiss=0 completions=360 late=0 dropped=0 decklinkBuffered=4
renderFps=59.8 scheduleFps=60.8 free=8 completed=0 scheduled=4 drops=0 pboMiss=0 completions=420 late=0 dropped=0 decklinkBuffered=4
```
Read:
- the clean architecture can sustain the selected output cadence on the test machine
- BGRA8 PBO readback is viable when isolated from the main app's other render-thread work
- latest-N system-memory buffering stayed stable
- DeckLink actual buffered depth stayed at 4
- there were no late frames, dropped frames, completed-frame drops, or PBO misses in the sampled output
Implication:
The main app's remaining stutters are likely integration/ownership issues rather than a fundamental DeckLink/OpenGL/BGRA8 readback limit. The highest-value suspects are input upload before output render, shared render-thread queue contention, preview/screenshot work, and runtime/render-state work on the output path.

View File

@@ -241,6 +241,7 @@ void ControlServices::PollLoop(RuntimeCoordinator& runtimeCoordinator)
completedCommit.generation = entry.second.generation; completedCommit.generation = entry.second.generation;
std::lock_guard<std::mutex> lock(mCompletedOscCommitMutex); std::lock_guard<std::mutex> lock(mCompletedOscCommitMutex);
mCompletedOscCommits.push_back(std::move(completedCommit)); mCompletedOscCommits.push_back(std::move(completedCommit));
PublishOscOverlaySettled(entry.second);
} }
else if (!result.errorMessage.empty()) else if (!result.errorMessage.empty())
{ {
@@ -321,3 +322,22 @@ void ControlServices::PublishOscCommitRequested(const PendingOscCommit& commit)
OutputDebugStringA("OscCommitRequested event publish threw.\n"); OutputDebugStringA("OscCommitRequested event publish threw.\n");
} }
} }
void ControlServices::PublishOscOverlaySettled(const PendingOscCommit& commit)
{
try
{
OscOverlayEvent event;
event.routeKey = commit.routeKey;
event.layerKey = commit.layerKey;
event.parameterKey = commit.parameterKey;
event.generation = commit.generation;
event.settled = true;
if (!mRuntimeEventDispatcher.PublishPayload(event, "ControlServices"))
OutputDebugStringA("OscOverlaySettled event publish failed.\n");
}
catch (...)
{
OutputDebugStringA("OscOverlaySettled event publish threw.\n");
}
}

View File

@@ -76,6 +76,7 @@ private:
void PublishRuntimeStateBroadcastRequested(const std::string& reason); void PublishRuntimeStateBroadcastRequested(const std::string& reason);
void PublishOscValueReceived(const PendingOscUpdate& update, const std::string& routeKey); void PublishOscValueReceived(const PendingOscUpdate& update, const std::string& routeKey);
void PublishOscCommitRequested(const PendingOscCommit& commit); void PublishOscCommitRequested(const PendingOscCommit& commit);
void PublishOscOverlaySettled(const PendingOscCommit& commit);
std::unique_ptr<ControlServer> mControlServer; std::unique_ptr<ControlServer> mControlServer;
std::unique_ptr<OscServer> mOscServer; std::unique_ptr<OscServer> mOscServer;

View File

@@ -4,6 +4,7 @@
#include <algorithm> #include <algorithm>
#include <cstring> #include <cstring>
#include <sstream>
RenderEngine::RenderEngine( RenderEngine::RenderEngine(
RuntimeSnapshotProvider& runtimeSnapshotProvider, RuntimeSnapshotProvider& runtimeSnapshotProvider,
@@ -17,6 +18,7 @@ RenderEngine::RenderEngine(
mRenderPass(mRenderer), mRenderPass(mRenderer),
mRenderPipeline(mRenderer, runtimeSnapshotProvider, healthTelemetry, std::move(renderEffect), std::move(screenshotReady), std::move(previewPaint)), mRenderPipeline(mRenderer, runtimeSnapshotProvider, healthTelemetry, std::move(renderEffect), std::move(screenshotReady), std::move(previewPaint)),
mShaderPrograms(mRenderer, runtimeSnapshotProvider), mShaderPrograms(mRenderer, runtimeSnapshotProvider),
mHealthTelemetry(healthTelemetry),
mHdc(hdc), mHdc(hdc),
mHglrc(hglrc), mHglrc(hglrc),
mFrameStateResolver(runtimeSnapshotProvider) mFrameStateResolver(runtimeSnapshotProvider)
@@ -545,7 +547,11 @@ bool RenderEngine::RequestOutputFrame(const RenderPipelineFrameContext& context,
{ {
if (mRenderThreadRunning) if (mRenderThreadRunning)
{ {
return TryInvokeOnRenderThread("output-render", [this, &context, &outputFrame]() { const auto queuedAt = std::chrono::steady_clock::now();
return TryInvokeOnRenderThread("output-render", [this, &context, &outputFrame, queuedAt]() {
const auto startedAt = std::chrono::steady_clock::now();
const double queueWaitMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(startedAt - queuedAt).count();
mHealthTelemetry.TryRecordOutputRenderQueueWait(queueWaitMilliseconds);
mRenderCommandQueue.RequestOutputFrame({ context.videoState, context.completion }); mRenderCommandQueue.RequestOutputFrame({ context.videoState, context.completion });
RenderOutputFrameRequest request; RenderOutputFrameRequest request;
return mRenderCommandQueue.TryTakeOutputFrame(request) && return mRenderCommandQueue.TryTakeOutputFrame(request) &&

View File

@@ -22,7 +22,6 @@
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include <queue> #include <queue>
#include <sstream>
#include <string> #include <string>
#include <thread> #include <thread>
#include <utility> #include <utility>
@@ -210,6 +209,7 @@ private:
OpenGLRenderPass mRenderPass; OpenGLRenderPass mRenderPass;
OpenGLRenderPipeline mRenderPipeline; OpenGLRenderPipeline mRenderPipeline;
OpenGLShaderPrograms mShaderPrograms; OpenGLShaderPrograms mShaderPrograms;
HealthTelemetry& mHealthTelemetry;
HDC mHdc; HDC mHdc;
HGLRC mHglrc; HGLRC mHglrc;

View File

@@ -58,6 +58,12 @@ OpenGLComposite::~OpenGLComposite()
mShaderBuildQueue->Stop(); mShaderBuildQueue->Stop();
if (mVideoBackend) if (mVideoBackend)
mVideoBackend->ReleaseResources(); mVideoBackend->ReleaseResources();
if (mRuntimeStore)
{
std::string persistenceError;
if (!mRuntimeStore->FlushPersistenceForShutdown(std::chrono::seconds(2), persistenceError))
OutputDebugStringA((std::string("Persistence shutdown flush failed: ") + persistenceError + "\n").c_str());
}
} }
bool OpenGLComposite::InitDeckLink() bool OpenGLComposite::InitDeckLink()
@@ -158,6 +164,9 @@ error:
void OpenGLComposite::paintGL(bool force) void OpenGLComposite::paintGL(bool force)
{ {
if (mRuntimeUpdateController)
mRuntimeUpdateController->ProcessRuntimeWork();
if (!force) if (!force)
{ {
if (IsIconic(hGLWnd)) if (IsIconic(hGLWnd))
@@ -165,6 +174,12 @@ void OpenGLComposite::paintGL(bool force)
} }
const unsigned previewFps = mRuntimeStore ? mRuntimeStore->GetConfiguredPreviewFps() : 30u; const unsigned previewFps = mRuntimeStore ? mRuntimeStore->GetConfiguredPreviewFps() : 30u;
if (!force && mVideoBackend && mVideoBackend->ShouldPrioritizeOutputOverPreview())
{
ValidateRect(hGLWnd, NULL);
return;
}
if (!mRenderEngine->TryPresentPreview(force, previewFps, mVideoBackend->OutputFrameWidth(), mVideoBackend->OutputFrameHeight())) if (!mRenderEngine->TryPresentPreview(force, previewFps, mVideoBackend->OutputFrameWidth(), mVideoBackend->OutputFrameHeight()))
{ {
ValidateRect(hGLWnd, NULL); ValidateRect(hGLWnd, NULL);
@@ -255,6 +270,9 @@ bool OpenGLComposite::Start()
if (!mRenderEngine->StartRenderThread()) if (!mRenderEngine->StartRenderThread())
return false; return false;
if (mRuntimeUpdateController)
mRuntimeUpdateController->ProcessRuntimeWork();
if (mVideoBackend->Start()) if (mVideoBackend->Start())
return true; return true;
@@ -277,6 +295,13 @@ bool OpenGLComposite::Stop()
if (mRenderEngine) if (mRenderEngine)
mRenderEngine->StopRenderThread(); mRenderEngine->StopRenderThread();
if (mRuntimeStore)
{
std::string persistenceError;
if (!mRuntimeStore->FlushPersistenceForShutdown(std::chrono::seconds(2), persistenceError))
OutputDebugStringA((std::string("Persistence shutdown flush failed: ") + persistenceError + "\n").c_str());
}
return true; return true;
} }
@@ -338,9 +363,6 @@ bool OpenGLComposite::RequestScreenshot(std::string& error)
void OpenGLComposite::renderEffect() void OpenGLComposite::renderEffect()
{ {
if (mRuntimeUpdateController)
mRuntimeUpdateController->ProcessRuntimeWork();
const RenderFrameInput frameInput = BuildRenderFrameInput(); const RenderFrameInput frameInput = BuildRenderFrameInput();
RenderFrame(frameInput); RenderFrame(frameInput);
} }

View File

@@ -2,23 +2,13 @@
#define __OPENGL_COMPOSITE_H__ #define __OPENGL_COMPOSITE_H__
#include <windows.h> #include <windows.h>
#include <process.h>
#include <tchar.h>
#include <gl/gl.h>
#include <gl/glu.h>
#include <objbase.h> #include <objbase.h>
#include <atlbase.h>
#include <comutil.h>
#include "GLExtensions.h"
#include "RenderFrameState.h" #include "RenderFrameState.h"
#include <functional>
#include <filesystem> #include <filesystem>
#include <memory> #include <memory>
#include <string> #include <string>
#include <vector>
class RenderEngine; class RenderEngine;
class RuntimeCoordinator; class RuntimeCoordinator;

View File

@@ -48,6 +48,9 @@ RuntimeUpdateController::RuntimeUpdateController(
mRuntimeEventDispatcher.Subscribe( mRuntimeEventDispatcher.Subscribe(
RuntimeEventType::RuntimeReloadRequested, RuntimeEventType::RuntimeReloadRequested,
[this](const RuntimeEvent& event) { HandleRuntimeReloadRequested(event); }); [this](const RuntimeEvent& event) { HandleRuntimeReloadRequested(event); });
mRuntimeEventDispatcher.Subscribe(
RuntimeEventType::RuntimePersistenceRequested,
[this](const RuntimeEvent& event) { HandleRuntimePersistenceRequested(event); });
mRuntimeEventDispatcher.Subscribe( mRuntimeEventDispatcher.Subscribe(
RuntimeEventType::ShaderBuildRequested, RuntimeEventType::ShaderBuildRequested,
[this](const RuntimeEvent& event) { HandleShaderBuildRequested(event); }); [this](const RuntimeEvent& event) { HandleShaderBuildRequested(event); });
@@ -158,6 +161,16 @@ void RuntimeUpdateController::HandleRuntimeReloadRequested(const RuntimeEvent& e
mRuntimeStore.ClearReloadRequest(); mRuntimeStore.ClearReloadRequest();
} }
void RuntimeUpdateController::HandleRuntimePersistenceRequested(const RuntimeEvent& event)
{
const RuntimePersistenceRequestedEvent* payload = std::get_if<RuntimePersistenceRequestedEvent>(&event.payload);
if (!payload)
return;
std::string error;
mRuntimeStore.RequestPersistence(payload->request, error);
}
void RuntimeUpdateController::HandleShaderBuildRequested(const RuntimeEvent& event) void RuntimeUpdateController::HandleShaderBuildRequested(const RuntimeEvent& event)
{ {
const ShaderBuildEvent* payload = std::get_if<ShaderBuildEvent>(&event.payload); const ShaderBuildEvent* payload = std::get_if<ShaderBuildEvent>(&event.payload);

View File

@@ -36,6 +36,7 @@ public:
private: private:
void HandleRuntimeStateBroadcastRequested(const RuntimeEvent& event); void HandleRuntimeStateBroadcastRequested(const RuntimeEvent& event);
void HandleRuntimeReloadRequested(const RuntimeEvent& event); void HandleRuntimeReloadRequested(const RuntimeEvent& event);
void HandleRuntimePersistenceRequested(const RuntimeEvent& event);
void HandleShaderBuildRequested(const RuntimeEvent& event); void HandleShaderBuildRequested(const RuntimeEvent& event);
void HandleShaderBuildPrepared(const RuntimeEvent& event); void HandleShaderBuildPrepared(const RuntimeEvent& event);
void HandleShaderBuildFailed(const RuntimeEvent& event); void HandleShaderBuildFailed(const RuntimeEvent& event);

View File

@@ -8,7 +8,9 @@
#include <cstring> #include <cstring>
#include <chrono> #include <chrono>
#include <cstdlib>
#include <gl/gl.h> #include <gl/gl.h>
#include <string>
OpenGLRenderPipeline::OpenGLRenderPipeline( OpenGLRenderPipeline::OpenGLRenderPipeline(
OpenGLRenderer& renderer, OpenGLRenderer& renderer,
@@ -22,7 +24,9 @@ OpenGLRenderPipeline::OpenGLRenderPipeline(
mHealthTelemetry(healthTelemetry), mHealthTelemetry(healthTelemetry),
mRenderEffect(renderEffect), mRenderEffect(renderEffect),
mOutputReady(outputReady), mOutputReady(outputReady),
mPaint(paint) mPaint(paint),
mOutputReadbackMode(ReadOutputReadbackModeFromEnvironment()),
mAsyncReadbackDepth(ReadAsyncReadbackDepthFromEnvironment())
{ {
} }
@@ -44,7 +48,9 @@ bool OpenGLRenderPipeline::RenderFrame(const RenderPipelineFrameContext& context
glBindFramebuffer(GL_FRAMEBUFFER, mRenderer.OutputFramebuffer()); glBindFramebuffer(GL_FRAMEBUFFER, mRenderer.OutputFramebuffer());
if (mOutputReady) if (mOutputReady)
mOutputReady(); mOutputReady();
if (state.outputPixelFormat == VideoIOPixelFormat::V210 || state.outputPixelFormat == VideoIOPixelFormat::Yuva10) if (state.outputPixelFormat == VideoIOPixelFormat::Bgra8)
PackOutputForBgra8(state);
else if (state.outputPixelFormat == VideoIOPixelFormat::V210 || state.outputPixelFormat == VideoIOPixelFormat::Yuva10)
PackOutputFor10Bit(state); PackOutputFor10Bit(state);
glFlush(); glFlush();
@@ -53,13 +59,44 @@ bool OpenGLRenderPipeline::RenderFrame(const RenderPipelineFrameContext& context
mHealthTelemetry.TryRecordPerformanceStats(state.frameBudgetMilliseconds, renderMilliseconds); mHealthTelemetry.TryRecordPerformanceStats(state.frameBudgetMilliseconds, renderMilliseconds);
mRuntimeSnapshotProvider.AdvanceFrame(); mRuntimeSnapshotProvider.AdvanceFrame();
ReadOutputFrame(state, outputFrame); OutputReadbackTiming readbackTiming = ReadOutputFrame(state, outputFrame);
if (mPaint) mHealthTelemetry.TryRecordOutputRenderPipelineTiming(
mPaint(); renderMilliseconds,
readbackTiming.fenceWaitMilliseconds,
readbackTiming.mapMilliseconds,
readbackTiming.copyMilliseconds,
readbackTiming.cachedCopyMilliseconds,
readbackTiming.asyncQueueMilliseconds,
readbackTiming.asyncQueueBufferMilliseconds,
readbackTiming.asyncQueueSetupMilliseconds,
readbackTiming.asyncQueueReadPixelsMilliseconds,
readbackTiming.asyncQueueFenceMilliseconds,
readbackTiming.syncReadMilliseconds,
readbackTiming.asyncReadbackMissed,
readbackTiming.cachedFallbackUsed,
readbackTiming.syncFallbackUsed);
return true; return true;
} }
void OpenGLRenderPipeline::PackOutputForBgra8(const VideoIOState& state)
{
glBindFramebuffer(GL_READ_FRAMEBUFFER, mRenderer.OutputFramebuffer());
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, mRenderer.OutputPackFramebuffer());
glBlitFramebuffer(
0,
0,
state.outputFrameSize.width,
state.outputFrameSize.height,
0,
0,
state.outputFrameSize.width,
state.outputFrameSize.height,
GL_COLOR_BUFFER_BIT,
GL_NEAREST);
glBindFramebuffer(GL_FRAMEBUFFER, mRenderer.OutputPackFramebuffer());
}
void OpenGLRenderPipeline::PackOutputFor10Bit(const VideoIOState& state) void OpenGLRenderPipeline::PackOutputFor10Bit(const VideoIOState& state)
{ {
glBindFramebuffer(GL_FRAMEBUFFER, mRenderer.OutputPackFramebuffer()); glBindFramebuffer(GL_FRAMEBUFFER, mRenderer.OutputPackFramebuffer());
@@ -93,11 +130,17 @@ bool OpenGLRenderPipeline::EnsureAsyncReadbackBuffers(std::size_t requiredBytes)
if (requiredBytes == 0) if (requiredBytes == 0)
return false; return false;
if (mAsyncReadbackBytes == requiredBytes && mAsyncReadbackSlots[0].pixelPackBuffer != 0) if (mAsyncReadbackBytes == requiredBytes &&
mAsyncReadbackSlots.size() == mAsyncReadbackDepth &&
!mAsyncReadbackSlots.empty() &&
mAsyncReadbackSlots[0].pixelPackBuffer != 0)
{
return true; return true;
}
ResetAsyncReadbackState(); ResetAsyncReadbackState();
mAsyncReadbackBytes = requiredBytes; mAsyncReadbackBytes = requiredBytes;
mAsyncReadbackSlots.resize(mAsyncReadbackDepth);
for (AsyncReadbackSlot& slot : mAsyncReadbackSlots) for (AsyncReadbackSlot& slot : mAsyncReadbackSlots)
{ {
glGenBuffers(1, &slot.pixelPackBuffer); glGenBuffers(1, &slot.pixelPackBuffer);
@@ -118,7 +161,7 @@ void OpenGLRenderPipeline::ResetAsyncReadbackState()
for (AsyncReadbackSlot& slot : mAsyncReadbackSlots) for (AsyncReadbackSlot& slot : mAsyncReadbackSlots)
slot.sizeBytes = 0; slot.sizeBytes = 0;
if (mAsyncReadbackSlots[0].pixelPackBuffer != 0) if (!mAsyncReadbackSlots.empty() && mAsyncReadbackSlots[0].pixelPackBuffer != 0)
{ {
for (AsyncReadbackSlot& slot : mAsyncReadbackSlots) for (AsyncReadbackSlot& slot : mAsyncReadbackSlots)
{ {
@@ -133,6 +176,7 @@ void OpenGLRenderPipeline::ResetAsyncReadbackState()
mAsyncReadbackWriteIndex = 0; mAsyncReadbackWriteIndex = 0;
mAsyncReadbackReadIndex = 0; mAsyncReadbackReadIndex = 0;
mAsyncReadbackBytes = 0; mAsyncReadbackBytes = 0;
mAsyncReadbackSlots.clear();
} }
void OpenGLRenderPipeline::FlushAsyncReadbackPipeline() void OpenGLRenderPipeline::FlushAsyncReadbackPipeline()
@@ -151,18 +195,29 @@ void OpenGLRenderPipeline::FlushAsyncReadbackPipeline()
mAsyncReadbackReadIndex = 0; mAsyncReadbackReadIndex = 0;
} }
void OpenGLRenderPipeline::QueueAsyncReadback(const VideoIOState& state) bool OpenGLRenderPipeline::QueueAsyncReadback(const VideoIOState& state, OutputReadbackTiming& timing)
{ {
const bool usePackedOutput = state.outputPixelFormat == VideoIOPixelFormat::V210 || state.outputPixelFormat == VideoIOPixelFormat::Yuva10; const auto queueStartTime = std::chrono::steady_clock::now();
const bool useTenBitPackedOutput = state.outputPixelFormat == VideoIOPixelFormat::V210 ||
state.outputPixelFormat == VideoIOPixelFormat::Yuva10;
const bool usePackFramebuffer = state.outputPixelFormat == VideoIOPixelFormat::Bgra8 || useTenBitPackedOutput;
const std::size_t requiredBytes = static_cast<std::size_t>(state.outputFrameRowBytes) * state.outputFrameSize.height; const std::size_t requiredBytes = static_cast<std::size_t>(state.outputFrameRowBytes) * state.outputFrameSize.height;
const GLenum format = usePackedOutput ? GL_RGBA : GL_BGRA; const GLenum format = useTenBitPackedOutput ? GL_RGBA : GL_BGRA;
const GLenum type = usePackedOutput ? GL_UNSIGNED_BYTE : GL_UNSIGNED_INT_8_8_8_8_REV; const GLenum type = useTenBitPackedOutput ? GL_UNSIGNED_BYTE : GL_UNSIGNED_INT_8_8_8_8_REV;
const GLuint framebuffer = usePackedOutput ? mRenderer.OutputPackFramebuffer() : mRenderer.OutputFramebuffer(); const GLuint framebuffer = usePackFramebuffer ? mRenderer.OutputPackFramebuffer() : mRenderer.OutputFramebuffer();
const GLsizei readWidth = static_cast<GLsizei>(usePackedOutput ? state.outputPackTextureWidth : state.outputFrameSize.width); const GLsizei readWidth = static_cast<GLsizei>(useTenBitPackedOutput ? state.outputPackTextureWidth : state.outputFrameSize.width);
const GLsizei readHeight = static_cast<GLsizei>(state.outputFrameSize.height); const GLsizei readHeight = static_cast<GLsizei>(state.outputFrameSize.height);
const auto finishTiming = [&timing, queueStartTime]() {
const auto queueEndTime = std::chrono::steady_clock::now();
timing.asyncQueueMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(queueEndTime - queueStartTime).count();
};
if (requiredBytes == 0) if (requiredBytes == 0)
return; {
finishTiming();
return false;
}
if (mAsyncReadbackBytes != requiredBytes if (mAsyncReadbackBytes != requiredBytes
|| mAsyncReadbackFormat != format || mAsyncReadbackFormat != format
@@ -173,30 +228,56 @@ void OpenGLRenderPipeline::QueueAsyncReadback(const VideoIOState& state)
mAsyncReadbackType = type; mAsyncReadbackType = type;
mAsyncReadbackFramebuffer = framebuffer; mAsyncReadbackFramebuffer = framebuffer;
if (!EnsureAsyncReadbackBuffers(requiredBytes)) if (!EnsureAsyncReadbackBuffers(requiredBytes))
return; {
finishTiming();
return false;
}
}
if (mAsyncReadbackSlots.empty())
{
finishTiming();
return false;
} }
AsyncReadbackSlot& slot = mAsyncReadbackSlots[mAsyncReadbackWriteIndex]; AsyncReadbackSlot& slot = mAsyncReadbackSlots[mAsyncReadbackWriteIndex];
if (slot.fence != nullptr) if (slot.inFlight)
{ {
glDeleteSync(slot.fence); finishTiming();
slot.fence = nullptr; return false;
} }
auto stageStartTime = std::chrono::steady_clock::now();
glPixelStorei(GL_PACK_ALIGNMENT, 4); glPixelStorei(GL_PACK_ALIGNMENT, 4);
glPixelStorei(GL_PACK_ROW_LENGTH, 0); glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, framebuffer); glBindFramebuffer(GL_READ_FRAMEBUFFER, framebuffer);
glBindBuffer(GL_PIXEL_PACK_BUFFER, slot.pixelPackBuffer); glBindBuffer(GL_PIXEL_PACK_BUFFER, slot.pixelPackBuffer);
auto stageEndTime = std::chrono::steady_clock::now();
timing.asyncQueueSetupMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(stageEndTime - stageStartTime).count();
stageStartTime = std::chrono::steady_clock::now();
glBufferData(GL_PIXEL_PACK_BUFFER, static_cast<GLsizeiptr>(requiredBytes), nullptr, GL_STREAM_READ); glBufferData(GL_PIXEL_PACK_BUFFER, static_cast<GLsizeiptr>(requiredBytes), nullptr, GL_STREAM_READ);
stageEndTime = std::chrono::steady_clock::now();
timing.asyncQueueBufferMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(stageEndTime - stageStartTime).count();
stageStartTime = std::chrono::steady_clock::now();
glReadPixels(0, 0, readWidth, readHeight, format, type, nullptr); glReadPixels(0, 0, readWidth, readHeight, format, type, nullptr);
stageEndTime = std::chrono::steady_clock::now();
timing.asyncQueueReadPixelsMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(stageEndTime - stageStartTime).count();
stageStartTime = std::chrono::steady_clock::now();
slot.fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0); slot.fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
stageEndTime = std::chrono::steady_clock::now();
timing.asyncQueueFenceMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(stageEndTime - stageStartTime).count();
slot.inFlight = slot.fence != nullptr; slot.inFlight = slot.fence != nullptr;
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
mAsyncReadbackWriteIndex = (mAsyncReadbackWriteIndex + 1) % mAsyncReadbackSlots.size(); mAsyncReadbackWriteIndex = (mAsyncReadbackWriteIndex + 1) % mAsyncReadbackSlots.size();
finishTiming();
return slot.inFlight;
} }
bool OpenGLRenderPipeline::TryConsumeAsyncReadback(VideoIOOutputFrame& outputFrame, GLuint64 timeoutNanoseconds) bool OpenGLRenderPipeline::TryConsumeAsyncReadback(VideoIOOutputFrame& outputFrame, GLuint64 timeoutNanoseconds, OutputReadbackTiming& timing)
{ {
if (mAsyncReadbackBytes == 0 || outputFrame.bytes == nullptr) if (mAsyncReadbackBytes == 0 || outputFrame.bytes == nullptr)
return false; return false;
@@ -206,15 +287,24 @@ bool OpenGLRenderPipeline::TryConsumeAsyncReadback(VideoIOOutputFrame& outputFra
return false; return false;
const GLenum waitFlags = timeoutNanoseconds > 0 ? GL_SYNC_FLUSH_COMMANDS_BIT : 0; const GLenum waitFlags = timeoutNanoseconds > 0 ? GL_SYNC_FLUSH_COMMANDS_BIT : 0;
const auto waitStartTime = std::chrono::steady_clock::now();
const GLenum waitResult = glClientWaitSync(slot.fence, waitFlags, timeoutNanoseconds); const GLenum waitResult = glClientWaitSync(slot.fence, waitFlags, timeoutNanoseconds);
const auto waitEndTime = std::chrono::steady_clock::now();
timing.fenceWaitMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(waitEndTime - waitStartTime).count();
if (waitResult != GL_ALREADY_SIGNALED && waitResult != GL_CONDITION_SATISFIED) if (waitResult != GL_ALREADY_SIGNALED && waitResult != GL_CONDITION_SATISFIED)
{
timing.asyncReadbackMissed = true;
return false; return false;
}
glDeleteSync(slot.fence); glDeleteSync(slot.fence);
slot.fence = nullptr; slot.fence = nullptr;
glBindBuffer(GL_PIXEL_PACK_BUFFER, slot.pixelPackBuffer); glBindBuffer(GL_PIXEL_PACK_BUFFER, slot.pixelPackBuffer);
const auto mapStartTime = std::chrono::steady_clock::now();
void* mappedBytes = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY); void* mappedBytes = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
const auto mapEndTime = std::chrono::steady_clock::now();
timing.mapMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(mapEndTime - mapStartTime).count();
if (mappedBytes == nullptr) if (mappedBytes == nullptr)
{ {
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
@@ -223,7 +313,10 @@ bool OpenGLRenderPipeline::TryConsumeAsyncReadback(VideoIOOutputFrame& outputFra
return false; return false;
} }
const auto copyStartTime = std::chrono::steady_clock::now();
std::memcpy(outputFrame.bytes, mappedBytes, slot.sizeBytes); std::memcpy(outputFrame.bytes, mappedBytes, slot.sizeBytes);
const auto copyEndTime = std::chrono::steady_clock::now();
timing.copyMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(copyEndTime - copyStartTime).count();
glUnmapBuffer(GL_PIXEL_PACK_BUFFER); glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
@@ -243,40 +336,144 @@ void OpenGLRenderPipeline::CacheOutputFrame(const VideoIOOutputFrame& outputFram
std::memcpy(mCachedOutputFrame.data(), outputFrame.bytes, byteCount); std::memcpy(mCachedOutputFrame.data(), outputFrame.bytes, byteCount);
} }
void OpenGLRenderPipeline::ReadOutputFrameSynchronously(const VideoIOState& state, void* destinationBytes) bool OpenGLRenderPipeline::TryCopyCachedOutputFrame(VideoIOOutputFrame& outputFrame, OutputReadbackTiming& timing) const
{ {
if (outputFrame.bytes == nullptr || outputFrame.height == 0 || outputFrame.rowBytes <= 0)
return false;
const std::size_t byteCount = static_cast<std::size_t>(outputFrame.rowBytes) * outputFrame.height;
if (mCachedOutputFrame.size() != byteCount)
return false;
const auto copyStartTime = std::chrono::steady_clock::now();
std::memcpy(outputFrame.bytes, mCachedOutputFrame.data(), byteCount);
const auto copyEndTime = std::chrono::steady_clock::now();
timing.cachedCopyMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(copyEndTime - copyStartTime).count();
timing.cachedFallbackUsed = true;
return true;
}
void OpenGLRenderPipeline::ReadOutputFrameSynchronously(const VideoIOState& state, void* destinationBytes, OutputReadbackTiming& timing)
{
const auto readStartTime = std::chrono::steady_clock::now();
const bool usePackedOutput = state.outputPixelFormat == VideoIOPixelFormat::V210 || state.outputPixelFormat == VideoIOPixelFormat::Yuva10; const bool usePackedOutput = state.outputPixelFormat == VideoIOPixelFormat::V210 || state.outputPixelFormat == VideoIOPixelFormat::Yuva10;
const bool usePackFramebuffer = state.outputPixelFormat == VideoIOPixelFormat::Bgra8 || usePackedOutput;
glPixelStorei(GL_PACK_ALIGNMENT, 4); glPixelStorei(GL_PACK_ALIGNMENT, 4);
glPixelStorei(GL_PACK_ROW_LENGTH, 0); glPixelStorei(GL_PACK_ROW_LENGTH, 0);
if (usePackedOutput) if (usePackFramebuffer)
{ {
glBindFramebuffer(GL_READ_FRAMEBUFFER, mRenderer.OutputPackFramebuffer()); glBindFramebuffer(GL_READ_FRAMEBUFFER, mRenderer.OutputPackFramebuffer());
if (usePackedOutput)
glReadPixels(0, 0, state.outputPackTextureWidth, state.outputFrameSize.height, GL_RGBA, GL_UNSIGNED_BYTE, destinationBytes); glReadPixels(0, 0, state.outputPackTextureWidth, state.outputFrameSize.height, GL_RGBA, GL_UNSIGNED_BYTE, destinationBytes);
else
glReadPixels(0, 0, state.outputFrameSize.width, state.outputFrameSize.height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, destinationBytes);
} }
else else
{ {
glBindFramebuffer(GL_READ_FRAMEBUFFER, mRenderer.OutputFramebuffer()); glBindFramebuffer(GL_READ_FRAMEBUFFER, mRenderer.OutputFramebuffer());
glReadPixels(0, 0, state.outputFrameSize.width, state.outputFrameSize.height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, destinationBytes); glReadPixels(0, 0, state.outputFrameSize.width, state.outputFrameSize.height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, destinationBytes);
} }
const auto readEndTime = std::chrono::steady_clock::now();
timing.syncReadMilliseconds += std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(readEndTime - readStartTime).count();
timing.syncFallbackUsed = true;
} }
void OpenGLRenderPipeline::ReadOutputFrame(const VideoIOState& state, VideoIOOutputFrame& outputFrame) OpenGLRenderPipeline::OutputReadbackTiming OpenGLRenderPipeline::ReadOutputFrame(const VideoIOState& state, VideoIOOutputFrame& outputFrame)
{ {
if (TryConsumeAsyncReadback(outputFrame, 500000)) OutputReadbackTiming timing;
{
QueueAsyncReadback(state);
return;
}
// If async readback misses the playout deadline, prefer a fresh synchronous if (mOutputReadbackMode == OutputReadbackMode::Synchronous)
// frame over reusing stale cached output, then restart the async pipeline. {
if (outputFrame.bytes != nullptr) if (outputFrame.bytes != nullptr)
{ {
ReadOutputFrameSynchronously(state, outputFrame.bytes); ReadOutputFrameSynchronously(state, outputFrame.bytes, timing);
CacheOutputFrame(outputFrame);
}
return timing;
}
if (mOutputReadbackMode == OutputReadbackMode::CachedOnly)
{
if (TryCopyCachedOutputFrame(outputFrame, timing))
return timing;
if (outputFrame.bytes != nullptr)
{
ReadOutputFrameSynchronously(state, outputFrame.bytes, timing);
CacheOutputFrame(outputFrame);
}
return timing;
}
if (TryConsumeAsyncReadback(outputFrame, 0, timing))
{
(void)QueueAsyncReadback(state, timing);
return timing;
}
const bool queued = QueueAsyncReadback(state, timing);
if (queued && TryConsumeAsyncReadback(outputFrame, 0, timing))
return timing;
if (TryCopyCachedOutputFrame(outputFrame, timing))
{
return timing;
}
// Bootstrap only: until the first async readback has produced cached output,
// use one synchronous readback so DeckLink has a valid frame to schedule.
if (outputFrame.bytes != nullptr && mCachedOutputFrame.empty())
{
ReadOutputFrameSynchronously(state, outputFrame.bytes, timing);
CacheOutputFrame(outputFrame); CacheOutputFrame(outputFrame);
} }
FlushAsyncReadbackPipeline(); if (!queued)
QueueAsyncReadback(state); (void)QueueAsyncReadback(state, timing);
return timing;
}
OpenGLRenderPipeline::OutputReadbackMode OpenGLRenderPipeline::ReadOutputReadbackModeFromEnvironment()
{
char* mode = nullptr;
std::size_t modeSize = 0;
if (_dupenv_s(&mode, &modeSize, "VST_OUTPUT_READBACK_MODE") != 0 || mode == nullptr)
return OutputReadbackMode::AsyncPbo;
const std::string modeValue(mode);
std::free(mode);
if (modeValue == "async_pbo")
return OutputReadbackMode::AsyncPbo;
if (modeValue == "sync")
return OutputReadbackMode::Synchronous;
if (modeValue == "cached_only")
return OutputReadbackMode::CachedOnly;
return OutputReadbackMode::AsyncPbo;
}
std::size_t OpenGLRenderPipeline::ReadAsyncReadbackDepthFromEnvironment()
{
char* depthValue = nullptr;
std::size_t depthValueSize = 0;
if (_dupenv_s(&depthValue, &depthValueSize, "VST_OUTPUT_READBACK_DEPTH") != 0 || depthValue == nullptr)
return 6;
const std::string value(depthValue);
std::free(depthValue);
try
{
const unsigned long requestedDepth = std::stoul(value);
if (requestedDepth < 3)
return 3;
if (requestedDepth > 12)
return 12;
return static_cast<std::size_t>(requestedDepth);
}
catch (...)
{
return 6;
}
} }

View File

@@ -3,7 +3,6 @@
#include "GLExtensions.h" #include "GLExtensions.h"
#include "VideoIOTypes.h" #include "VideoIOTypes.h"
#include <array>
#include <functional> #include <functional>
#include <vector> #include <vector>
@@ -36,6 +35,13 @@ public:
bool RenderFrame(const RenderPipelineFrameContext& context, VideoIOOutputFrame& outputFrame); bool RenderFrame(const RenderPipelineFrameContext& context, VideoIOOutputFrame& outputFrame);
private: private:
enum class OutputReadbackMode
{
AsyncPbo,
Synchronous,
CachedOnly
};
struct AsyncReadbackSlot struct AsyncReadbackSlot
{ {
GLuint pixelPackBuffer = 0; GLuint pixelPackBuffer = 0;
@@ -44,15 +50,36 @@ private:
bool inFlight = false; bool inFlight = false;
}; };
struct OutputReadbackTiming
{
double fenceWaitMilliseconds = 0.0;
double mapMilliseconds = 0.0;
double copyMilliseconds = 0.0;
double cachedCopyMilliseconds = 0.0;
double asyncQueueMilliseconds = 0.0;
double asyncQueueBufferMilliseconds = 0.0;
double asyncQueueSetupMilliseconds = 0.0;
double asyncQueueReadPixelsMilliseconds = 0.0;
double asyncQueueFenceMilliseconds = 0.0;
double syncReadMilliseconds = 0.0;
bool asyncReadbackMissed = false;
bool cachedFallbackUsed = false;
bool syncFallbackUsed = false;
};
bool EnsureAsyncReadbackBuffers(std::size_t requiredBytes); bool EnsureAsyncReadbackBuffers(std::size_t requiredBytes);
void ResetAsyncReadbackState(); void ResetAsyncReadbackState();
void FlushAsyncReadbackPipeline(); void FlushAsyncReadbackPipeline();
void QueueAsyncReadback(const VideoIOState& state); bool QueueAsyncReadback(const VideoIOState& state, OutputReadbackTiming& timing);
bool TryConsumeAsyncReadback(VideoIOOutputFrame& outputFrame, GLuint64 timeoutNanoseconds); bool TryConsumeAsyncReadback(VideoIOOutputFrame& outputFrame, GLuint64 timeoutNanoseconds, OutputReadbackTiming& timing);
void CacheOutputFrame(const VideoIOOutputFrame& outputFrame); void CacheOutputFrame(const VideoIOOutputFrame& outputFrame);
void ReadOutputFrameSynchronously(const VideoIOState& state, void* destinationBytes); bool TryCopyCachedOutputFrame(VideoIOOutputFrame& outputFrame, OutputReadbackTiming& timing) const;
void ReadOutputFrameSynchronously(const VideoIOState& state, void* destinationBytes, OutputReadbackTiming& timing);
void PackOutputForBgra8(const VideoIOState& state);
void PackOutputFor10Bit(const VideoIOState& state); void PackOutputFor10Bit(const VideoIOState& state);
void ReadOutputFrame(const VideoIOState& state, VideoIOOutputFrame& outputFrame); OutputReadbackTiming ReadOutputFrame(const VideoIOState& state, VideoIOOutputFrame& outputFrame);
static OutputReadbackMode ReadOutputReadbackModeFromEnvironment();
static std::size_t ReadAsyncReadbackDepthFromEnvironment();
OpenGLRenderer& mRenderer; OpenGLRenderer& mRenderer;
RuntimeSnapshotProvider& mRuntimeSnapshotProvider; RuntimeSnapshotProvider& mRuntimeSnapshotProvider;
@@ -60,7 +87,9 @@ private:
RenderEffectCallback mRenderEffect; RenderEffectCallback mRenderEffect;
OutputReadyCallback mOutputReady; OutputReadyCallback mOutputReady;
PaintCallback mPaint; PaintCallback mPaint;
std::array<AsyncReadbackSlot, 3> mAsyncReadbackSlots; OutputReadbackMode mOutputReadbackMode = OutputReadbackMode::AsyncPbo;
std::vector<AsyncReadbackSlot> mAsyncReadbackSlots;
std::size_t mAsyncReadbackDepth = 0;
std::size_t mAsyncReadbackWriteIndex = 0; std::size_t mAsyncReadbackWriteIndex = 0;
std::size_t mAsyncReadbackReadIndex = 0; std::size_t mAsyncReadbackReadIndex = 0;
std::size_t mAsyncReadbackBytes = 0; std::size_t mAsyncReadbackBytes = 0;

View File

@@ -7,4 +7,3 @@ constexpr GLuint kDecodedVideoTextureUnit = 1;
constexpr GLuint kSourceHistoryTextureUnitBase = 2; constexpr GLuint kSourceHistoryTextureUnitBase = 2;
constexpr GLuint kPackedVideoTextureUnit = 2; constexpr GLuint kPackedVideoTextureUnit = 2;
constexpr GLuint kGlobalParamsBindingPoint = 0; constexpr GLuint kGlobalParamsBindingPoint = 0;
constexpr unsigned kPrerollFrameCount = 12;

View File

@@ -178,9 +178,14 @@ RuntimeCoordinatorResult RuntimeCoordinator::UpdateLayerParameterByControlKey(co
RuntimeCoordinatorResult RuntimeCoordinator::CommitOscParameterByControlKey(const std::string& layerKey, const std::string& parameterKey, const JsonValue& newValue) RuntimeCoordinatorResult RuntimeCoordinator::CommitOscParameterByControlKey(const std::string& layerKey, const std::string& parameterKey, const JsonValue& newValue)
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
constexpr RuntimeCoordinatorOscCommitPersistence kDefaultOscCommitPersistence =
RuntimeCoordinatorOscCommitPersistence::SessionOnly;
constexpr bool kPersistSettledOscCommits =
kDefaultOscCommitPersistence == RuntimeCoordinatorOscCommitPersistence::Persistent;
std::string error; std::string error;
ResolvedParameterMutation mutation; ResolvedParameterMutation mutation;
if (!BuildParameterMutationByControlKey(layerKey, parameterKey, newValue, true, mutation, error)) if (!BuildParameterMutationByControlKey(layerKey, parameterKey, newValue, kPersistSettledOscCommits, mutation, error))
{ {
RuntimeCoordinatorResult result = ApplyStoreMutation(false, error, false, false, false); RuntimeCoordinatorResult result = ApplyStoreMutation(false, error, false, false, false);
PublishCoordinatorResult("CommitOscParameterByControlKey", result); PublishCoordinatorResult("CommitOscParameterByControlKey", result);
@@ -575,8 +580,7 @@ void RuntimeCoordinator::PublishCoordinatorFollowUpEvents(const std::string& act
if (result.persistenceRequested) if (result.persistenceRequested)
{ {
RuntimePersistenceRequestedEvent persistenceRequested; RuntimePersistenceRequestedEvent persistenceRequested;
persistenceRequested.reason = action; persistenceRequested.request = PersistenceRequest::RuntimeStateRequest(action);
persistenceRequested.debounceAllowed = true;
mRuntimeEventDispatcher.PublishPayload(persistenceRequested, "RuntimeCoordinator"); mRuntimeEventDispatcher.PublishPayload(persistenceRequested, "RuntimeCoordinator");
} }

View File

@@ -32,6 +32,12 @@ enum class RuntimeCoordinatorTransientOscInvalidation
All All
}; };
enum class RuntimeCoordinatorOscCommitPersistence
{
SessionOnly,
Persistent
};
struct RuntimeCoordinatorResult struct RuntimeCoordinatorResult
{ {
bool accepted = false; bool accepted = false;

View File

@@ -4,6 +4,8 @@
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#include "PersistenceRequest.h"
#include <string> #include <string>
enum class RuntimeEventSeverity enum class RuntimeEventSeverity
@@ -109,8 +111,7 @@ struct RuntimeStateChangedEvent
struct RuntimePersistenceRequestedEvent struct RuntimePersistenceRequestedEvent
{ {
std::string reason; PersistenceRequest request;
bool debounceAllowed = true;
}; };
struct RuntimeReloadRequestedEvent struct RuntimeReloadRequestedEvent

View File

@@ -0,0 +1,144 @@
#include "CommittedLiveState.h"
bool CommittedLiveState::LoadPersistentStateValue(const JsonValue& root)
{
return mLayerStack.LoadPersistentStateValue(root);
}
JsonValue CommittedLiveState::BuildPersistentStateValue(const ShaderPackageCatalog& shaderCatalog) const
{
return mLayerStack.BuildPersistentStateValue(shaderCatalog);
}
void CommittedLiveState::NormalizeLayerIds()
{
mLayerStack.NormalizeLayerIds();
}
void CommittedLiveState::EnsureDefaultsForAllLayers(const ShaderPackageCatalog& shaderCatalog)
{
mLayerStack.EnsureDefaultsForAllLayers(shaderCatalog);
}
void CommittedLiveState::EnsureDefaultLayer(const ShaderPackageCatalog& shaderCatalog)
{
mLayerStack.EnsureDefaultLayer(shaderCatalog);
}
void CommittedLiveState::RemoveLayersWithMissingPackages(const ShaderPackageCatalog& shaderCatalog)
{
mLayerStack.RemoveLayersWithMissingPackages(shaderCatalog);
}
bool CommittedLiveState::CreateLayer(const ShaderPackageCatalog& shaderCatalog, const std::string& shaderId, std::string& error)
{
return mLayerStack.CreateLayer(shaderCatalog, shaderId, error);
}
bool CommittedLiveState::DeleteLayer(const std::string& layerId, std::string& error)
{
return mLayerStack.DeleteLayer(layerId, error);
}
bool CommittedLiveState::MoveLayer(const std::string& layerId, int direction, std::string& error)
{
return mLayerStack.MoveLayer(layerId, direction, error);
}
bool CommittedLiveState::MoveLayerToIndex(const std::string& layerId, std::size_t targetIndex, std::string& error)
{
return mLayerStack.MoveLayerToIndex(layerId, targetIndex, error);
}
bool CommittedLiveState::SetLayerBypassState(const std::string& layerId, bool bypassed, std::string& error)
{
return mLayerStack.SetLayerBypassState(layerId, bypassed, error);
}
bool CommittedLiveState::SetLayerShaderSelection(const ShaderPackageCatalog& shaderCatalog, const std::string& layerId, const std::string& shaderId, std::string& error)
{
return mLayerStack.SetLayerShaderSelection(shaderCatalog, layerId, shaderId, error);
}
bool CommittedLiveState::SetParameterValue(const std::string& layerId, const std::string& parameterId, const ShaderParameterValue& value, std::string& error)
{
return mLayerStack.SetParameterValue(layerId, parameterId, value, error);
}
bool CommittedLiveState::ResetLayerParameterValues(const ShaderPackageCatalog& shaderCatalog, const std::string& layerId, std::string& error)
{
return mLayerStack.ResetLayerParameterValues(shaderCatalog, layerId, error);
}
bool CommittedLiveState::HasLayer(const std::string& layerId) const
{
return mLayerStack.HasLayer(layerId);
}
bool CommittedLiveState::TryGetParameterById(const ShaderPackageCatalog& shaderCatalog, const std::string& layerId, const std::string& parameterId, StoredParameterSnapshot& snapshot, std::string& error) const
{
return mLayerStack.TryGetParameterById(shaderCatalog, layerId, parameterId, snapshot, error);
}
bool CommittedLiveState::TryGetParameterByControlKey(const ShaderPackageCatalog& shaderCatalog, const std::string& layerKey, const std::string& parameterKey, StoredParameterSnapshot& snapshot, std::string& error) const
{
return mLayerStack.TryGetParameterByControlKey(shaderCatalog, layerKey, parameterKey, snapshot, error);
}
bool CommittedLiveState::ResolveLayerMove(const std::string& layerId, int direction, bool& shouldMove, std::string& error) const
{
return mLayerStack.ResolveLayerMove(layerId, direction, shouldMove, error);
}
bool CommittedLiveState::ResolveLayerMoveToIndex(const std::string& layerId, std::size_t targetIndex, bool& shouldMove, std::string& error) const
{
return mLayerStack.ResolveLayerMoveToIndex(layerId, targetIndex, shouldMove, error);
}
JsonValue CommittedLiveState::BuildStackPresetValue(const ShaderPackageCatalog& shaderCatalog, const std::string& presetName) const
{
return mLayerStack.BuildStackPresetValue(shaderCatalog, presetName);
}
bool CommittedLiveState::LoadStackPresetValue(const ShaderPackageCatalog& shaderCatalog, const JsonValue& root, std::string& error)
{
return mLayerStack.LoadStackPresetValue(shaderCatalog, root, error);
}
CommittedLiveStateReadModel CommittedLiveState::BuildReadModel(const ShaderPackageCatalog& shaderCatalog) const
{
CommittedLiveStateReadModel model;
model.layers = mLayerStack.Layers();
model.packagesById = shaderCatalog.CaptureSnapshot().packagesById;
return model;
}
std::vector<CommittedLiveState::LayerPersistentState> CommittedLiveState::CopyLayerStates() const
{
return mLayerStack.Layers();
}
const std::vector<CommittedLiveState::LayerPersistentState>& CommittedLiveState::Layers() const
{
return mLayerStack.Layers();
}
std::vector<CommittedLiveState::LayerPersistentState>& CommittedLiveState::Layers()
{
return mLayerStack.Layers();
}
const CommittedLiveState::LayerPersistentState* CommittedLiveState::FindLayerById(const std::string& layerId) const
{
return mLayerStack.FindLayerById(layerId);
}
const LayerStackStore& CommittedLiveState::LayerStack() const
{
return mLayerStack;
}
LayerStackStore& CommittedLiveState::LayerStack()
{
return mLayerStack;
}

View File

@@ -0,0 +1,52 @@
#pragma once
#include "LayerStackStore.h"
#include "RuntimeStoreReadModels.h"
#include "ShaderPackageCatalog.h"
#include <cstddef>
#include <string>
#include <vector>
class CommittedLiveState
{
public:
using LayerPersistentState = LayerStackStore::LayerPersistentState;
using StoredParameterSnapshot = LayerStackStore::StoredParameterSnapshot;
bool LoadPersistentStateValue(const JsonValue& root);
JsonValue BuildPersistentStateValue(const ShaderPackageCatalog& shaderCatalog) const;
void NormalizeLayerIds();
void EnsureDefaultsForAllLayers(const ShaderPackageCatalog& shaderCatalog);
void EnsureDefaultLayer(const ShaderPackageCatalog& shaderCatalog);
void RemoveLayersWithMissingPackages(const ShaderPackageCatalog& shaderCatalog);
bool CreateLayer(const ShaderPackageCatalog& shaderCatalog, const std::string& shaderId, std::string& error);
bool DeleteLayer(const std::string& layerId, std::string& error);
bool MoveLayer(const std::string& layerId, int direction, std::string& error);
bool MoveLayerToIndex(const std::string& layerId, std::size_t targetIndex, std::string& error);
bool SetLayerBypassState(const std::string& layerId, bool bypassed, std::string& error);
bool SetLayerShaderSelection(const ShaderPackageCatalog& shaderCatalog, const std::string& layerId, const std::string& shaderId, std::string& error);
bool SetParameterValue(const std::string& layerId, const std::string& parameterId, const ShaderParameterValue& value, std::string& error);
bool ResetLayerParameterValues(const ShaderPackageCatalog& shaderCatalog, const std::string& layerId, std::string& error);
bool HasLayer(const std::string& layerId) const;
bool TryGetParameterById(const ShaderPackageCatalog& shaderCatalog, const std::string& layerId, const std::string& parameterId, StoredParameterSnapshot& snapshot, std::string& error) const;
bool TryGetParameterByControlKey(const ShaderPackageCatalog& shaderCatalog, const std::string& layerKey, const std::string& parameterKey, StoredParameterSnapshot& snapshot, std::string& error) const;
bool ResolveLayerMove(const std::string& layerId, int direction, bool& shouldMove, std::string& error) const;
bool ResolveLayerMoveToIndex(const std::string& layerId, std::size_t targetIndex, bool& shouldMove, std::string& error) const;
JsonValue BuildStackPresetValue(const ShaderPackageCatalog& shaderCatalog, const std::string& presetName) const;
bool LoadStackPresetValue(const ShaderPackageCatalog& shaderCatalog, const JsonValue& root, std::string& error);
CommittedLiveStateReadModel BuildReadModel(const ShaderPackageCatalog& shaderCatalog) const;
std::vector<LayerPersistentState> CopyLayerStates() const;
const std::vector<LayerPersistentState>& Layers() const;
std::vector<LayerPersistentState>& Layers();
const LayerPersistentState* FindLayerById(const std::string& layerId) const;
const LayerStackStore& LayerStack() const;
LayerStackStore& LayerStack();
private:
LayerStackStore mLayerStack;
};

View File

@@ -92,7 +92,7 @@ std::vector<RuntimeStateLayerDescriptor> GetRuntimeStateLayerInventory()
{ {
RuntimeStateLayerKind::CommittedLive, RuntimeStateLayerKind::CommittedLive,
"Committed live state", "Committed live state",
"RuntimeCoordinator, physically backed by RuntimeStore during migration", "RuntimeCoordinator / CommittedLiveState",
"Current running session", "Current running session",
"May request persistence depending on mutation policy", "May request persistence depending on mutation policy",
"Operator/session truth until changed again" "Operator/session truth until changed again"
@@ -152,14 +152,14 @@ std::vector<RuntimeStateFieldDescriptor> GetRuntimeStateFieldInventory()
RuntimeStateField::CommittedSessionParameterValues, RuntimeStateField::CommittedSessionParameterValues,
ClassifyRuntimeStateField(RuntimeStateField::CommittedSessionParameterValues), ClassifyRuntimeStateField(RuntimeStateField::CommittedSessionParameterValues),
"committed session parameter values", "committed session parameter values",
"RuntimeCoordinator policy, RuntimeStore backing during migration", "RuntimeCoordinator policy, CommittedLiveState backing",
"Operator/API truth after accepted mutations" "Operator/API truth after accepted mutations"
}, },
{ {
RuntimeStateField::CommittedLayerBypass, RuntimeStateField::CommittedLayerBypass,
ClassifyRuntimeStateField(RuntimeStateField::CommittedLayerBypass), ClassifyRuntimeStateField(RuntimeStateField::CommittedLayerBypass),
"committed layer bypass", "committed layer bypass",
"RuntimeCoordinator policy, RuntimeStore backing during migration", "RuntimeCoordinator policy, CommittedLiveState backing",
"Current operator/API bypass state" "Current operator/API bypass state"
}, },
{ {
@@ -227,4 +227,3 @@ std::vector<RuntimeStateFieldDescriptor> GetRuntimeStateFieldInventory()
} }
}; };
} }

View File

@@ -0,0 +1,44 @@
#pragma once
#include <cstdint>
#include <filesystem>
#include <string>
enum class PersistenceTargetKind
{
RuntimeState,
StackPreset,
RuntimeConfig
};
struct PersistenceRequest
{
PersistenceTargetKind targetKind = PersistenceTargetKind::RuntimeState;
std::string reason;
std::string debounceKey = "runtime-state";
bool debounceAllowed = true;
bool flushRequested = false;
uint64_t sequence = 0;
static PersistenceRequest RuntimeStateRequest(const std::string& reason)
{
PersistenceRequest request;
request.targetKind = PersistenceTargetKind::RuntimeState;
request.reason = reason;
request.debounceKey = "runtime-state";
request.debounceAllowed = true;
return request;
}
};
struct PersistenceSnapshot
{
PersistenceTargetKind targetKind = PersistenceTargetKind::RuntimeState;
std::filesystem::path targetPath;
std::string contents;
std::string reason;
std::string debounceKey;
bool debounceAllowed = false;
bool flushRequested = false;
uint64_t generation = 0;
};

View File

@@ -0,0 +1,271 @@
#include "PersistenceWriter.h"
#include <windows.h>
#include <algorithm>
#include <filesystem>
#include <fstream>
#include <utility>
PersistenceWriter::PersistenceWriter(std::chrono::milliseconds debounceDelay, SnapshotSink sink) :
mDebounceDelay(debounceDelay),
mSink(std::move(sink))
{
}
PersistenceWriter::~PersistenceWriter()
{
std::string error;
StopAndFlush((std::chrono::milliseconds::max)(), error);
}
void PersistenceWriter::SetResultCallback(ResultCallback callback)
{
std::lock_guard<std::mutex> lock(mMutex);
mResultCallback = std::move(callback);
}
bool PersistenceWriter::WriteSnapshot(const PersistenceSnapshot& snapshot, std::string& error)
{
if (!ValidateSnapshot(snapshot, error))
return false;
const bool succeeded = WriteSnapshotThroughSink(snapshot, error);
PublishWriteResult(snapshot, succeeded, error, false);
return succeeded;
}
bool PersistenceWriter::EnqueueSnapshot(const PersistenceSnapshot& snapshot, std::string& error)
{
if (!ValidateSnapshot(snapshot, error))
return false;
std::lock_guard<std::mutex> lock(mMutex);
if (!mAcceptingRequests)
{
error = "Persistence writer is stopping.";
return false;
}
StartWorkerLocked();
const auto now = std::chrono::steady_clock::now();
if (snapshot.debounceAllowed)
{
const std::string debounceKey = snapshot.debounceKey.empty() ? snapshot.targetPath.string() : snapshot.debounceKey;
PendingSnapshot& pending = mDebouncedSnapshots[debounceKey];
if (!pending.snapshot.targetPath.empty())
++mCoalescedCount;
else
++mEnqueuedCount;
pending.snapshot = snapshot;
pending.readyAt = snapshot.flushRequested ? now : now + mDebounceDelay;
}
else
{
mImmediateSnapshots.push_back(snapshot);
++mEnqueuedCount;
}
mCondition.notify_one();
return true;
}
// Stop accepting new snapshots and wait (up to `timeout`) for the worker to
// drain everything already queued. Debounce delays are cancelled so pending
// snapshots become eligible immediately. Returns false with `error` set on
// timeout; the worker thread is left running in that case (the destructor
// retries with an unbounded timeout).
bool PersistenceWriter::StopAndFlush(std::chrono::milliseconds timeout, std::string& error)
{
    {
        std::lock_guard<std::mutex> lock(mMutex);
        mAcceptingRequests = false;
        mStopping = true;
        // Cancel debounce delays: make every pending snapshot ready now.
        const auto now = std::chrono::steady_clock::now();
        for (auto& entry : mDebouncedSnapshots)
            entry.second.readyAt = now;
    }
    mCondition.notify_all();
    std::unique_lock<std::mutex> lock(mMutex);
    if (mWorkerRunning)
    {
        if (timeout == (std::chrono::milliseconds::max)())
        {
            // Unbounded wait, used by the destructor to guarantee a full drain.
            mCondition.wait(lock, [this]() { return !mWorkerRunning; });
        }
        else
        {
            const auto deadline = std::chrono::steady_clock::now() + timeout;
            if (!mCondition.wait_until(lock, deadline, [this]() { return !mWorkerRunning; }))
            {
                error = "Timed out while flushing persistence writer.";
                return false;
            }
        }
    }
    lock.unlock();
    // Join outside the lock; the worker has already signalled completion.
    if (mWorker.joinable())
        mWorker.join();
    return true;
}
PersistenceWriterMetrics PersistenceWriter::GetMetrics() const
{
    // Sample every counter under a single lock acquisition so the returned
    // snapshot is internally consistent.
    std::lock_guard<std::mutex> guard(mMutex);
    PersistenceWriterMetrics sampled;
    sampled.pendingCount = PendingCountLocked();
    sampled.enqueuedCount = mEnqueuedCount;
    sampled.coalescedCount = mCoalescedCount;
    sampled.writtenCount = mWrittenCount;
    sampled.failedCount = mFailedCount;
    return sampled;
}
bool PersistenceWriter::ValidateSnapshot(const PersistenceSnapshot& snapshot, std::string& error) const
{
    // The target path is the only mandatory field; everything else defaults.
    const bool hasTarget = !snapshot.targetPath.empty();
    if (!hasTarget)
        error = "Persistence snapshot target path is empty.";
    return hasTarget;
}
// Write the snapshot either through the injected sink (when one was supplied)
// or, by default, via an atomic write-temp-then-rename to the target path so a
// crash mid-write never leaves a truncated destination file.
bool PersistenceWriter::WriteSnapshotThroughSink(const PersistenceSnapshot& snapshot, std::string& error) const
{
    if (mSink)
        return mSink(snapshot, error);
    // Best-effort directory creation; a failure here surfaces below when the
    // temp file cannot be opened.
    std::error_code fsError;
    std::filesystem::create_directories(snapshot.targetPath.parent_path(), fsError);
    const std::filesystem::path temporaryPath = snapshot.targetPath.string() + ".tmp";
    std::ofstream output(temporaryPath, std::ios::binary | std::ios::trunc);
    if (!output)
    {
        error = "Could not write file: " + temporaryPath.string();
        return false;
    }
    output << snapshot.contents;
    output.close();
    // close() sets failbit on flush failure, so good() catches late errors.
    if (!output.good())
    {
        error = "Could not finish writing file: " + temporaryPath.string();
        return false;
    }
    // Atomically replace the destination; MOVEFILE_WRITE_THROUGH makes the
    // rename durable before the call returns.
    if (!MoveFileExA(temporaryPath.string().c_str(), snapshot.targetPath.string().c_str(), MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH))
    {
        const DWORD lastError = GetLastError();
        // Clean up the orphaned temp file before reporting the failure.
        std::filesystem::remove(temporaryPath, fsError);
        error = "Could not replace file: " + snapshot.targetPath.string() + " (Win32 error " + std::to_string(lastError) + ")";
        return false;
    }
    return true;
}
void PersistenceWriter::PublishWriteResult(const PersistenceSnapshot& snapshot, bool succeeded, const std::string& errorMessage, bool newerRequestPending)
{
ResultCallback callback;
{
std::lock_guard<std::mutex> lock(mMutex);
callback = mResultCallback;
}
if (!callback)
return;
PersistenceWriteResult result;
result.targetKind = snapshot.targetKind;
result.targetPath = snapshot.targetPath.string();
result.reason = snapshot.reason;
result.succeeded = succeeded;
result.errorMessage = errorMessage;
result.newerRequestPending = newerRequestPending;
callback(result);
}
void PersistenceWriter::StartWorkerLocked()
{
    // Lazily spin up the background thread; caller must hold mMutex.
    if (!mWorkerRunning)
    {
        mWorkerRunning = true;
        mWorker = std::thread([this]() { WorkerMain(); });
    }
}
// Background worker loop: repeatedly pick the next snapshot (immediate entries
// first, then any debounced entry whose delay has elapsed), write it outside
// the lock, and publish the result. Exits once stopping is requested and both
// queues are empty.
void PersistenceWriter::WorkerMain()
{
    for (;;)
    {
        PersistenceSnapshot snapshot;
        {
            std::unique_lock<std::mutex> lock(mMutex);
            for (;;)
            {
                // Immediate snapshots take priority over debounced ones.
                if (!mImmediateSnapshots.empty())
                {
                    snapshot = std::move(mImmediateSnapshots.front());
                    mImmediateSnapshots.pop_front();
                    break;
                }
                if (!mDebouncedSnapshots.empty())
                {
                    // Scan for the first entry that is ready now, tracking the
                    // earliest future deadline in case none is.
                    const auto now = std::chrono::steady_clock::now();
                    auto readyIt = mDebouncedSnapshots.end();
                    auto nextReadyAt = (std::chrono::steady_clock::time_point::max)();
                    for (auto it = mDebouncedSnapshots.begin(); it != mDebouncedSnapshots.end(); ++it)
                    {
                        if (it->second.readyAt <= now)
                        {
                            readyIt = it;
                            break;
                        }
                        if (it->second.readyAt < nextReadyAt)
                            nextReadyAt = it->second.readyAt;
                    }
                    if (readyIt != mDebouncedSnapshots.end())
                    {
                        snapshot = std::move(readyIt->second.snapshot);
                        mDebouncedSnapshots.erase(readyIt);
                        break;
                    }
                    // Nothing ready yet: sleep until the earliest deadline or
                    // an enqueue/stop notification, then re-evaluate.
                    mCondition.wait_until(lock, nextReadyAt);
                    continue;
                }
                if (mStopping)
                {
                    // Both queues drained and shutdown requested: signal any
                    // StopAndFlush waiters, then exit the thread.
                    mWorkerRunning = false;
                    mCondition.notify_all();
                    return;
                }
                mCondition.wait(lock);
            }
        }
        // Write outside the lock so enqueues are never blocked by disk I/O.
        std::string error;
        const bool succeeded = WriteSnapshotThroughSink(snapshot, error);
        bool newerRequestPending = false;
        {
            std::lock_guard<std::mutex> lock(mMutex);
            if (succeeded)
                ++mWrittenCount;
            else
                ++mFailedCount;
            // Tell the callback whether more work is already queued behind
            // this snapshot.
            newerRequestPending = PendingCountLocked() > 0;
        }
        PublishWriteResult(snapshot, succeeded, error, newerRequestPending);
    }
}
std::size_t PersistenceWriter::PendingCountLocked() const
{
    // Total outstanding work across both queues; caller must hold mMutex.
    const std::size_t immediate = mImmediateSnapshots.size();
    const std::size_t debounced = mDebouncedSnapshots.size();
    return immediate + debounced;
}

View File

@@ -0,0 +1,80 @@
#pragma once
#include "PersistenceRequest.h"
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <functional>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
// Counters describing the writer's lifetime activity, sampled atomically by
// PersistenceWriter::GetMetrics().
struct PersistenceWriterMetrics
{
    std::size_t pendingCount = 0;  // snapshots queued but not yet written
    uint64_t enqueuedCount = 0;    // distinct snapshots accepted
    uint64_t coalescedCount = 0;   // debounced snapshots replaced before writing
    uint64_t writtenCount = 0;     // successful writes
    uint64_t failedCount = 0;      // failed writes
};
// Outcome of a single snapshot write, delivered through the result callback.
struct PersistenceWriteResult
{
    PersistenceTargetKind targetKind = PersistenceTargetKind::RuntimeState;
    std::string targetPath;           // destination file of the attempted write
    std::string reason;               // reason copied from the snapshot
    bool succeeded = false;
    std::string errorMessage;         // populated when succeeded is false
    bool newerRequestPending = false; // true if more snapshots were queued at completion
};
// Debounced, single-worker-thread file persister. Snapshots are written
// synchronously (WriteSnapshot) or queued (EnqueueSnapshot); queued snapshots
// sharing a debounce key are coalesced so only the latest contents hit disk.
class PersistenceWriter
{
public:
    // Pluggable write destination; when empty, an atomic temp-file-then-rename
    // write to the snapshot's target path is used.
    using SnapshotSink = std::function<bool(const PersistenceSnapshot&, std::string&)>;
    // Invoked after every write attempt (synchronous or queued).
    using ResultCallback = std::function<void(const PersistenceWriteResult&)>;
    explicit PersistenceWriter(
        std::chrono::milliseconds debounceDelay = std::chrono::milliseconds(50),
        SnapshotSink sink = SnapshotSink());
    // Flushes all queued snapshots (unbounded wait) before destruction.
    ~PersistenceWriter();
    void SetResultCallback(ResultCallback callback);
    // Validate and write immediately on the calling thread.
    bool WriteSnapshot(const PersistenceSnapshot& snapshot, std::string& error);
    // Validate and hand off to the background worker (started on demand).
    bool EnqueueSnapshot(const PersistenceSnapshot& snapshot, std::string& error);
    // Reject further requests, drain both queues, and join the worker.
    bool StopAndFlush(std::chrono::milliseconds timeout, std::string& error);
    PersistenceWriterMetrics GetMetrics() const;
private:
    // A debounced snapshot plus the time at which it becomes eligible to write.
    struct PendingSnapshot
    {
        PersistenceSnapshot snapshot;
        std::chrono::steady_clock::time_point readyAt;
    };
    bool ValidateSnapshot(const PersistenceSnapshot& snapshot, std::string& error) const;
    bool WriteSnapshotThroughSink(const PersistenceSnapshot& snapshot, std::string& error) const;
    void PublishWriteResult(const PersistenceSnapshot& snapshot, bool succeeded, const std::string& errorMessage, bool newerRequestPending);
    // Must be called with mMutex held.
    void StartWorkerLocked();
    void WorkerMain();
    // Must be called with mMutex held.
    std::size_t PendingCountLocked() const;
    std::chrono::milliseconds mDebounceDelay;
    SnapshotSink mSink;
    ResultCallback mResultCallback;
    mutable std::mutex mMutex;       // guards all mutable state below
    std::condition_variable mCondition;
    std::thread mWorker;
    bool mWorkerRunning = false;
    bool mStopping = false;
    bool mAcceptingRequests = true;
    std::unordered_map<std::string, PendingSnapshot> mDebouncedSnapshots; // keyed by debounce key
    std::deque<PersistenceSnapshot> mImmediateSnapshots;                  // FIFO, non-debounced
    uint64_t mEnqueuedCount = 0;
    uint64_t mCoalescedCount = 0;
    uint64_t mWrittenCount = 0;
    uint64_t mFailedCount = 0;
};

View File

@@ -51,6 +51,11 @@ JsonValue RuntimeStatePresenter::BuildRuntimeStateValue(const RuntimeStore& runt
deckLink.set("externalKeyingRequested", JsonValue(telemetrySnapshot.videoIO.externalKeyingRequested)); deckLink.set("externalKeyingRequested", JsonValue(telemetrySnapshot.videoIO.externalKeyingRequested));
deckLink.set("externalKeyingActive", JsonValue(telemetrySnapshot.videoIO.externalKeyingActive)); deckLink.set("externalKeyingActive", JsonValue(telemetrySnapshot.videoIO.externalKeyingActive));
deckLink.set("statusMessage", JsonValue(telemetrySnapshot.videoIO.statusMessage)); deckLink.set("statusMessage", JsonValue(telemetrySnapshot.videoIO.statusMessage));
deckLink.set("actualBufferedFramesAvailable", JsonValue(telemetrySnapshot.backendPlayout.actualDeckLinkBufferedFramesAvailable));
deckLink.set("actualBufferedFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.actualDeckLinkBufferedFrames)));
deckLink.set("targetBufferedFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.targetDeckLinkBufferedFrames)));
deckLink.set("scheduleCallMs", JsonValue(telemetrySnapshot.backendPlayout.deckLinkScheduleCallMilliseconds));
deckLink.set("scheduleFailures", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.deckLinkScheduleFailureCount)));
root.set("decklink", deckLink); root.set("decklink", deckLink);
JsonValue videoIO = JsonValue::MakeObject(); JsonValue videoIO = JsonValue::MakeObject();
@@ -80,6 +85,86 @@ JsonValue RuntimeStatePresenter::BuildRuntimeStateValue(const RuntimeStore& runt
performance.set("flushedFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.performance.flushedFrameCount))); performance.set("flushedFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.performance.flushedFrameCount)));
root.set("performance", performance); root.set("performance", performance);
JsonValue readyQueue = JsonValue::MakeObject();
readyQueue.set("depth", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueDepth)));
readyQueue.set("capacity", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueCapacity)));
readyQueue.set("minDepth", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.minReadyQueueDepth)));
readyQueue.set("maxDepth", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.maxReadyQueueDepth)));
readyQueue.set("zeroDepthCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueZeroDepthCount)));
readyQueue.set("pushedCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueuePushedCount)));
readyQueue.set("poppedCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueuePoppedCount)));
readyQueue.set("droppedCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueDroppedCount)));
readyQueue.set("underrunCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueUnderrunCount)));
JsonValue systemMemory = JsonValue::MakeObject();
systemMemory.set("freeFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.systemFramePoolFree)));
systemMemory.set("readyFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.systemFramePoolReady)));
systemMemory.set("scheduledFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.systemFramePoolScheduled)));
systemMemory.set("underrunCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.systemFrameUnderrunCount)));
systemMemory.set("repeatCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.systemFrameRepeatCount)));
systemMemory.set("dropCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.systemFrameDropCount)));
systemMemory.set("ageAtScheduleMs", JsonValue(telemetrySnapshot.backendPlayout.systemFrameAgeAtScheduleMilliseconds));
systemMemory.set("ageAtCompletionMs", JsonValue(telemetrySnapshot.backendPlayout.systemFrameAgeAtCompletionMilliseconds));
JsonValue outputRender = JsonValue::MakeObject();
outputRender.set("renderMs", JsonValue(telemetrySnapshot.backendPlayout.outputRenderMilliseconds));
outputRender.set("smoothedRenderMs", JsonValue(telemetrySnapshot.backendPlayout.smoothedOutputRenderMilliseconds));
outputRender.set("maxRenderMs", JsonValue(telemetrySnapshot.backendPlayout.maxOutputRenderMilliseconds));
outputRender.set("acquireFrameMs", JsonValue(telemetrySnapshot.backendPlayout.outputFrameAcquireMilliseconds));
outputRender.set("renderRequestMs", JsonValue(telemetrySnapshot.backendPlayout.outputFrameRenderRequestMilliseconds));
outputRender.set("endAccessMs", JsonValue(telemetrySnapshot.backendPlayout.outputFrameEndAccessMilliseconds));
outputRender.set("queueWaitMs", JsonValue(telemetrySnapshot.backendPlayout.outputRenderQueueWaitMilliseconds));
outputRender.set("drawMs", JsonValue(telemetrySnapshot.backendPlayout.outputRenderDrawMilliseconds));
outputRender.set("fenceWaitMs", JsonValue(telemetrySnapshot.backendPlayout.outputReadbackFenceWaitMilliseconds));
outputRender.set("mapMs", JsonValue(telemetrySnapshot.backendPlayout.outputReadbackMapMilliseconds));
outputRender.set("readbackCopyMs", JsonValue(telemetrySnapshot.backendPlayout.outputReadbackCopyMilliseconds));
outputRender.set("cachedCopyMs", JsonValue(telemetrySnapshot.backendPlayout.outputCachedCopyMilliseconds));
outputRender.set("asyncQueueMs", JsonValue(telemetrySnapshot.backendPlayout.outputAsyncQueueMilliseconds));
outputRender.set("asyncQueueBufferMs", JsonValue(telemetrySnapshot.backendPlayout.outputAsyncQueueBufferMilliseconds));
outputRender.set("asyncQueueSetupMs", JsonValue(telemetrySnapshot.backendPlayout.outputAsyncQueueSetupMilliseconds));
outputRender.set("asyncQueueReadPixelsMs", JsonValue(telemetrySnapshot.backendPlayout.outputAsyncQueueReadPixelsMilliseconds));
outputRender.set("asyncQueueFenceMs", JsonValue(telemetrySnapshot.backendPlayout.outputAsyncQueueFenceMilliseconds));
outputRender.set("syncReadMs", JsonValue(telemetrySnapshot.backendPlayout.outputSyncReadMilliseconds));
outputRender.set("asyncReadbackMissCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.outputAsyncReadbackMissCount)));
outputRender.set("cachedFallbackCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.outputCachedFallbackCount)));
outputRender.set("syncFallbackCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.outputSyncFallbackCount)));
JsonValue recovery = JsonValue::MakeObject();
recovery.set("completionResult", JsonValue(telemetrySnapshot.backendPlayout.completionResult));
recovery.set("completedFrameIndex", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.completedFrameIndex)));
recovery.set("scheduledFrameIndex", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.scheduledFrameIndex)));
recovery.set("scheduledLeadFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.scheduledLeadFrames)));
recovery.set("syntheticScheduledLeadFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.scheduledLeadFrames)));
recovery.set("measuredLagFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.measuredLagFrames)));
recovery.set("catchUpFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.catchUpFrames)));
recovery.set("lateStreak", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.lateStreak)));
recovery.set("dropStreak", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.dropStreak)));
JsonValue deckLinkPlayout = JsonValue::MakeObject();
deckLinkPlayout.set("actualBufferedFramesAvailable", JsonValue(telemetrySnapshot.backendPlayout.actualDeckLinkBufferedFramesAvailable));
deckLinkPlayout.set("actualBufferedFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.actualDeckLinkBufferedFrames)));
deckLinkPlayout.set("targetBufferedFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.targetDeckLinkBufferedFrames)));
deckLinkPlayout.set("scheduleCallMs", JsonValue(telemetrySnapshot.backendPlayout.deckLinkScheduleCallMilliseconds));
deckLinkPlayout.set("scheduleFailures", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.deckLinkScheduleFailureCount)));
JsonValue scheduler = JsonValue::MakeObject();
scheduler.set("syntheticLeadFrames", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.scheduledLeadFrames)));
JsonValue backendPlayout = JsonValue::MakeObject();
backendPlayout.set("lifecycleState", JsonValue(telemetrySnapshot.backendPlayout.lifecycleState));
backendPlayout.set("degraded", JsonValue(telemetrySnapshot.backendPlayout.degraded));
backendPlayout.set("statusMessage", JsonValue(telemetrySnapshot.backendPlayout.statusMessage));
backendPlayout.set("lateFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.lateFrameCount)));
backendPlayout.set("droppedFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.droppedFrameCount)));
backendPlayout.set("flushedFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.flushedFrameCount)));
backendPlayout.set("readyQueue", readyQueue);
backendPlayout.set("systemMemory", systemMemory);
backendPlayout.set("outputRender", outputRender);
backendPlayout.set("decklink", deckLinkPlayout);
backendPlayout.set("scheduler", scheduler);
backendPlayout.set("recovery", recovery);
root.set("backendPlayout", backendPlayout);
JsonValue eventQueue = JsonValue::MakeObject(); JsonValue eventQueue = JsonValue::MakeObject();
eventQueue.set("name", JsonValue(telemetrySnapshot.runtimeEvents.queue.queueName)); eventQueue.set("name", JsonValue(telemetrySnapshot.runtimeEvents.queue.queueName));
eventQueue.set("depth", JsonValue(static_cast<double>(telemetrySnapshot.runtimeEvents.queue.depth))); eventQueue.set("depth", JsonValue(static_cast<double>(telemetrySnapshot.runtimeEvents.queue.depth)));

View File

@@ -89,7 +89,7 @@ bool RenderSnapshotBuilder::TryBuildLayerRenderStates(unsigned outputWidth, unsi
bool RenderSnapshotBuilder::TryRefreshLayerParameters(std::vector<RuntimeRenderState>& states) const bool RenderSnapshotBuilder::TryRefreshLayerParameters(std::vector<RuntimeRenderState>& states) const
{ {
RefreshLayerParameters(mRuntimeStore.CopyLayerStates(), states); RefreshLayerParameters(mRuntimeStore.CopyCommittedLiveLayerStates(), states);
return true; return true;
} }
@@ -113,10 +113,10 @@ void RenderSnapshotBuilder::BuildLayerRenderStates(unsigned outputWidth, unsigne
{ {
states.clear(); states.clear();
for (const LayerStackStore::LayerPersistentState& layer : readModel.layers) for (const LayerStackStore::LayerPersistentState& layer : readModel.committedLiveState.layers)
{ {
auto shaderIt = readModel.packagesById.find(layer.shaderId); auto shaderIt = readModel.committedLiveState.packagesById.find(layer.shaderId);
if (shaderIt == readModel.packagesById.end()) if (shaderIt == readModel.committedLiveState.packagesById.end())
continue; continue;
const ShaderPackage& shaderPackage = shaderIt->second; const ShaderPackage& shaderPackage = shaderIt->second;

View File

@@ -7,7 +7,6 @@
#include <mutex> #include <mutex>
#include <random> #include <random>
#include <sstream> #include <sstream>
#include <windows.h>
namespace namespace
{ {
@@ -25,11 +24,25 @@ double GenerateStartupRandom()
return distribution(randomDevice); return distribution(randomDevice);
} }
// Maps a persistence target to the stable string used in telemetry records.
std::string PersistenceTargetKindName(PersistenceTargetKind targetKind)
{
    switch (targetKind)
    {
    case PersistenceTargetKind::RuntimeState:
        return "runtime-state";
    case PersistenceTargetKind::StackPreset:
        return "stack-preset";
    case PersistenceTargetKind::RuntimeConfig:
        return "runtime-config";
    default:
        return "unknown";
    }
}
} }
RuntimeStore::RuntimeStore() : RuntimeStore::RuntimeStore() :
mRenderSnapshotBuilder(*this), mRenderSnapshotBuilder(*this),
mHealthTelemetry(),
mReloadRequested(false), mReloadRequested(false),
mCompileSucceeded(false), mCompileSucceeded(false),
mStartupRandom(GenerateStartupRandom()), mStartupRandom(GenerateStartupRandom()),
@@ -38,6 +51,15 @@ RuntimeStore::RuntimeStore() :
mStartTime(std::chrono::steady_clock::now()), mStartTime(std::chrono::steady_clock::now()),
mLastScanTime((std::chrono::steady_clock::time_point::min)()) mLastScanTime((std::chrono::steady_clock::time_point::min)())
{ {
mPersistenceWriter.SetResultCallback([this](const PersistenceWriteResult& result) {
mHealthTelemetry.RecordPersistenceWriteResult(
result.succeeded,
PersistenceTargetKindName(result.targetKind),
result.targetPath,
result.reason,
result.errorMessage,
result.newerRequestPending);
});
} }
HealthTelemetry& RuntimeStore::GetHealthTelemetry() HealthTelemetry& RuntimeStore::GetHealthTelemetry()
@@ -72,9 +94,9 @@ bool RuntimeStore::InitializeStore(std::string& error)
return false; return false;
if (!ScanShaderPackages(error)) if (!ScanShaderPackages(error))
return false; return false;
mLayerStack.NormalizeLayerIds(); mCommittedLiveState.NormalizeLayerIds();
mLayerStack.EnsureDefaultsForAllLayers(mShaderCatalog); mCommittedLiveState.EnsureDefaultsForAllLayers(mShaderCatalog);
mLayerStack.EnsureDefaultLayer(mShaderCatalog); mCommittedLiveState.EnsureDefaultLayer(mShaderCatalog);
mServerPort = mConfigStore.GetConfig().serverPort; mServerPort = mConfigStore.GetConfig().serverPort;
mAutoReloadEnabled = mConfigStore.GetConfig().autoReload; mAutoReloadEnabled = mConfigStore.GetConfig().autoReload;
@@ -99,6 +121,70 @@ std::string RuntimeStore::BuildPersistentStateJson() const
return RuntimeStatePresenter::BuildRuntimeStateJson(*this); return RuntimeStatePresenter::BuildRuntimeStateJson(*this);
} }
// Thread-safe wrapper: snapshots the committed live state under the store lock.
PersistenceSnapshot RuntimeStore::BuildRuntimeStatePersistenceSnapshot(const PersistenceRequest& request) const
{
    std::lock_guard<std::mutex> lock(mMutex);
    return BuildRuntimeStatePersistenceSnapshotLocked(request);
}
// Serialize the current runtime state and enqueue it on the persistence
// writer. Failures (unsupported target, writer shutting down) are recorded in
// health telemetry and reported through `error`.
bool RuntimeStore::RequestPersistence(const PersistenceRequest& request, std::string& error)
{
    // Only the runtime-state target is routed through this path.
    if (request.targetKind != PersistenceTargetKind::RuntimeState)
    {
        error = "Unsupported persistence request target: " + PersistenceTargetKindName(request.targetKind);
        mHealthTelemetry.RecordPersistenceWriteResult(
            false,
            PersistenceTargetKindName(request.targetKind),
            std::string(),
            request.reason,
            error,
            false);
        return false;
    }
    const PersistenceSnapshot snapshot = BuildRuntimeStatePersistenceSnapshot(request);
    if (mPersistenceWriter.EnqueueSnapshot(snapshot, error))
        return true;
    // Enqueue rejected (e.g. writer stopping): surface the failure to telemetry.
    mHealthTelemetry.RecordPersistenceWriteResult(
        false,
        PersistenceTargetKindName(request.targetKind),
        snapshot.targetPath.string(),
        request.reason,
        error,
        false);
    return false;
}
// Drain all queued persistence work during shutdown. On timeout the failure is
// recorded in telemetry with newerRequestPending=true (work was left behind).
bool RuntimeStore::FlushPersistenceForShutdown(std::chrono::milliseconds timeout, std::string& error)
{
    if (mPersistenceWriter.StopAndFlush(timeout, error))
        return true;
    mHealthTelemetry.RecordPersistenceWriteResult(
        false,
        PersistenceTargetKindName(PersistenceTargetKind::RuntimeState),
        std::string(),
        "shutdown-flush",
        error,
        true);
    return false;
}
// Caller must hold mMutex. Serializes the committed live state to JSON and
// copies the request's routing/debounce metadata onto the snapshot.
PersistenceSnapshot RuntimeStore::BuildRuntimeStatePersistenceSnapshotLocked(const PersistenceRequest& request) const
{
    PersistenceSnapshot snapshot;
    snapshot.targetKind = PersistenceTargetKind::RuntimeState;
    snapshot.targetPath = mConfigStore.GetRuntimeStatePath();
    snapshot.contents = SerializeJson(mCommittedLiveState.BuildPersistentStateValue(mShaderCatalog), true);
    snapshot.reason = request.reason;
    snapshot.debounceKey = request.debounceKey;
    snapshot.debounceAllowed = request.debounceAllowed;
    snapshot.flushRequested = request.flushRequested;
    // The request's sequence number becomes the snapshot generation.
    snapshot.generation = request.sequence;
    return snapshot;
}
bool RuntimeStore::PollStoredFileChanges(bool& registryChanged, bool& reloadRequested, std::string& error) bool RuntimeStore::PollStoredFileChanges(bool& registryChanged, bool& reloadRequested, std::string& error)
{ {
try try
@@ -133,8 +219,8 @@ bool RuntimeStore::PollStoredFileChanges(bool& registryChanged, bool& reloadRequ
registryChanged = mShaderCatalog.HasCatalogChangedSince(previousCatalog); registryChanged = mShaderCatalog.HasCatalogChangedSince(previousCatalog);
mLayerStack.EnsureDefaultsForAllLayers(mShaderCatalog); mCommittedLiveState.EnsureDefaultsForAllLayers(mShaderCatalog);
for (RuntimeStore::LayerPersistentState& layer : mLayerStack.Layers()) for (RuntimeStore::LayerPersistentState& layer : mCommittedLiveState.Layers())
{ {
const ShaderPackage* active = mShaderCatalog.FindPackage(layer.shaderId); const ShaderPackage* active = mShaderCatalog.FindPackage(layer.shaderId);
if (!active) if (!active)
@@ -163,101 +249,102 @@ bool RuntimeStore::PollStoredFileChanges(bool& registryChanged, bool& reloadRequ
bool RuntimeStore::CreateStoredLayer(const std::string& shaderId, std::string& error) bool RuntimeStore::CreateStoredLayer(const std::string& shaderId, std::string& error)
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
if (!mLayerStack.CreateLayer(mShaderCatalog, shaderId, error)) if (!mCommittedLiveState.CreateLayer(mShaderCatalog, shaderId, error))
return false; return false;
mReloadRequested = true; mReloadRequested = true;
MarkRenderStateDirtyLocked(); MarkRenderStateDirtyLocked();
return SavePersistentState(error); return true;
} }
bool RuntimeStore::DeleteStoredLayer(const std::string& layerId, std::string& error) bool RuntimeStore::DeleteStoredLayer(const std::string& layerId, std::string& error)
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
if (!mLayerStack.DeleteLayer(layerId, error)) if (!mCommittedLiveState.DeleteLayer(layerId, error))
return false; return false;
mReloadRequested = true; mReloadRequested = true;
MarkRenderStateDirtyLocked(); MarkRenderStateDirtyLocked();
return SavePersistentState(error); return true;
} }
bool RuntimeStore::MoveStoredLayer(const std::string& layerId, int direction, std::string& error) bool RuntimeStore::MoveStoredLayer(const std::string& layerId, int direction, std::string& error)
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
bool shouldMove = false; bool shouldMove = false;
if (!mLayerStack.ResolveLayerMove(layerId, direction, shouldMove, error)) if (!mCommittedLiveState.ResolveLayerMove(layerId, direction, shouldMove, error))
return false; return false;
if (!shouldMove) if (!shouldMove)
return true; return true;
if (!mLayerStack.MoveLayer(layerId, direction, error)) if (!mCommittedLiveState.MoveLayer(layerId, direction, error))
return false; return false;
mReloadRequested = true; mReloadRequested = true;
MarkRenderStateDirtyLocked(); MarkRenderStateDirtyLocked();
return SavePersistentState(error); return true;
} }
bool RuntimeStore::MoveStoredLayerToIndex(const std::string& layerId, std::size_t targetIndex, std::string& error) bool RuntimeStore::MoveStoredLayerToIndex(const std::string& layerId, std::size_t targetIndex, std::string& error)
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
bool shouldMove = false; bool shouldMove = false;
if (!mLayerStack.ResolveLayerMoveToIndex(layerId, targetIndex, shouldMove, error)) if (!mCommittedLiveState.ResolveLayerMoveToIndex(layerId, targetIndex, shouldMove, error))
return false; return false;
if (!shouldMove) if (!shouldMove)
return true; return true;
if (!mLayerStack.MoveLayerToIndex(layerId, targetIndex, error)) if (!mCommittedLiveState.MoveLayerToIndex(layerId, targetIndex, error))
return false; return false;
mReloadRequested = true; mReloadRequested = true;
MarkRenderStateDirtyLocked(); MarkRenderStateDirtyLocked();
return SavePersistentState(error); return true;
} }
bool RuntimeStore::SetStoredLayerBypassState(const std::string& layerId, bool bypassed, std::string& error) bool RuntimeStore::SetStoredLayerBypassState(const std::string& layerId, bool bypassed, std::string& error)
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
if (!mLayerStack.SetLayerBypassState(layerId, bypassed, error)) if (!mCommittedLiveState.SetLayerBypassState(layerId, bypassed, error))
return false; return false;
mReloadRequested = true; mReloadRequested = true;
MarkParameterStateDirtyLocked(); MarkParameterStateDirtyLocked();
return SavePersistentState(error); return true;
} }
bool RuntimeStore::SetStoredLayerShaderSelection(const std::string& layerId, const std::string& shaderId, std::string& error) bool RuntimeStore::SetStoredLayerShaderSelection(const std::string& layerId, const std::string& shaderId, std::string& error)
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
if (!mLayerStack.SetLayerShaderSelection(mShaderCatalog, layerId, shaderId, error)) if (!mCommittedLiveState.SetLayerShaderSelection(mShaderCatalog, layerId, shaderId, error))
return false; return false;
mReloadRequested = true; mReloadRequested = true;
MarkRenderStateDirtyLocked(); MarkRenderStateDirtyLocked();
return SavePersistentState(error); return true;
} }
bool RuntimeStore::SetStoredParameterValue(const std::string& layerId, const std::string& parameterId, const ShaderParameterValue& value, bool persistState, std::string& error) bool RuntimeStore::SetStoredParameterValue(const std::string& layerId, const std::string& parameterId, const ShaderParameterValue& value, bool persistState, std::string& error)
{ {
(void)persistState;
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
if (!mLayerStack.SetParameterValue(layerId, parameterId, value, error)) if (!mCommittedLiveState.SetParameterValue(layerId, parameterId, value, error))
return false; return false;
MarkParameterStateDirtyLocked(); MarkParameterStateDirtyLocked();
return !persistState || SavePersistentState(error); return true;
} }
bool RuntimeStore::ResetStoredLayerParameterValues(const std::string& layerId, std::string& error) bool RuntimeStore::ResetStoredLayerParameterValues(const std::string& layerId, std::string& error)
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
if (!mLayerStack.ResetLayerParameterValues(mShaderCatalog, layerId, error)) if (!mCommittedLiveState.ResetLayerParameterValues(mShaderCatalog, layerId, error))
return false; return false;
MarkParameterStateDirtyLocked(); MarkParameterStateDirtyLocked();
return SavePersistentState(error); return true;
} }
bool RuntimeStore::SaveStackPresetSnapshot(const std::string& presetName, std::string& error) const bool RuntimeStore::SaveStackPresetSnapshot(const std::string& presetName, std::string& error) const
@@ -270,10 +357,7 @@ bool RuntimeStore::SaveStackPresetSnapshot(const std::string& presetName, std::s
return false; return false;
} }
JsonValue root = JsonValue::MakeObject(); return mPersistenceWriter.WriteSnapshot(BuildStackPresetPersistenceSnapshot(presetName), error);
root = mLayerStack.BuildStackPresetValue(mShaderCatalog, presetName);
return WriteTextFile(mConfigStore.GetPresetRoot() / (safeStem + ".json"), SerializeJson(root, true), error);
} }
bool RuntimeStore::LoadStackPresetSnapshot(const std::string& presetName, std::string& error) bool RuntimeStore::LoadStackPresetSnapshot(const std::string& presetName, std::string& error)
@@ -295,18 +379,18 @@ bool RuntimeStore::LoadStackPresetSnapshot(const std::string& presetName, std::s
if (!ParseJson(presetText, root, error)) if (!ParseJson(presetText, root, error))
return false; return false;
if (!mLayerStack.LoadStackPresetValue(mShaderCatalog, root, error)) if (!mCommittedLiveState.LoadStackPresetValue(mShaderCatalog, root, error))
return false; return false;
mReloadRequested = true; mReloadRequested = true;
MarkRenderStateDirtyLocked(); MarkRenderStateDirtyLocked();
return SavePersistentState(error); return true;
} }
bool RuntimeStore::HasStoredLayer(const std::string& layerId) const bool RuntimeStore::HasStoredLayer(const std::string& layerId) const
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
return mLayerStack.HasLayer(layerId); return mCommittedLiveState.HasLayer(layerId);
} }
bool RuntimeStore::HasStoredShader(const std::string& shaderId) const bool RuntimeStore::HasStoredShader(const std::string& shaderId) const
@@ -319,26 +403,26 @@ bool RuntimeStore::TryGetStoredParameterById(const std::string& layerId, const s
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
return mLayerStack.TryGetParameterById(mShaderCatalog, layerId, parameterId, snapshot, error); return mCommittedLiveState.TryGetParameterById(mShaderCatalog, layerId, parameterId, snapshot, error);
} }
bool RuntimeStore::TryGetStoredParameterByControlKey(const std::string& layerKey, const std::string& parameterKey, StoredParameterSnapshot& snapshot, std::string& error) const bool RuntimeStore::TryGetStoredParameterByControlKey(const std::string& layerKey, const std::string& parameterKey, StoredParameterSnapshot& snapshot, std::string& error) const
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
return mLayerStack.TryGetParameterByControlKey(mShaderCatalog, layerKey, parameterKey, snapshot, error); return mCommittedLiveState.TryGetParameterByControlKey(mShaderCatalog, layerKey, parameterKey, snapshot, error);
} }
bool RuntimeStore::ResolveStoredLayerMove(const std::string& layerId, int direction, bool& shouldMove, std::string& error) const bool RuntimeStore::ResolveStoredLayerMove(const std::string& layerId, int direction, bool& shouldMove, std::string& error) const
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
return mLayerStack.ResolveLayerMove(layerId, direction, shouldMove, error); return mCommittedLiveState.ResolveLayerMove(layerId, direction, shouldMove, error);
} }
bool RuntimeStore::ResolveStoredLayerMoveToIndex(const std::string& layerId, std::size_t targetIndex, bool& shouldMove, std::string& error) const bool RuntimeStore::ResolveStoredLayerMoveToIndex(const std::string& layerId, std::size_t targetIndex, bool& shouldMove, std::string& error) const
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
return mLayerStack.ResolveLayerMoveToIndex(layerId, targetIndex, shouldMove, error); return mCommittedLiveState.ResolveLayerMoveToIndex(layerId, targetIndex, shouldMove, error);
} }
bool RuntimeStore::IsValidStackPresetName(const std::string& presetName) const bool RuntimeStore::IsValidStackPresetName(const std::string& presetName) const
@@ -460,12 +544,23 @@ bool RuntimeStore::LoadPersistentState(std::string& error)
if (!ParseJson(stateText, root, error)) if (!ParseJson(stateText, root, error))
return false; return false;
return mLayerStack.LoadPersistentStateValue(root); return mCommittedLiveState.LoadPersistentStateValue(root);
} }
bool RuntimeStore::SavePersistentState(std::string& error) const PersistenceSnapshot RuntimeStore::BuildStackPresetPersistenceSnapshot(const std::string& presetName) const
{ {
return WriteTextFile(mConfigStore.GetRuntimeStatePath(), SerializeJson(mLayerStack.BuildPersistentStateValue(mShaderCatalog), true), error); const std::string safeStem = LayerStackStore::MakeSafePresetFileStem(presetName);
PersistenceSnapshot snapshot;
snapshot.targetKind = PersistenceTargetKind::StackPreset;
snapshot.targetPath = mConfigStore.GetPresetRoot() / (safeStem + ".json");
snapshot.contents = SerializeJson(mCommittedLiveState.BuildStackPresetValue(mShaderCatalog, presetName), true);
snapshot.reason = "SaveStackPreset";
snapshot.debounceKey = "stack-preset:" + safeStem;
snapshot.debounceAllowed = false;
snapshot.flushRequested = true;
snapshot.generation = 0;
return snapshot;
} }
bool RuntimeStore::ScanShaderPackages(std::string& error) bool RuntimeStore::ScanShaderPackages(std::string& error)
@@ -473,7 +568,7 @@ bool RuntimeStore::ScanShaderPackages(std::string& error)
if (!mShaderCatalog.Scan(mConfigStore.GetShaderRoot(), mConfigStore.GetConfig().maxTemporalHistoryFrames, error)) if (!mShaderCatalog.Scan(mConfigStore.GetShaderRoot(), mConfigStore.GetConfig().maxTemporalHistoryFrames, error))
return false; return false;
mLayerStack.RemoveLayersWithMissingPackages(mShaderCatalog); mCommittedLiveState.RemoveLayersWithMissingPackages(mShaderCatalog);
MarkRenderStateDirtyLocked(); MarkRenderStateDirtyLocked();
return true; return true;
@@ -493,38 +588,6 @@ std::string RuntimeStore::ReadTextFile(const std::filesystem::path& path, std::s
return buffer.str(); return buffer.str();
} }
bool RuntimeStore::WriteTextFile(const std::filesystem::path& path, const std::string& contents, std::string& error) const
{
std::error_code fsError;
std::filesystem::create_directories(path.parent_path(), fsError);
const std::filesystem::path temporaryPath = path.string() + ".tmp";
std::ofstream output(temporaryPath, std::ios::binary | std::ios::trunc);
if (!output)
{
error = "Could not write file: " + temporaryPath.string();
return false;
}
output << contents;
output.close();
if (!output.good())
{
error = "Could not finish writing file: " + temporaryPath.string();
return false;
}
if (!MoveFileExA(temporaryPath.string().c_str(), path.string().c_str(), MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH))
{
const DWORD lastError = GetLastError();
std::filesystem::remove(temporaryPath, fsError);
error = "Could not replace file: " + path.string() + " (Win32 error " + std::to_string(lastError) + ")";
return false;
}
return true;
}
std::vector<std::string> RuntimeStore::GetStackPresetNamesLocked() const std::vector<std::string> RuntimeStore::GetStackPresetNamesLocked() const
{ {
std::vector<std::string> presetNames; std::vector<std::string> presetNames;
@@ -548,7 +611,7 @@ std::vector<std::string> RuntimeStore::GetStackPresetNamesLocked() const
bool RuntimeStore::CopyShaderPackageForStoredLayer(const std::string& layerId, ShaderPackage& shaderPackage, std::string& error) const bool RuntimeStore::CopyShaderPackageForStoredLayer(const std::string& layerId, ShaderPackage& shaderPackage, std::string& error) const
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
const RuntimeStore::LayerPersistentState* layer = mLayerStack.FindLayerById(layerId); const RuntimeStore::LayerPersistentState* layer = mCommittedLiveState.FindLayerById(layerId);
if (!layer) if (!layer)
{ {
error = "Unknown layer id: " + layerId; error = "Unknown layer id: " + layerId;
@@ -576,22 +639,32 @@ ShaderCompilerInputs RuntimeStore::GetShaderCompilerInputs() const
return inputs; return inputs;
} }
CommittedLiveStateReadModel RuntimeStore::BuildCommittedLiveStateReadModel() const
{
std::lock_guard<std::mutex> lock(mMutex);
return mCommittedLiveState.BuildReadModel(mShaderCatalog);
}
RenderSnapshotReadModel RuntimeStore::BuildRenderSnapshotReadModel() const RenderSnapshotReadModel RuntimeStore::BuildRenderSnapshotReadModel() const
{ {
RenderSnapshotReadModel model; RenderSnapshotReadModel model;
model.signalStatus = mHealthTelemetry.GetSignalStatusSnapshot(); model.signalStatus = mHealthTelemetry.GetSignalStatusSnapshot();
model.committedLiveState = BuildCommittedLiveStateReadModel();
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
model.layers = mLayerStack.Layers();
model.packagesById = mShaderCatalog.CaptureSnapshot().packagesById;
model.timing.startTime = mStartTime; model.timing.startTime = mStartTime;
model.timing.startupRandom = mStartupRandom; model.timing.startupRandom = mStartupRandom;
return model; return model;
} }
std::vector<RuntimeStore::LayerPersistentState> RuntimeStore::CopyLayerStates() const std::vector<RuntimeStore::LayerPersistentState> RuntimeStore::CopyCommittedLiveLayerStates() const
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
return mLayerStack.Layers(); return mCommittedLiveState.CopyLayerStates();
}
std::vector<RuntimeStore::LayerPersistentState> RuntimeStore::CopyLayerStates() const
{
return CopyCommittedLiveLayerStates();
} }
RenderTimingSnapshot RuntimeStore::GetRenderTimingSnapshot() const RenderTimingSnapshot RuntimeStore::GetRenderTimingSnapshot() const
@@ -609,7 +682,7 @@ RuntimeStatePresentationReadModel RuntimeStore::BuildRuntimeStatePresentationRea
model.telemetry = mHealthTelemetry.GetSnapshot(); model.telemetry = mHealthTelemetry.GetSnapshot();
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
model.config = mConfigStore.GetConfig(); model.config = mConfigStore.GetConfig();
model.layerStack = mLayerStack; model.layerStack = mCommittedLiveState.LayerStack();
model.shaderCatalog = mShaderCatalog.CaptureSnapshot(); model.shaderCatalog = mShaderCatalog.CaptureSnapshot();
model.packageStatuses = mShaderCatalog.PackageStatuses(); model.packageStatuses = mShaderCatalog.PackageStatuses();
model.stackPresetNames = GetStackPresetNamesLocked(); model.stackPresetNames = GetStackPresetNamesLocked();

View File

@@ -1,7 +1,9 @@
#pragma once #pragma once
#include "HealthTelemetry.h" #include "HealthTelemetry.h"
#include "CommittedLiveState.h"
#include "LayerStackStore.h" #include "LayerStackStore.h"
#include "PersistenceWriter.h"
#include "RenderSnapshotBuilder.h" #include "RenderSnapshotBuilder.h"
#include "RuntimeConfigStore.h" #include "RuntimeConfigStore.h"
#include "RuntimeJson.h" #include "RuntimeJson.h"
@@ -29,6 +31,9 @@ public:
bool InitializeStore(std::string& error); bool InitializeStore(std::string& error);
std::string BuildPersistentStateJson() const; std::string BuildPersistentStateJson() const;
PersistenceSnapshot BuildRuntimeStatePersistenceSnapshot(const PersistenceRequest& request) const;
bool RequestPersistence(const PersistenceRequest& request, std::string& error);
bool FlushPersistenceForShutdown(std::chrono::milliseconds timeout, std::string& error);
bool PollStoredFileChanges(bool& registryChanged, bool& reloadRequested, std::string& error); bool PollStoredFileChanges(bool& registryChanged, bool& reloadRequested, std::string& error);
bool CreateStoredLayer(const std::string& shaderId, std::string& error); bool CreateStoredLayer(const std::string& shaderId, std::string& error);
@@ -71,17 +76,19 @@ public:
void ClearReloadRequest(); void ClearReloadRequest();
bool CopyShaderPackageForStoredLayer(const std::string& layerId, ShaderPackage& shaderPackage, std::string& error) const; bool CopyShaderPackageForStoredLayer(const std::string& layerId, ShaderPackage& shaderPackage, std::string& error) const;
::ShaderCompilerInputs GetShaderCompilerInputs() const; ::ShaderCompilerInputs GetShaderCompilerInputs() const;
::CommittedLiveStateReadModel BuildCommittedLiveStateReadModel() const;
::RenderSnapshotReadModel BuildRenderSnapshotReadModel() const; ::RenderSnapshotReadModel BuildRenderSnapshotReadModel() const;
std::vector<LayerPersistentState> CopyCommittedLiveLayerStates() const;
std::vector<LayerPersistentState> CopyLayerStates() const; std::vector<LayerPersistentState> CopyLayerStates() const;
::RenderTimingSnapshot GetRenderTimingSnapshot() const; ::RenderTimingSnapshot GetRenderTimingSnapshot() const;
::RuntimeStatePresentationReadModel BuildRuntimeStatePresentationReadModel() const; ::RuntimeStatePresentationReadModel BuildRuntimeStatePresentationReadModel() const;
private: private:
bool LoadPersistentState(std::string& error); bool LoadPersistentState(std::string& error);
bool SavePersistentState(std::string& error) const; PersistenceSnapshot BuildRuntimeStatePersistenceSnapshotLocked(const PersistenceRequest& request) const;
PersistenceSnapshot BuildStackPresetPersistenceSnapshot(const std::string& presetName) const;
bool ScanShaderPackages(std::string& error); bool ScanShaderPackages(std::string& error);
std::string ReadTextFile(const std::filesystem::path& path, std::string& error) const; std::string ReadTextFile(const std::filesystem::path& path, std::string& error) const;
bool WriteTextFile(const std::filesystem::path& path, const std::string& contents, std::string& error) const;
std::vector<std::string> GetStackPresetNamesLocked() const; std::vector<std::string> GetStackPresetNamesLocked() const;
void MarkRenderStateDirtyLocked(); void MarkRenderStateDirtyLocked();
void MarkParameterStateDirtyLocked(); void MarkParameterStateDirtyLocked();
@@ -89,8 +96,9 @@ private:
RenderSnapshotBuilder mRenderSnapshotBuilder; RenderSnapshotBuilder mRenderSnapshotBuilder;
RuntimeConfigStore mConfigStore; RuntimeConfigStore mConfigStore;
ShaderPackageCatalog mShaderCatalog; ShaderPackageCatalog mShaderCatalog;
LayerStackStore mLayerStack; CommittedLiveState mCommittedLiveState;
HealthTelemetry mHealthTelemetry; HealthTelemetry mHealthTelemetry;
mutable PersistenceWriter mPersistenceWriter;
mutable std::mutex mMutex; mutable std::mutex mMutex;
bool mReloadRequested; bool mReloadRequested;
bool mCompileSucceeded; bool mCompileSucceeded;

View File

@@ -27,10 +27,15 @@ struct RenderTimingSnapshot
double startupRandom = 0.0; double startupRandom = 0.0;
}; };
struct RenderSnapshotReadModel struct CommittedLiveStateReadModel
{ {
std::vector<LayerStackStore::LayerPersistentState> layers; std::vector<LayerStackStore::LayerPersistentState> layers;
std::map<std::string, ShaderPackage> packagesById; std::map<std::string, ShaderPackage> packagesById;
};
struct RenderSnapshotReadModel
{
CommittedLiveStateReadModel committedLiveState;
HealthTelemetry::SignalStatusSnapshot signalStatus; HealthTelemetry::SignalStatusSnapshot signalStatus;
RenderTimingSnapshot timing; RenderTimingSnapshot timing;
}; };

View File

@@ -169,6 +169,286 @@ bool HealthTelemetry::TryRecordRuntimeEventDispatchStats(std::size_t dispatchedE
return true; return true;
} }
void HealthTelemetry::RecordPersistenceWriteResult(bool succeeded, const std::string& targetKind, const std::string& targetPath,
const std::string& reason, const std::string& errorMessage, bool newerRequestPending)
{
std::lock_guard<std::mutex> lock(mMutex);
if (succeeded)
++mPersistence.writeSuccessCount;
else
++mPersistence.writeFailureCount;
mPersistence.lastWriteSucceeded = succeeded;
mPersistence.unsavedChanges = !succeeded || newerRequestPending;
mPersistence.newerRequestPending = newerRequestPending;
mPersistence.lastTargetKind = targetKind;
mPersistence.lastTargetPath = targetPath;
mPersistence.lastReason = reason;
mPersistence.lastErrorMessage = errorMessage;
}
bool HealthTelemetry::TryRecordPersistenceWriteResult(bool succeeded, const std::string& targetKind, const std::string& targetPath,
const std::string& reason, const std::string& errorMessage, bool newerRequestPending)
{
std::unique_lock<std::mutex> lock(mMutex, std::try_to_lock);
if (!lock.owns_lock())
return false;
if (succeeded)
++mPersistence.writeSuccessCount;
else
++mPersistence.writeFailureCount;
mPersistence.lastWriteSucceeded = succeeded;
mPersistence.unsavedChanges = !succeeded || newerRequestPending;
mPersistence.newerRequestPending = newerRequestPending;
mPersistence.lastTargetKind = targetKind;
mPersistence.lastTargetPath = targetPath;
mPersistence.lastReason = reason;
mPersistence.lastErrorMessage = errorMessage;
return true;
}
void HealthTelemetry::RecordBackendPlayoutHealth(const std::string& lifecycleState, const std::string& completionResult,
std::size_t readyQueueDepth, std::size_t readyQueueCapacity, uint64_t readyQueuePushedCount,
std::size_t minReadyQueueDepth, std::size_t maxReadyQueueDepth, uint64_t readyQueueZeroDepthCount,
uint64_t readyQueuePoppedCount, uint64_t readyQueueDroppedCount, uint64_t readyQueueUnderrunCount,
double outputRenderMilliseconds, double smoothedOutputRenderMilliseconds, double maxOutputRenderMilliseconds,
double outputFrameAcquireMilliseconds, double outputFrameRenderRequestMilliseconds, double outputFrameEndAccessMilliseconds,
uint64_t completedFrameIndex, uint64_t scheduledFrameIndex, uint64_t scheduledLeadFrames,
uint64_t measuredLagFrames, uint64_t catchUpFrames, uint64_t lateStreak, uint64_t dropStreak,
uint64_t lateFrameCount, uint64_t droppedFrameCount, uint64_t flushedFrameCount,
bool degraded, const std::string& statusMessage)
{
std::lock_guard<std::mutex> lock(mMutex);
mBackendPlayout.lifecycleState = lifecycleState;
mBackendPlayout.completionResult = completionResult;
mBackendPlayout.readyQueueDepth = readyQueueDepth;
mBackendPlayout.readyQueueCapacity = readyQueueCapacity;
mBackendPlayout.minReadyQueueDepth = minReadyQueueDepth;
mBackendPlayout.maxReadyQueueDepth = maxReadyQueueDepth;
mBackendPlayout.readyQueueZeroDepthCount = readyQueueZeroDepthCount;
mBackendPlayout.readyQueuePushedCount = readyQueuePushedCount;
mBackendPlayout.readyQueuePoppedCount = readyQueuePoppedCount;
mBackendPlayout.readyQueueDroppedCount = readyQueueDroppedCount;
mBackendPlayout.readyQueueUnderrunCount = readyQueueUnderrunCount;
mBackendPlayout.outputRenderMilliseconds = std::max(outputRenderMilliseconds, 0.0);
mBackendPlayout.smoothedOutputRenderMilliseconds = std::max(smoothedOutputRenderMilliseconds, 0.0);
mBackendPlayout.maxOutputRenderMilliseconds = std::max(maxOutputRenderMilliseconds, 0.0);
mBackendPlayout.outputFrameAcquireMilliseconds = std::max(outputFrameAcquireMilliseconds, 0.0);
mBackendPlayout.outputFrameRenderRequestMilliseconds = std::max(outputFrameRenderRequestMilliseconds, 0.0);
mBackendPlayout.outputFrameEndAccessMilliseconds = std::max(outputFrameEndAccessMilliseconds, 0.0);
mBackendPlayout.completedFrameIndex = completedFrameIndex;
mBackendPlayout.scheduledFrameIndex = scheduledFrameIndex;
mBackendPlayout.scheduledLeadFrames = scheduledLeadFrames;
mBackendPlayout.measuredLagFrames = measuredLagFrames;
mBackendPlayout.catchUpFrames = catchUpFrames;
mBackendPlayout.lateStreak = lateStreak;
mBackendPlayout.dropStreak = dropStreak;
mBackendPlayout.lateFrameCount = lateFrameCount;
mBackendPlayout.droppedFrameCount = droppedFrameCount;
mBackendPlayout.flushedFrameCount = flushedFrameCount;
mBackendPlayout.degraded = degraded;
mBackendPlayout.statusMessage = statusMessage;
}
bool HealthTelemetry::TryRecordBackendPlayoutHealth(const std::string& lifecycleState, const std::string& completionResult,
std::size_t readyQueueDepth, std::size_t readyQueueCapacity, uint64_t readyQueuePushedCount,
std::size_t minReadyQueueDepth, std::size_t maxReadyQueueDepth, uint64_t readyQueueZeroDepthCount,
uint64_t readyQueuePoppedCount, uint64_t readyQueueDroppedCount, uint64_t readyQueueUnderrunCount,
double outputRenderMilliseconds, double smoothedOutputRenderMilliseconds, double maxOutputRenderMilliseconds,
double outputFrameAcquireMilliseconds, double outputFrameRenderRequestMilliseconds, double outputFrameEndAccessMilliseconds,
uint64_t completedFrameIndex, uint64_t scheduledFrameIndex, uint64_t scheduledLeadFrames,
uint64_t measuredLagFrames, uint64_t catchUpFrames, uint64_t lateStreak, uint64_t dropStreak,
uint64_t lateFrameCount, uint64_t droppedFrameCount, uint64_t flushedFrameCount,
bool degraded, const std::string& statusMessage)
{
std::unique_lock<std::mutex> lock(mMutex, std::try_to_lock);
if (!lock.owns_lock())
return false;
mBackendPlayout.lifecycleState = lifecycleState;
mBackendPlayout.completionResult = completionResult;
mBackendPlayout.readyQueueDepth = readyQueueDepth;
mBackendPlayout.readyQueueCapacity = readyQueueCapacity;
mBackendPlayout.minReadyQueueDepth = minReadyQueueDepth;
mBackendPlayout.maxReadyQueueDepth = maxReadyQueueDepth;
mBackendPlayout.readyQueueZeroDepthCount = readyQueueZeroDepthCount;
mBackendPlayout.readyQueuePushedCount = readyQueuePushedCount;
mBackendPlayout.readyQueuePoppedCount = readyQueuePoppedCount;
mBackendPlayout.readyQueueDroppedCount = readyQueueDroppedCount;
mBackendPlayout.readyQueueUnderrunCount = readyQueueUnderrunCount;
mBackendPlayout.outputRenderMilliseconds = std::max(outputRenderMilliseconds, 0.0);
mBackendPlayout.smoothedOutputRenderMilliseconds = std::max(smoothedOutputRenderMilliseconds, 0.0);
mBackendPlayout.maxOutputRenderMilliseconds = std::max(maxOutputRenderMilliseconds, 0.0);
mBackendPlayout.outputFrameAcquireMilliseconds = std::max(outputFrameAcquireMilliseconds, 0.0);
mBackendPlayout.outputFrameRenderRequestMilliseconds = std::max(outputFrameRenderRequestMilliseconds, 0.0);
mBackendPlayout.outputFrameEndAccessMilliseconds = std::max(outputFrameEndAccessMilliseconds, 0.0);
mBackendPlayout.completedFrameIndex = completedFrameIndex;
mBackendPlayout.scheduledFrameIndex = scheduledFrameIndex;
mBackendPlayout.scheduledLeadFrames = scheduledLeadFrames;
mBackendPlayout.measuredLagFrames = measuredLagFrames;
mBackendPlayout.catchUpFrames = catchUpFrames;
mBackendPlayout.lateStreak = lateStreak;
mBackendPlayout.dropStreak = dropStreak;
mBackendPlayout.lateFrameCount = lateFrameCount;
mBackendPlayout.droppedFrameCount = droppedFrameCount;
mBackendPlayout.flushedFrameCount = flushedFrameCount;
mBackendPlayout.degraded = degraded;
mBackendPlayout.statusMessage = statusMessage;
return true;
}
void HealthTelemetry::RecordOutputRenderQueueWait(double queueWaitMilliseconds)
{
std::lock_guard<std::mutex> lock(mMutex);
mBackendPlayout.outputRenderQueueWaitMilliseconds = std::max(queueWaitMilliseconds, 0.0);
}
bool HealthTelemetry::TryRecordOutputRenderQueueWait(double queueWaitMilliseconds)
{
std::unique_lock<std::mutex> lock(mMutex, std::try_to_lock);
if (!lock.owns_lock())
return false;
mBackendPlayout.outputRenderQueueWaitMilliseconds = std::max(queueWaitMilliseconds, 0.0);
return true;
}
void HealthTelemetry::RecordSystemMemoryPlayoutStats(std::size_t freeFrameCount, std::size_t readyFrameCount,
std::size_t scheduledFrameCount, uint64_t underrunCount, uint64_t repeatCount, uint64_t dropCount,
double frameAgeAtScheduleMilliseconds, double frameAgeAtCompletionMilliseconds)
{
std::lock_guard<std::mutex> lock(mMutex);
mBackendPlayout.systemFramePoolFree = freeFrameCount;
mBackendPlayout.systemFramePoolReady = readyFrameCount;
mBackendPlayout.systemFramePoolScheduled = scheduledFrameCount;
mBackendPlayout.systemFrameUnderrunCount = underrunCount;
mBackendPlayout.systemFrameRepeatCount = repeatCount;
mBackendPlayout.systemFrameDropCount = dropCount;
mBackendPlayout.systemFrameAgeAtScheduleMilliseconds = std::max(frameAgeAtScheduleMilliseconds, 0.0);
mBackendPlayout.systemFrameAgeAtCompletionMilliseconds = std::max(frameAgeAtCompletionMilliseconds, 0.0);
}
bool HealthTelemetry::TryRecordSystemMemoryPlayoutStats(std::size_t freeFrameCount, std::size_t readyFrameCount,
std::size_t scheduledFrameCount, uint64_t underrunCount, uint64_t repeatCount, uint64_t dropCount,
double frameAgeAtScheduleMilliseconds, double frameAgeAtCompletionMilliseconds)
{
std::unique_lock<std::mutex> lock(mMutex, std::try_to_lock);
if (!lock.owns_lock())
return false;
mBackendPlayout.systemFramePoolFree = freeFrameCount;
mBackendPlayout.systemFramePoolReady = readyFrameCount;
mBackendPlayout.systemFramePoolScheduled = scheduledFrameCount;
mBackendPlayout.systemFrameUnderrunCount = underrunCount;
mBackendPlayout.systemFrameRepeatCount = repeatCount;
mBackendPlayout.systemFrameDropCount = dropCount;
mBackendPlayout.systemFrameAgeAtScheduleMilliseconds = std::max(frameAgeAtScheduleMilliseconds, 0.0);
mBackendPlayout.systemFrameAgeAtCompletionMilliseconds = std::max(frameAgeAtCompletionMilliseconds, 0.0);
return true;
}
void HealthTelemetry::RecordDeckLinkBufferTelemetry(bool actualBufferedFramesAvailable, uint64_t actualBufferedFrames,
std::size_t targetBufferedFrames, double scheduleCallMilliseconds, uint64_t scheduleFailureCount)
{
std::lock_guard<std::mutex> lock(mMutex);
mBackendPlayout.actualDeckLinkBufferedFramesAvailable = actualBufferedFramesAvailable;
mBackendPlayout.actualDeckLinkBufferedFrames = actualBufferedFramesAvailable ? actualBufferedFrames : 0;
mBackendPlayout.targetDeckLinkBufferedFrames = targetBufferedFrames;
mBackendPlayout.deckLinkScheduleCallMilliseconds = std::max(scheduleCallMilliseconds, 0.0);
mBackendPlayout.deckLinkScheduleFailureCount = scheduleFailureCount;
}
bool HealthTelemetry::TryRecordDeckLinkBufferTelemetry(bool actualBufferedFramesAvailable, uint64_t actualBufferedFrames,
std::size_t targetBufferedFrames, double scheduleCallMilliseconds, uint64_t scheduleFailureCount)
{
std::unique_lock<std::mutex> lock(mMutex, std::try_to_lock);
if (!lock.owns_lock())
return false;
mBackendPlayout.actualDeckLinkBufferedFramesAvailable = actualBufferedFramesAvailable;
mBackendPlayout.actualDeckLinkBufferedFrames = actualBufferedFramesAvailable ? actualBufferedFrames : 0;
mBackendPlayout.targetDeckLinkBufferedFrames = targetBufferedFrames;
mBackendPlayout.deckLinkScheduleCallMilliseconds = std::max(scheduleCallMilliseconds, 0.0);
mBackendPlayout.deckLinkScheduleFailureCount = scheduleFailureCount;
return true;
}
void HealthTelemetry::RecordOutputRenderPipelineTiming(
double drawMilliseconds,
double fenceWaitMilliseconds,
double mapMilliseconds,
double readbackCopyMilliseconds,
double cachedCopyMilliseconds,
double asyncQueueMilliseconds,
double asyncQueueBufferMilliseconds,
double asyncQueueSetupMilliseconds,
double asyncQueueReadPixelsMilliseconds,
double asyncQueueFenceMilliseconds,
double syncReadMilliseconds,
bool asyncReadbackMissed,
bool cachedFallbackUsed,
bool syncFallbackUsed)
{
std::lock_guard<std::mutex> lock(mMutex);
mBackendPlayout.outputRenderDrawMilliseconds = std::max(drawMilliseconds, 0.0);
mBackendPlayout.outputReadbackFenceWaitMilliseconds = std::max(fenceWaitMilliseconds, 0.0);
mBackendPlayout.outputReadbackMapMilliseconds = std::max(mapMilliseconds, 0.0);
mBackendPlayout.outputReadbackCopyMilliseconds = std::max(readbackCopyMilliseconds, 0.0);
mBackendPlayout.outputCachedCopyMilliseconds = std::max(cachedCopyMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueMilliseconds = std::max(asyncQueueMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueBufferMilliseconds = std::max(asyncQueueBufferMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueSetupMilliseconds = std::max(asyncQueueSetupMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueReadPixelsMilliseconds = std::max(asyncQueueReadPixelsMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueFenceMilliseconds = std::max(asyncQueueFenceMilliseconds, 0.0);
mBackendPlayout.outputSyncReadMilliseconds = std::max(syncReadMilliseconds, 0.0);
if (asyncReadbackMissed)
++mBackendPlayout.outputAsyncReadbackMissCount;
if (cachedFallbackUsed)
++mBackendPlayout.outputCachedFallbackCount;
if (syncFallbackUsed)
++mBackendPlayout.outputSyncFallbackCount;
}
bool HealthTelemetry::TryRecordOutputRenderPipelineTiming(
double drawMilliseconds,
double fenceWaitMilliseconds,
double mapMilliseconds,
double readbackCopyMilliseconds,
double cachedCopyMilliseconds,
double asyncQueueMilliseconds,
double asyncQueueBufferMilliseconds,
double asyncQueueSetupMilliseconds,
double asyncQueueReadPixelsMilliseconds,
double asyncQueueFenceMilliseconds,
double syncReadMilliseconds,
bool asyncReadbackMissed,
bool cachedFallbackUsed,
bool syncFallbackUsed)
{
std::unique_lock<std::mutex> lock(mMutex, std::try_to_lock);
if (!lock.owns_lock())
return false;
mBackendPlayout.outputRenderDrawMilliseconds = std::max(drawMilliseconds, 0.0);
mBackendPlayout.outputReadbackFenceWaitMilliseconds = std::max(fenceWaitMilliseconds, 0.0);
mBackendPlayout.outputReadbackMapMilliseconds = std::max(mapMilliseconds, 0.0);
mBackendPlayout.outputReadbackCopyMilliseconds = std::max(readbackCopyMilliseconds, 0.0);
mBackendPlayout.outputCachedCopyMilliseconds = std::max(cachedCopyMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueMilliseconds = std::max(asyncQueueMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueBufferMilliseconds = std::max(asyncQueueBufferMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueSetupMilliseconds = std::max(asyncQueueSetupMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueReadPixelsMilliseconds = std::max(asyncQueueReadPixelsMilliseconds, 0.0);
mBackendPlayout.outputAsyncQueueFenceMilliseconds = std::max(asyncQueueFenceMilliseconds, 0.0);
mBackendPlayout.outputSyncReadMilliseconds = std::max(syncReadMilliseconds, 0.0);
if (asyncReadbackMissed)
++mBackendPlayout.outputAsyncReadbackMissCount;
if (cachedFallbackUsed)
++mBackendPlayout.outputCachedFallbackCount;
if (syncFallbackUsed)
++mBackendPlayout.outputSyncFallbackCount;
return true;
}
HealthTelemetry::SignalStatusSnapshot HealthTelemetry::GetSignalStatusSnapshot() const HealthTelemetry::SignalStatusSnapshot HealthTelemetry::GetSignalStatusSnapshot() const
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
@@ -193,6 +473,18 @@ HealthTelemetry::RuntimeEventMetricsSnapshot HealthTelemetry::GetRuntimeEventMet
return mRuntimeEvents; return mRuntimeEvents;
} }
HealthTelemetry::PersistenceSnapshot HealthTelemetry::GetPersistenceSnapshot() const
{
std::lock_guard<std::mutex> lock(mMutex);
return mPersistence;
}
HealthTelemetry::BackendPlayoutSnapshot HealthTelemetry::GetBackendPlayoutSnapshot() const
{
std::lock_guard<std::mutex> lock(mMutex);
return mBackendPlayout;
}
HealthTelemetry::Snapshot HealthTelemetry::GetSnapshot() const HealthTelemetry::Snapshot HealthTelemetry::GetSnapshot() const
{ {
std::lock_guard<std::mutex> lock(mMutex); std::lock_guard<std::mutex> lock(mMutex);
@@ -202,5 +494,7 @@ HealthTelemetry::Snapshot HealthTelemetry::GetSnapshot() const
snapshot.videoIO = mVideoIOStatus; snapshot.videoIO = mVideoIOStatus;
snapshot.performance = mPerformance; snapshot.performance = mPerformance;
snapshot.runtimeEvents = mRuntimeEvents; snapshot.runtimeEvents = mRuntimeEvents;
snapshot.persistence = mPersistence;
snapshot.backendPlayout = mBackendPlayout;
return snapshot; return snapshot;
} }

View File

@@ -5,9 +5,8 @@
#include <cstdint> #include <cstdint>
#include <string> #include <string>
// Phase 1 compatibility seam for status and timing reporting. HealthTelemetry // HealthTelemetry owns the current operational status snapshot directly, so
// owns the current operational status snapshot directly, so callers can report // callers can report health without sharing runtime-store state.
// health without sharing runtime-store state.
class HealthTelemetry class HealthTelemetry
{ {
public: public:
@@ -69,12 +68,88 @@ public:
RuntimeEventDispatchSnapshot dispatch; RuntimeEventDispatchSnapshot dispatch;
}; };
struct PersistenceSnapshot
{
uint64_t writeSuccessCount = 0;
uint64_t writeFailureCount = 0;
bool lastWriteSucceeded = true;
bool unsavedChanges = false;
bool newerRequestPending = false;
std::string lastTargetKind;
std::string lastTargetPath;
std::string lastReason;
std::string lastErrorMessage;
};
struct BackendPlayoutSnapshot
{
std::string lifecycleState = "NotStarted";
std::string completionResult = "Unknown";
std::size_t readyQueueDepth = 0;
std::size_t readyQueueCapacity = 0;
std::size_t minReadyQueueDepth = 0;
std::size_t maxReadyQueueDepth = 0;
uint64_t readyQueueZeroDepthCount = 0;
uint64_t readyQueuePushedCount = 0;
uint64_t readyQueuePoppedCount = 0;
uint64_t readyQueueDroppedCount = 0;
uint64_t readyQueueUnderrunCount = 0;
std::size_t systemFramePoolFree = 0;
std::size_t systemFramePoolReady = 0;
std::size_t systemFramePoolScheduled = 0;
uint64_t systemFrameUnderrunCount = 0;
uint64_t systemFrameRepeatCount = 0;
uint64_t systemFrameDropCount = 0;
double systemFrameAgeAtScheduleMilliseconds = 0.0;
double systemFrameAgeAtCompletionMilliseconds = 0.0;
double outputRenderMilliseconds = 0.0;
double smoothedOutputRenderMilliseconds = 0.0;
double maxOutputRenderMilliseconds = 0.0;
double outputFrameAcquireMilliseconds = 0.0;
double outputFrameRenderRequestMilliseconds = 0.0;
double outputFrameEndAccessMilliseconds = 0.0;
double outputRenderQueueWaitMilliseconds = 0.0;
double outputRenderDrawMilliseconds = 0.0;
double outputReadbackFenceWaitMilliseconds = 0.0;
double outputReadbackMapMilliseconds = 0.0;
double outputReadbackCopyMilliseconds = 0.0;
double outputCachedCopyMilliseconds = 0.0;
double outputAsyncQueueMilliseconds = 0.0;
double outputAsyncQueueBufferMilliseconds = 0.0;
double outputAsyncQueueSetupMilliseconds = 0.0;
double outputAsyncQueueReadPixelsMilliseconds = 0.0;
double outputAsyncQueueFenceMilliseconds = 0.0;
double outputSyncReadMilliseconds = 0.0;
uint64_t outputAsyncReadbackMissCount = 0;
uint64_t outputCachedFallbackCount = 0;
uint64_t outputSyncFallbackCount = 0;
uint64_t completedFrameIndex = 0;
uint64_t scheduledFrameIndex = 0;
uint64_t scheduledLeadFrames = 0;
bool actualDeckLinkBufferedFramesAvailable = false;
uint64_t actualDeckLinkBufferedFrames = 0;
std::size_t targetDeckLinkBufferedFrames = 0;
double deckLinkScheduleCallMilliseconds = 0.0;
uint64_t deckLinkScheduleFailureCount = 0;
uint64_t measuredLagFrames = 0;
uint64_t catchUpFrames = 0;
uint64_t lateStreak = 0;
uint64_t dropStreak = 0;
uint64_t lateFrameCount = 0;
uint64_t droppedFrameCount = 0;
uint64_t flushedFrameCount = 0;
bool degraded = false;
std::string statusMessage;
};
struct Snapshot struct Snapshot
{ {
SignalStatusSnapshot signal; SignalStatusSnapshot signal;
VideoIOStatusSnapshot videoIO; VideoIOStatusSnapshot videoIO;
PerformanceSnapshot performance; PerformanceSnapshot performance;
RuntimeEventMetricsSnapshot runtimeEvents; RuntimeEventMetricsSnapshot runtimeEvents;
PersistenceSnapshot persistence;
BackendPlayoutSnapshot backendPlayout;
}; };
HealthTelemetry() = default; HealthTelemetry() = default;
@@ -107,10 +182,84 @@ public:
bool TryRecordRuntimeEventDispatchStats(std::size_t dispatchedEvents, std::size_t handlerInvocations, bool TryRecordRuntimeEventDispatchStats(std::size_t dispatchedEvents, std::size_t handlerInvocations,
std::size_t handlerFailures, double dispatchDurationMilliseconds); std::size_t handlerFailures, double dispatchDurationMilliseconds);
void RecordPersistenceWriteResult(bool succeeded, const std::string& targetKind, const std::string& targetPath,
const std::string& reason, const std::string& errorMessage, bool newerRequestPending);
bool TryRecordPersistenceWriteResult(bool succeeded, const std::string& targetKind, const std::string& targetPath,
const std::string& reason, const std::string& errorMessage, bool newerRequestPending);
void RecordBackendPlayoutHealth(const std::string& lifecycleState, const std::string& completionResult,
std::size_t readyQueueDepth, std::size_t readyQueueCapacity, uint64_t readyQueuePushedCount,
std::size_t minReadyQueueDepth, std::size_t maxReadyQueueDepth, uint64_t readyQueueZeroDepthCount,
uint64_t readyQueuePoppedCount, uint64_t readyQueueDroppedCount, uint64_t readyQueueUnderrunCount,
double outputRenderMilliseconds, double smoothedOutputRenderMilliseconds, double maxOutputRenderMilliseconds,
double outputFrameAcquireMilliseconds, double outputFrameRenderRequestMilliseconds, double outputFrameEndAccessMilliseconds,
uint64_t completedFrameIndex, uint64_t scheduledFrameIndex, uint64_t scheduledLeadFrames,
uint64_t measuredLagFrames, uint64_t catchUpFrames, uint64_t lateStreak, uint64_t dropStreak,
uint64_t lateFrameCount, uint64_t droppedFrameCount, uint64_t flushedFrameCount,
bool degraded, const std::string& statusMessage);
bool TryRecordBackendPlayoutHealth(const std::string& lifecycleState, const std::string& completionResult,
std::size_t readyQueueDepth, std::size_t readyQueueCapacity, uint64_t readyQueuePushedCount,
std::size_t minReadyQueueDepth, std::size_t maxReadyQueueDepth, uint64_t readyQueueZeroDepthCount,
uint64_t readyQueuePoppedCount, uint64_t readyQueueDroppedCount, uint64_t readyQueueUnderrunCount,
double outputRenderMilliseconds, double smoothedOutputRenderMilliseconds, double maxOutputRenderMilliseconds,
double outputFrameAcquireMilliseconds, double outputFrameRenderRequestMilliseconds, double outputFrameEndAccessMilliseconds,
uint64_t completedFrameIndex, uint64_t scheduledFrameIndex, uint64_t scheduledLeadFrames,
uint64_t measuredLagFrames, uint64_t catchUpFrames, uint64_t lateStreak, uint64_t dropStreak,
uint64_t lateFrameCount, uint64_t droppedFrameCount, uint64_t flushedFrameCount,
bool degraded, const std::string& statusMessage);
void RecordOutputRenderQueueWait(double queueWaitMilliseconds);
bool TryRecordOutputRenderQueueWait(double queueWaitMilliseconds);
void RecordSystemMemoryPlayoutStats(std::size_t freeFrameCount, std::size_t readyFrameCount,
std::size_t scheduledFrameCount, uint64_t underrunCount, uint64_t repeatCount, uint64_t dropCount,
double frameAgeAtScheduleMilliseconds, double frameAgeAtCompletionMilliseconds);
bool TryRecordSystemMemoryPlayoutStats(std::size_t freeFrameCount, std::size_t readyFrameCount,
std::size_t scheduledFrameCount, uint64_t underrunCount, uint64_t repeatCount, uint64_t dropCount,
double frameAgeAtScheduleMilliseconds, double frameAgeAtCompletionMilliseconds);
void RecordDeckLinkBufferTelemetry(bool actualBufferedFramesAvailable, uint64_t actualBufferedFrames,
std::size_t targetBufferedFrames, double scheduleCallMilliseconds, uint64_t scheduleFailureCount);
bool TryRecordDeckLinkBufferTelemetry(bool actualBufferedFramesAvailable, uint64_t actualBufferedFrames,
std::size_t targetBufferedFrames, double scheduleCallMilliseconds, uint64_t scheduleFailureCount);
void RecordOutputRenderPipelineTiming(
double drawMilliseconds,
double fenceWaitMilliseconds,
double mapMilliseconds,
double readbackCopyMilliseconds,
double cachedCopyMilliseconds,
double asyncQueueMilliseconds,
double asyncQueueBufferMilliseconds,
double asyncQueueSetupMilliseconds,
double asyncQueueReadPixelsMilliseconds,
double asyncQueueFenceMilliseconds,
double syncReadMilliseconds,
bool asyncReadbackMissed,
bool cachedFallbackUsed,
bool syncFallbackUsed);
bool TryRecordOutputRenderPipelineTiming(
double drawMilliseconds,
double fenceWaitMilliseconds,
double mapMilliseconds,
double readbackCopyMilliseconds,
double cachedCopyMilliseconds,
double asyncQueueMilliseconds,
double asyncQueueBufferMilliseconds,
double asyncQueueSetupMilliseconds,
double asyncQueueReadPixelsMilliseconds,
double asyncQueueFenceMilliseconds,
double syncReadMilliseconds,
bool asyncReadbackMissed,
bool cachedFallbackUsed,
bool syncFallbackUsed);
SignalStatusSnapshot GetSignalStatusSnapshot() const; SignalStatusSnapshot GetSignalStatusSnapshot() const;
VideoIOStatusSnapshot GetVideoIOStatusSnapshot() const; VideoIOStatusSnapshot GetVideoIOStatusSnapshot() const;
PerformanceSnapshot GetPerformanceSnapshot() const; PerformanceSnapshot GetPerformanceSnapshot() const;
RuntimeEventMetricsSnapshot GetRuntimeEventMetricsSnapshot() const; RuntimeEventMetricsSnapshot GetRuntimeEventMetricsSnapshot() const;
PersistenceSnapshot GetPersistenceSnapshot() const;
BackendPlayoutSnapshot GetBackendPlayoutSnapshot() const;
Snapshot GetSnapshot() const; Snapshot GetSnapshot() const;
private: private:
@@ -119,4 +268,6 @@ private:
VideoIOStatusSnapshot mVideoIOStatus; VideoIOStatusSnapshot mVideoIOStatus;
PerformanceSnapshot mPerformance; PerformanceSnapshot mPerformance;
RuntimeEventMetricsSnapshot mRuntimeEvents; RuntimeEventMetricsSnapshot mRuntimeEvents;
PersistenceSnapshot mPersistence;
BackendPlayoutSnapshot mBackendPlayout;
}; };

View File

@@ -0,0 +1,89 @@
#include "OutputProductionController.h"
#include <algorithm>
namespace
{
    // Convert the policy's unsigned ready-frame limit to std::size_t and cap it
    // at the queue capacity. A capacity of 0 means "no capacity cap is known",
    // so the requested value passes through unchanged.
    std::size_t ClampReadyLimit(unsigned value, std::size_t capacity)
    {
        const std::size_t requested = static_cast<std::size_t>(value);
        return capacity == 0 ? requested : (std::min)(requested, capacity);
    }
}
// Store a normalized copy of the policy so Decide() never sees raw values.
OutputProductionController::OutputProductionController(const VideoPlayoutPolicy& policy) :
    mPolicy(NormalizeVideoPlayoutPolicy(policy))
{
}
// Replace the active policy; normalization is applied on the way in.
void OutputProductionController::Configure(const VideoPlayoutPolicy& policy)
{
    mPolicy = NormalizeVideoPlayoutPolicy(policy);
}
// Decide whether the renderer should produce, wait, or throttle, given the
// current ready-queue pressure. Pure function of the stored policy and the
// pressure snapshot; reason strings are stable labels for telemetry.
OutputProductionDecision OutputProductionController::Decide(const OutputProductionPressure& pressure) const
{
    OutputProductionDecision decision;
    // Resolve effective limits: configured policy values capped by the physical
    // queue capacity when one is reported (capacity 0 = unknown/uncapped).
    const std::size_t configuredMax = static_cast<std::size_t>(mPolicy.maxReadyFrames);
    std::size_t maxReady = configuredMax;
    if (pressure.readyQueueCapacity > 0)
        maxReady = (std::min)(configuredMax, pressure.readyQueueCapacity);
    std::size_t targetReady = ClampReadyLimit(mPolicy.targetReadyFrames, pressure.readyQueueCapacity);
    targetReady = (std::min)(targetReady, maxReady);
    decision.targetReadyFrames = targetReady;
    decision.maxReadyFrames = maxReady;
    if (maxReady == 0)
    {
        // Nowhere to put a frame at all.
        decision.action = OutputProductionAction::Throttle;
        decision.reason = "no-ready-frame-capacity";
    }
    else if (pressure.readyQueueDepth >= maxReady)
    {
        decision.action = OutputProductionAction::Throttle;
        decision.reason = "ready-queue-full";
    }
    else if (pressure.readyQueueDepth < targetReady)
    {
        // Refill up to the target depth.
        decision.action = OutputProductionAction::Produce;
        decision.requestedFrames = targetReady - pressure.readyQueueDepth;
        decision.reason = "ready-queue-below-target";
    }
    else if (pressure.lateStreak > 0 || pressure.dropStreak > 0 || pressure.readyQueueUnderrunCount > 0)
    {
        // At target but playout is struggling: opportunistically buffer one more
        // frame. depth < maxReady is already established by the branch above.
        decision.action = OutputProductionAction::Produce;
        decision.requestedFrames = 1;
        decision.reason = "playout-pressure";
    }
    else
    {
        decision.action = OutputProductionAction::Wait;
        decision.reason = "ready-queue-at-target";
    }
    return decision;
}
// Stable text label for an action, for logs and telemetry. Any value outside
// the enum maps to "Wait", matching the original switch default.
const char* OutputProductionActionName(OutputProductionAction action)
{
    if (action == OutputProductionAction::Produce)
        return "Produce";
    if (action == OutputProductionAction::Throttle)
        return "Throttle";
    return "Wait";
}

View File

@@ -0,0 +1,46 @@
#pragma once
#include "VideoPlayoutPolicy.h"
#include <cstddef>
#include <cstdint>
#include <string>
// Action the backend should take for the next production decision.
enum class OutputProductionAction
{
    Produce,  // render more frames now
    Wait,     // queue is healthy; do nothing this tick
    Throttle  // queue is full or has no capacity; stop producing
};
// Inputs to a production decision: the current ready-queue state plus recent
// playout pressure counters sampled by the caller.
struct OutputProductionPressure
{
    std::size_t readyQueueDepth = 0;       // frames currently queued for output
    std::size_t readyQueueCapacity = 0;    // physical queue capacity; 0 = unknown
    uint64_t readyQueueUnderrunCount = 0;  // underrun events reported by the ready queue
    uint64_t lateStreak = 0;               // consecutive late output frames
    uint64_t dropStreak = 0;               // consecutive dropped output frames
};
// Result of Decide(): the action, how many frames to request, the effective
// (capacity-clamped) limits used, and a short machine-readable reason label.
struct OutputProductionDecision
{
    OutputProductionAction action = OutputProductionAction::Wait;
    std::size_t requestedFrames = 0;    // frames to produce when action == Produce
    std::size_t targetReadyFrames = 0;  // effective target depth after clamping
    std::size_t maxReadyFrames = 0;     // effective maximum depth after clamping
    std::string reason;                 // e.g. "ready-queue-below-target"
};
// Pure policy object: given queue pressure, decides whether the renderer
// should produce, wait, or throttle. Holds only the (normalized) policy;
// Decide() is const, so the object itself carries no per-frame state.
class OutputProductionController
{
public:
    explicit OutputProductionController(const VideoPlayoutPolicy& policy = VideoPlayoutPolicy());
    // Replace the playout policy (normalized before use).
    void Configure(const VideoPlayoutPolicy& policy);
    // Compute the production action for the given pressure snapshot.
    OutputProductionDecision Decide(const OutputProductionPressure& pressure) const;
private:
    VideoPlayoutPolicy mPolicy;
};
// Stable display name for an action (unknown values map to "Wait").
const char* OutputProductionActionName(OutputProductionAction action);

View File

@@ -0,0 +1,102 @@
#include "RenderCadenceController.h"
#include <algorithm>
#include <cmath>
// Set the target frame duration and skip policy, then restart the schedule at
// firstRenderTime. A non-positive duration would stall the cadence math, so it
// falls back to a minimal 1ms tick.
void RenderCadenceController::Configure(Duration targetFrameDuration, TimePoint firstRenderTime, const RenderCadencePolicy& policy)
{
    if (IsPositive(targetFrameDuration))
        mTargetFrameDuration = targetFrameDuration;
    else
        mTargetFrameDuration = std::chrono::milliseconds(1);
    mPolicy = policy;
    // Never allow skipping before at least one full frame of lateness.
    if (mPolicy.skipThresholdFrames < 1.0)
        mPolicy.skipThresholdFrames = 1.0;
    Reset(firstRenderTime);
}
// Restart the cadence from a new anchor time: frame indices restart at zero,
// accumulated metrics are discarded, and the first Tick() fires at
// firstRenderTime.
void RenderCadenceController::Reset(TimePoint firstRenderTime)
{
    mNextRenderTime = firstRenderTime;
    mNextFrameIndex = 0;
    mMetrics = RenderCadenceMetrics();
}
// Decide what to do at time `now`. Before the scheduled render time this
// returns Wait with the remaining sleep duration; otherwise it returns Render,
// possibly skipping whole ticks when the loop has fallen far behind, and
// advances the schedule one frame past the (possibly skipped-ahead) target.
RenderCadenceDecision RenderCadenceController::Tick(TimePoint now)
{
    RenderCadenceDecision decision;
    decision.frameIndex = mNextFrameIndex;
    decision.renderTargetTime = mNextRenderTime;
    decision.nextRenderTime = mNextRenderTime;
    // Early: the next tick is still in the future, tell the caller to sleep.
    if (now < mNextRenderTime)
    {
        decision.action = RenderCadenceAction::Wait;
        decision.waitDuration = mNextRenderTime - now;
        decision.reason = "waiting-for-next-render-tick";
        return decision;
    }
    const Duration lateness = now - mNextRenderTime;
    const uint64_t skippedTicks = SkippedTicksForLateness(lateness);
    if (skippedTicks > 0)
    {
        // Jump the frame index and target time forward so we render the frame
        // nearest "now" instead of grinding through every missed tick.
        decision.skippedTicks = skippedTicks;
        decision.frameIndex = mNextFrameIndex + skippedTicks;
        decision.renderTargetTime = mNextRenderTime + (mTargetFrameDuration * skippedTicks);
        decision.reason = "late-skip-render-ticks";
        mMetrics.skippedTickCount += skippedTicks;
    }
    else
    {
        decision.reason = IsPositive(lateness) ? "late-render-now" : "on-time-render";
    }
    decision.action = RenderCadenceAction::Render;
    // Lateness is measured against the target of the frame actually being
    // rendered (i.e. after any skip adjustment), never negative.
    decision.lateness = now > decision.renderTargetTime
        ? now - decision.renderTargetTime
        : Duration::zero();
    // Advance the schedule one frame past the rendered frame's target.
    mNextFrameIndex = decision.frameIndex + 1;
    mNextRenderTime = decision.renderTargetTime + mTargetFrameDuration;
    decision.nextRenderTime = mNextRenderTime;
    ++mMetrics.renderedFrameCount;
    mMetrics.nextFrameIndex = mNextFrameIndex;
    mMetrics.lastLateness = decision.lateness;
    if (IsPositive(decision.lateness))
    {
        ++mMetrics.lateFrameCount;
        mMetrics.maxLateness = (std::max)(mMetrics.maxLateness, decision.lateness);
    }
    return decision;
}
// Number of whole ticks to skip for the given lateness. Returns 0 when
// skipping is disabled, the lateness is not positive, or the lateness has not
// yet reached the policy threshold; otherwise the whole-frame lateness, capped
// by maxSkippedTicksPerDecision.
uint64_t RenderCadenceController::SkippedTicksForLateness(Duration lateness) const
{
    if (!mPolicy.skipLateTicks)
        return 0;
    if (!IsPositive(lateness) || !IsPositive(mTargetFrameDuration))
        return 0;
    const double lateFrames =
        static_cast<double>(lateness.count()) / static_cast<double>(mTargetFrameDuration.count());
    // Below the hysteresis threshold we render late rather than skip.
    if (lateFrames < mPolicy.skipThresholdFrames)
        return 0;
    const uint64_t wholeLateTicks = static_cast<uint64_t>(std::floor(lateFrames));
    if (wholeLateTicks == 0)
        return 0;
    // Cap the catch-up so one decision cannot jump arbitrarily far ahead.
    return (std::min)(wholeLateTicks, mPolicy.maxSkippedTicksPerDecision);
}
// True only for strictly positive durations; zero and negative both fail.
bool RenderCadenceController::IsPositive(Duration duration)
{
    return Duration::zero() < duration;
}
// Stable text label for a cadence action; any value outside the enum maps to
// "Wait", matching the original switch default.
const char* RenderCadenceActionName(RenderCadenceAction action)
{
    if (action == RenderCadenceAction::Render)
        return "Render";
    return "Wait";
}

View File

@@ -0,0 +1,68 @@
#pragma once
#include <chrono>
#include <cstdint>
// Outcome of a cadence tick: wait until the next render time, or render now.
enum class RenderCadenceAction
{
    Wait,
    Render
};
// Tunables controlling how the cadence catches up when it falls behind.
struct RenderCadencePolicy
{
    bool skipLateTicks = true;                // allow skipping frames when late
    uint64_t maxSkippedTicksPerDecision = 4;  // cap on ticks skipped per Tick()
    double skipThresholdFrames = 2.0;         // lateness (in frames) before skipping; Configure clamps to >= 1
};
// Result of Tick(): the action, which frame index this is, how many ticks
// were skipped, and the timing bookkeeping the caller needs.
struct RenderCadenceDecision
{
    RenderCadenceAction action = RenderCadenceAction::Wait;
    uint64_t frameIndex = 0;    // index of the frame to render (after any skips)
    uint64_t skippedTicks = 0;  // ticks skipped by this decision
    std::chrono::steady_clock::time_point renderTargetTime;  // ideal time of this frame
    std::chrono::steady_clock::time_point nextRenderTime;    // when the next tick is due
    // How long to sleep when action == Wait.
    std::chrono::steady_clock::duration waitDuration = std::chrono::steady_clock::duration::zero();
    // How late this render is relative to its (possibly skip-adjusted) target.
    std::chrono::steady_clock::duration lateness = std::chrono::steady_clock::duration::zero();
    const char* reason = "waiting-for-next-render-tick";  // static label for logging
};
// Running counters for telemetry; cleared by Reset().
struct RenderCadenceMetrics
{
    uint64_t nextFrameIndex = 0;
    uint64_t renderedFrameCount = 0;
    uint64_t skippedTickCount = 0;
    uint64_t lateFrameCount = 0;
    std::chrono::steady_clock::duration lastLateness = std::chrono::steady_clock::duration::zero();
    std::chrono::steady_clock::duration maxLateness = std::chrono::steady_clock::duration::zero();
};
// Fixed-rate render scheduler on the steady clock. Configure() sets the frame
// duration and anchor; the render loop then calls Tick(now), which says wait
// or render and advances the schedule, skipping ticks when the loop has fallen
// too far behind (per RenderCadencePolicy). No internal locking — confine to a
// single (render) thread.
class RenderCadenceController
{
public:
    using Clock = std::chrono::steady_clock;
    using TimePoint = Clock::time_point;
    using Duration = Clock::duration;
    // Set the target frame duration (non-positive falls back to 1ms), store the
    // policy, and reset the schedule to firstRenderTime.
    void Configure(Duration targetFrameDuration, TimePoint firstRenderTime, const RenderCadencePolicy& policy = RenderCadencePolicy());
    // Restart the schedule at firstRenderTime and clear metrics.
    void Reset(TimePoint firstRenderTime);
    // Decide what to do at `now`; advances internal state when rendering.
    RenderCadenceDecision Tick(TimePoint now);
    Duration TargetFrameDuration() const { return mTargetFrameDuration; }
    TimePoint NextRenderTime() const { return mNextRenderTime; }
    uint64_t NextFrameIndex() const { return mNextFrameIndex; }
    const RenderCadenceMetrics& Metrics() const { return mMetrics; }
private:
    // Whole frames to skip for the given lateness (0 when below threshold).
    uint64_t SkippedTicksForLateness(Duration lateness) const;
    static bool IsPositive(Duration duration);
    Duration mTargetFrameDuration = std::chrono::milliseconds(16);
    TimePoint mNextRenderTime;
    uint64_t mNextFrameIndex = 0;
    RenderCadencePolicy mPolicy;
    RenderCadenceMetrics mMetrics;
};
// Stable display name for an action (unknown values map to "Wait").
const char* RenderCadenceActionName(RenderCadenceAction action);

View File

@@ -0,0 +1,93 @@
#include "RenderOutputQueue.h"
// Store a normalized policy; the queue allocates nothing until frames arrive.
RenderOutputQueue::RenderOutputQueue(const VideoPlayoutPolicy& policy) :
    mPolicy(NormalizeVideoPlayoutPolicy(policy))
{
}
void RenderOutputQueue::Configure(const VideoPlayoutPolicy& policy)
{
std::lock_guard<std::mutex> lock(mMutex);
mPolicy = NormalizeVideoPlayoutPolicy(policy);
while (mReadyFrames.size() > CapacityLocked())
{
ReleaseFrame(mReadyFrames.front());
mReadyFrames.pop_front();
++mDroppedCount;
}
}
// Publish a rendered frame. When the queue is full the OLDEST ready frame is
// evicted (released via its callback and counted as a drop) so playout always
// sees the freshest content. Returns false only when the queue has no capacity
// at all; otherwise the frame is accepted and true is returned.
bool RenderOutputQueue::Push(RenderOutputFrame frame)
{
    std::lock_guard<std::mutex> lock(mMutex);
    const std::size_t capacity = CapacityLocked();
    if (capacity == 0)
    {
        // Zero-capacity guard: the previous code called front() on an empty
        // deque here, which is undefined behavior. NormalizeVideoPlayoutPolicy
        // presumably keeps maxReadyFrames >= 1 (TODO confirm), but guard anyway:
        // release the incoming frame and count it as dropped.
        ReleaseFrame(frame);
        ++mDroppedCount;
        return false;
    }
    // `while` (not `if`) so the invariant holds even if capacity just shrank.
    while (mReadyFrames.size() >= capacity)
    {
        ReleaseFrame(mReadyFrames.front());
        mReadyFrames.pop_front();
        ++mDroppedCount;
    }
    mReadyFrames.push_back(frame);
    ++mPushedCount;
    return true;
}
// Hand the oldest ready frame to the caller (ownership of the release callback
// transfers with it). An empty queue is recorded as an underrun so telemetry
// can observe playout starvation.
bool RenderOutputQueue::TryPop(RenderOutputFrame& frame)
{
    std::lock_guard<std::mutex> lock(mMutex);
    if (mReadyFrames.empty())
    {
        ++mUnderrunCount;
        return false;
    }
    frame = mReadyFrames.front();
    mReadyFrames.pop_front();
    ++mPoppedCount;
    return true;
}
// Discard the oldest queued frame without giving it to a consumer: it is
// released via its callback and counted as a drop. Returns false when empty
// (no underrun is recorded — this is a deliberate discard, not starvation).
bool RenderOutputQueue::DropOldestFrame()
{
    std::lock_guard<std::mutex> lock(mMutex);
    if (mReadyFrames.empty())
        return false;
    ReleaseFrame(mReadyFrames.front());
    mReadyFrames.pop_front();
    ++mDroppedCount;
    return true;
}
void RenderOutputQueue::Clear()
{
std::lock_guard<std::mutex> lock(mMutex);
for (RenderOutputFrame& frame : mReadyFrames)
ReleaseFrame(frame);
mReadyFrames.clear();
}
// Snapshot all counters under the lock so the caller gets a coherent view.
RenderOutputQueueMetrics RenderOutputQueue::GetMetrics() const
{
    std::lock_guard<std::mutex> lock(mMutex);
    RenderOutputQueueMetrics snapshot;
    snapshot.capacity = CapacityLocked();
    snapshot.depth = mReadyFrames.size();
    snapshot.pushedCount = mPushedCount;
    snapshot.poppedCount = mPoppedCount;
    snapshot.droppedCount = mDroppedCount;
    snapshot.underrunCount = mUnderrunCount;
    return snapshot;
}
// Queue capacity from the active policy. Caller must hold mMutex.
std::size_t RenderOutputQueue::CapacityLocked() const
{
    return static_cast<std::size_t>(mPolicy.maxReadyFrames);
}
// Invoke the frame's release callback exactly once (the callback is cleared
// afterwards so a second call is a no-op).
void RenderOutputQueue::ReleaseFrame(RenderOutputFrame& frame)
{
    if (frame.releaseFrame)
        frame.releaseFrame(frame.frame);
    frame.releaseFrame = {};
}

View File

@@ -0,0 +1,52 @@
#pragma once
#include "VideoIOTypes.h"
#include "VideoPlayoutPolicy.h"
#include <cstdint>
#include <deque>
#include <functional>
#include <mutex>
// A rendered output frame queued for playout, together with the callback that
// returns the frame to its owner when the queue discards or clears it.
struct RenderOutputFrame
{
    VideoIOOutputFrame frame;
    uint64_t frameIndex = 0;
    // NOTE(review): `stale` is not read by RenderOutputQueue itself —
    // presumably consumed downstream; confirm.
    bool stale = false;
    // Invoked at most once when the queue drops/clears the frame.
    std::function<void(VideoIOOutputFrame& frame)> releaseFrame;
};
// Counter snapshot returned by RenderOutputQueue::GetMetrics().
struct RenderOutputQueueMetrics
{
    std::size_t depth = 0;       // frames currently queued
    std::size_t capacity = 0;    // policy maxReadyFrames
    uint64_t pushedCount = 0;    // accepted Push() calls
    uint64_t poppedCount = 0;    // successful TryPop() calls
    uint64_t droppedCount = 0;   // frames evicted (overflow, shrink, explicit drop)
    uint64_t underrunCount = 0;  // TryPop() calls that found the queue empty
};
// Thread-safe bounded FIFO of rendered frames awaiting output. When full,
// Push() evicts the oldest frame (releasing it via its callback) so playout
// always sees the freshest content. Capacity comes from VideoPlayoutPolicy.
class RenderOutputQueue
{
public:
    explicit RenderOutputQueue(const VideoPlayoutPolicy& policy = VideoPlayoutPolicy());
    // Apply a new policy; trims (and releases) oldest frames if capacity shrank.
    void Configure(const VideoPlayoutPolicy& policy);
    bool Push(RenderOutputFrame frame);
    bool TryPop(RenderOutputFrame& frame);
    bool DropOldestFrame();
    // Release and discard every queued frame.
    void Clear();
    RenderOutputQueueMetrics GetMetrics() const;
private:
    // Helpers assume mMutex is held by the caller.
    std::size_t CapacityLocked() const;
    static void ReleaseFrame(RenderOutputFrame& frame);
    mutable std::mutex mMutex;
    VideoPlayoutPolicy mPolicy;
    std::deque<RenderOutputFrame> mReadyFrames;
    uint64_t mPushedCount = 0;
    uint64_t mPoppedCount = 0;
    uint64_t mDroppedCount = 0;
    uint64_t mUnderrunCount = 0;
};

View File

@@ -0,0 +1,260 @@
#include "SystemOutputFramePool.h"
#include <algorithm>
namespace
{
    // Fill in a missing rowBytes from the pixel format and width so callers may
    // leave rowBytes at 0 and get the packed default.
    SystemOutputFramePoolConfig NormalizeConfig(SystemOutputFramePoolConfig config)
    {
        if (config.rowBytes == 0)
            config.rowBytes = VideoIORowBytes(config.pixelFormat, config.width);
        return config;
    }
}
// Construct and immediately size the pool for the given configuration.
SystemOutputFramePool::SystemOutputFramePool(const SystemOutputFramePoolConfig& config)
{
    Configure(config);
}
// (Re)build the pool: allocates `capacity` buffers of rowBytes*height bytes,
// marks every slot Free, empties the ready FIFO, and resets the miss/underrun
// counters.
// NOTE(review): mSlots is cleared and recreated, so generations restart from a
// fresh base; a handle held across a Configure() could in principle alias a new
// generation — confirm callers never hold OutputFrameSlot handles across this.
void SystemOutputFramePool::Configure(const SystemOutputFramePoolConfig& config)
{
    std::lock_guard<std::mutex> lock(mMutex);
    mConfig = NormalizeConfig(config);
    mReadySlots.clear();
    mSlots.clear();
    mSlots.resize(mConfig.capacity);
    const std::size_t byteCount = FrameByteCount();
    for (StoredSlot& slot : mSlots)
    {
        slot.bytes.resize(byteCount);
        slot.state = OutputFrameSlotState::Free;
        ++slot.generation;
    }
    mAcquireMissCount = 0;
    mReadyUnderrunCount = 0;
}
// Thread-safe copy of the active configuration.
SystemOutputFramePoolConfig SystemOutputFramePool::Config() const
{
    std::lock_guard<std::mutex> lock(mMutex);
    return mConfig;
}
// Find the first Free slot, mark it Rendering, bump its generation (which
// invalidates any older handle to the same slot), and hand out a handle whose
// frame points into the pooled buffer. Returns false and counts an acquire
// miss when every slot is in use.
bool SystemOutputFramePool::AcquireFreeSlot(OutputFrameSlot& slot)
{
    std::lock_guard<std::mutex> lock(mMutex);
    for (std::size_t index = 0; index < mSlots.size(); ++index)
    {
        if (mSlots[index].state != OutputFrameSlotState::Free)
            continue;
        mSlots[index].state = OutputFrameSlotState::Rendering;
        ++mSlots[index].generation;
        FillOutputSlotLocked(index, slot);
        return true;
    }
    slot = OutputFrameSlot();
    ++mAcquireMissCount;
    return false;
}
// Alias kept for callers that name the operation by the resulting state.
bool SystemOutputFramePool::AcquireRenderingSlot(OutputFrameSlot& slot)
{
    return AcquireFreeSlot(slot);
}
// Move a Rendering slot to Completed and append it to the ready FIFO.
// Rejects stale handles (generation mismatch) and wrong-state slots.
bool SystemOutputFramePool::PublishReadySlot(const OutputFrameSlot& slot)
{
    std::lock_guard<std::mutex> lock(mMutex);
    if (!TransitionSlotLocked(slot, OutputFrameSlotState::Rendering, OutputFrameSlotState::Completed))
        return false;
    mReadySlots.push_back(slot.index);
    return true;
}
// Alias for PublishReadySlot.
bool SystemOutputFramePool::PublishCompletedSlot(const OutputFrameSlot& slot)
{
    return PublishReadySlot(slot);
}
// Pop the oldest ready slot, skipping FIFO entries whose slot has since been
// released or reconfigured. The slot stays Completed until MarkScheduled or a
// Release* call; returns false and counts an underrun when nothing is ready.
bool SystemOutputFramePool::ConsumeReadySlot(OutputFrameSlot& slot)
{
    std::lock_guard<std::mutex> lock(mMutex);
    while (!mReadySlots.empty())
    {
        const std::size_t index = mReadySlots.front();
        mReadySlots.pop_front();
        if (index >= mSlots.size() || mSlots[index].state != OutputFrameSlotState::Completed)
            continue;
        FillOutputSlotLocked(index, slot);
        return true;
    }
    slot = OutputFrameSlot();
    ++mReadyUnderrunCount;
    return false;
}
// Alias for ConsumeReadySlot.
bool SystemOutputFramePool::ConsumeCompletedSlot(OutputFrameSlot& slot)
{
    return ConsumeReadySlot(slot);
}
// Transition a Completed slot to Scheduled (i.e. handed to the output device)
// and drop any leftover ready-FIFO entry for it. Rejects stale handles and
// wrong-state slots.
bool SystemOutputFramePool::MarkScheduled(const OutputFrameSlot& slot)
{
    std::lock_guard<std::mutex> lock(mMutex);
    if (!IsValidSlotLocked(slot))
        return false;
    if (mSlots[slot.index].state != OutputFrameSlotState::Completed)
        return false;
    RemoveReadyIndexLocked(slot.index);
    mSlots[slot.index].state = OutputFrameSlotState::Scheduled;
    return true;
}
// Same as MarkScheduled, but the slot is identified by its buffer pointer —
// used when only the raw bytes pointer survives a round-trip through the
// output device API.
bool SystemOutputFramePool::MarkScheduledByBuffer(void* bytes)
{
    if (bytes == nullptr)
        return false;
    std::lock_guard<std::mutex> lock(mMutex);
    for (std::size_t index = 0; index < mSlots.size(); ++index)
    {
        if (mSlots[index].bytes.empty() || mSlots[index].bytes.data() != bytes)
            continue;
        if (mSlots[index].state != OutputFrameSlotState::Completed)
            return false;
        RemoveReadyIndexLocked(index);
        mSlots[index].state = OutputFrameSlotState::Scheduled;
        return true;
    }
    return false;
}
// Return a slot to Free from ANY non-Free state (generation-checked).
bool SystemOutputFramePool::ReleaseSlot(const OutputFrameSlot& slot)
{
    std::lock_guard<std::mutex> lock(mMutex);
    if (!IsValidSlotLocked(slot) || mSlots[slot.index].state == OutputFrameSlotState::Free)
        return false;
    return ReleaseSlotByIndexLocked(slot.index);
}
// Return a slot to Free, but only from the Scheduled state.
bool SystemOutputFramePool::ReleaseScheduledSlot(const OutputFrameSlot& slot)
{
    std::lock_guard<std::mutex> lock(mMutex);
    return TransitionSlotLocked(slot, OutputFrameSlotState::Scheduled, OutputFrameSlotState::Free);
}
// Buffer-pointer variant of ReleaseSlot (no generation check — matches by the
// backing allocation instead).
bool SystemOutputFramePool::ReleaseSlotByBuffer(void* bytes)
{
    if (bytes == nullptr)
        return false;
    std::lock_guard<std::mutex> lock(mMutex);
    for (std::size_t index = 0; index < mSlots.size(); ++index)
    {
        if (!mSlots[index].bytes.empty() && mSlots[index].bytes.data() == bytes)
            return ReleaseSlotByIndexLocked(index);
    }
    return false;
}
// Force every slot back to Free and empty the ready FIFO. Generations are
// bumped so outstanding handles are invalidated; buffers are kept allocated.
void SystemOutputFramePool::Clear()
{
    std::lock_guard<std::mutex> lock(mMutex);
    mReadySlots.clear();
    for (StoredSlot& slot : mSlots)
    {
        slot.state = OutputFrameSlotState::Free;
        ++slot.generation;
    }
}
// Snapshot per-state slot counts and the miss/underrun counters under the
// lock. acquiredCount mirrors renderingCount here.
// NOTE(review): metrics.consumedCount is never set anywhere in this file —
// confirm whether it was meant to track ConsumeReadySlot successes.
SystemOutputFramePoolMetrics SystemOutputFramePool::GetMetrics() const
{
    std::lock_guard<std::mutex> lock(mMutex);
    SystemOutputFramePoolMetrics metrics;
    metrics.capacity = mSlots.size();
    metrics.readyCount = mReadySlots.size();
    metrics.acquireMissCount = mAcquireMissCount;
    metrics.readyUnderrunCount = mReadyUnderrunCount;
    for (const StoredSlot& slot : mSlots)
    {
        switch (slot.state)
        {
        case OutputFrameSlotState::Free:
            ++metrics.freeCount;
            break;
        case OutputFrameSlotState::Rendering:
            ++metrics.renderingCount;
            ++metrics.acquiredCount;
            break;
        case OutputFrameSlotState::Completed:
            ++metrics.completedCount;
            break;
        case OutputFrameSlotState::Scheduled:
            ++metrics.scheduledCount;
            break;
        }
    }
    return metrics;
}
// A handle is valid only if its index is in range AND its generation matches
// the stored slot's (i.e. the slot has not been re-acquired since).
// Caller must hold mMutex.
bool SystemOutputFramePool::IsValidSlotLocked(const OutputFrameSlot& slot) const
{
    return slot.index < mSlots.size() && mSlots[slot.index].generation == slot.generation;
}
bool SystemOutputFramePool::TransitionSlotLocked(const OutputFrameSlot& slot, OutputFrameSlotState expectedState, OutputFrameSlotState nextState)
{
if (!IsValidSlotLocked(slot) || mSlots[slot.index].state != expectedState)
return false;
mSlots[slot.index].state = nextState;
return true;
}
// Populate a caller-facing handle for slot `index`: the frame's byte pointer
// and geometry come from the pool config, and the generation snapshot makes
// the handle stale-detectable. Caller must hold mMutex.
void SystemOutputFramePool::FillOutputSlotLocked(std::size_t index, OutputFrameSlot& slot)
{
    StoredSlot& storedSlot = mSlots[index];
    slot.index = index;
    slot.generation = storedSlot.generation;
    slot.frame.bytes = storedSlot.bytes.empty() ? nullptr : storedSlot.bytes.data();
    slot.frame.rowBytes = static_cast<long>(mConfig.rowBytes);
    slot.frame.width = mConfig.width;
    slot.frame.height = mConfig.height;
    slot.frame.pixelFormat = mConfig.pixelFormat;
    slot.frame.nativeFrame = nullptr;
    // System-memory pool: the "native" buffer is just the pooled bytes.
    slot.frame.nativeBuffer = slot.frame.bytes;
}
// Remove every ready-FIFO entry referring to `index`. Caller must hold mMutex.
void SystemOutputFramePool::RemoveReadyIndexLocked(std::size_t index)
{
    mReadySlots.erase(std::remove(mReadySlots.begin(), mReadySlots.end(), index), mReadySlots.end());
}
// Force slot `index` back to Free (from any non-Free state), purging it from
// the ready FIFO first. Caller must hold mMutex.
bool SystemOutputFramePool::ReleaseSlotByIndexLocked(std::size_t index)
{
    if (index >= mSlots.size() || mSlots[index].state == OutputFrameSlotState::Free)
        return false;
    RemoveReadyIndexLocked(index);
    mSlots[index].state = OutputFrameSlotState::Free;
    return true;
}
// Bytes per pooled frame buffer: rowBytes * height.
std::size_t SystemOutputFramePool::FrameByteCount() const
{
    return static_cast<std::size_t>(mConfig.rowBytes) * static_cast<std::size_t>(mConfig.height);
}

View File

@@ -0,0 +1,94 @@
#pragma once
#include "VideoIOTypes.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <mutex>
#include <vector>
// Lifecycle of a pooled frame buffer:
//   Free -> Rendering (AcquireFreeSlot)
//        -> Completed (PublishReadySlot, enters the ready FIFO)
//        -> Scheduled (MarkScheduled / MarkScheduledByBuffer)
//        -> Free      (ReleaseSlot / ReleaseScheduledSlot / ReleaseSlotByBuffer)
enum class OutputFrameSlotState
{
    Free,
    Rendering,
    Completed,
    Scheduled
};
// Geometry and sizing for the pooled buffers. rowBytes == 0 means "derive the
// packed row size from pixelFormat and width".
struct SystemOutputFramePoolConfig
{
    unsigned width = 0;
    unsigned height = 0;
    VideoIOPixelFormat pixelFormat = VideoIOPixelFormat::Bgra8;
    unsigned rowBytes = 0;
    std::size_t capacity = 0;  // number of frame buffers to preallocate
};
// Handle to a pooled buffer. `generation` guards against stale handles: the
// pool bumps a slot's generation when it is (re-)acquired or reset, and
// rejects handles whose generation no longer matches.
struct OutputFrameSlot
{
    VideoIOOutputFrame frame;  // points into the pool-owned byte buffer
    std::size_t index = 0;
    uint64_t generation = 0;
};
// Counter snapshot returned by SystemOutputFramePool::GetMetrics().
struct SystemOutputFramePoolMetrics
{
    std::size_t capacity = 0;
    std::size_t freeCount = 0;
    std::size_t renderingCount = 0;
    std::size_t completedCount = 0;
    std::size_t scheduledCount = 0;
    std::size_t acquiredCount = 0;  // mirrors renderingCount in GetMetrics()
    std::size_t readyCount = 0;     // entries in the ready FIFO
    // NOTE(review): consumedCount is never populated by the pool — confirm
    // whether it should count ConsumeReadySlot successes.
    std::size_t consumedCount = 0;
    uint64_t acquireMissCount = 0;   // AcquireFreeSlot calls with no Free slot
    uint64_t readyUnderrunCount = 0; // ConsumeReadySlot calls with nothing ready
};
// Thread-safe pool of system-memory output frame buffers with a four-state
// slot lifecycle and a FIFO of completed ("ready") slots. All public methods
// lock mMutex; the *Locked private helpers assume the caller holds it.
class SystemOutputFramePool
{
public:
    SystemOutputFramePool() = default;
    explicit SystemOutputFramePool(const SystemOutputFramePoolConfig& config);
    // (Re)allocate buffers; invalidates outstanding handles and resets counters.
    void Configure(const SystemOutputFramePoolConfig& config);
    SystemOutputFramePoolConfig Config() const;
    // Free -> Rendering; hands out a generation-stamped handle.
    bool AcquireFreeSlot(OutputFrameSlot& slot);
    bool AcquireRenderingSlot(OutputFrameSlot& slot);  // alias of AcquireFreeSlot
    // Rendering -> Completed; appends to the ready FIFO.
    bool PublishReadySlot(const OutputFrameSlot& slot);
    bool PublishCompletedSlot(const OutputFrameSlot& slot);  // alias
    // Pop the oldest Completed slot from the ready FIFO (state stays Completed).
    bool ConsumeReadySlot(OutputFrameSlot& slot);
    bool ConsumeCompletedSlot(OutputFrameSlot& slot);  // alias
    // Completed -> Scheduled (by handle, or by raw buffer pointer).
    bool MarkScheduled(const OutputFrameSlot& slot);
    bool MarkScheduledByBuffer(void* bytes);
    // Return a slot to Free (any non-Free state / Scheduled only / by pointer).
    bool ReleaseSlot(const OutputFrameSlot& slot);
    bool ReleaseScheduledSlot(const OutputFrameSlot& slot);
    bool ReleaseSlotByBuffer(void* bytes);
    // Force all slots Free and invalidate outstanding handles; keeps buffers.
    void Clear();
    SystemOutputFramePoolMetrics GetMetrics() const;
private:
    // Backing storage for one slot; `bytes` owns the pixel data.
    struct StoredSlot
    {
        std::vector<unsigned char> bytes;
        OutputFrameSlotState state = OutputFrameSlotState::Free;
        uint64_t generation = 1;
    };
    bool IsValidSlotLocked(const OutputFrameSlot& slot) const;
    bool TransitionSlotLocked(const OutputFrameSlot& slot, OutputFrameSlotState expectedState, OutputFrameSlotState nextState);
    void FillOutputSlotLocked(std::size_t index, OutputFrameSlot& slot);
    void RemoveReadyIndexLocked(std::size_t index);
    bool ReleaseSlotByIndexLocked(std::size_t index);
    std::size_t FrameByteCount() const;
    mutable std::mutex mMutex;
    SystemOutputFramePoolConfig mConfig;
    std::vector<StoredSlot> mSlots;
    std::deque<std::size_t> mReadySlots;
    uint64_t mAcquireMissCount = 0;
    uint64_t mReadyUnderrunCount = 0;
};

View File

@@ -6,14 +6,22 @@
#include "RenderEngine.h" #include "RenderEngine.h"
#include "RuntimeEventDispatcher.h" #include "RuntimeEventDispatcher.h"
#include <algorithm>
#include <chrono> #include <chrono>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <windows.h> #include <windows.h>
VideoBackend::VideoBackend(RenderEngine& renderEngine, HealthTelemetry& healthTelemetry, RuntimeEventDispatcher& runtimeEventDispatcher) : VideoBackend::VideoBackend(RenderEngine& renderEngine, HealthTelemetry& healthTelemetry, RuntimeEventDispatcher& runtimeEventDispatcher) :
mHealthTelemetry(healthTelemetry), mHealthTelemetry(healthTelemetry),
mRuntimeEventDispatcher(runtimeEventDispatcher), mRuntimeEventDispatcher(runtimeEventDispatcher),
mPlayoutPolicy(NormalizeVideoPlayoutPolicy(VideoPlayoutPolicy())),
mOutputProductionController(mPlayoutPolicy),
mReadyOutputQueue(mPlayoutPolicy),
mVideoIODevice(std::make_unique<DeckLinkSession>()), mVideoIODevice(std::make_unique<DeckLinkSession>()),
mBridge(std::make_unique<OpenGLVideoIOBridge>(renderEngine)) mBridge(std::make_unique<OpenGLVideoIOBridge>(renderEngine)),
mInputCaptureDisabled(IsEnvironmentFlagEnabled("VST_DISABLE_INPUT_CAPTURE"))
{ {
} }
@@ -24,48 +32,140 @@ VideoBackend::~VideoBackend()
void VideoBackend::ReleaseResources() void VideoBackend::ReleaseResources()
{ {
StopOutputCompletionWorker();
mReadyOutputQueue.Clear();
if (mVideoIODevice) if (mVideoIODevice)
mVideoIODevice->ReleaseResources(); mVideoIODevice->ReleaseResources();
mSystemOutputFramePool.Clear();
if (!VideoBackendLifecycle::CanTransition(mLifecycle.State(), VideoBackendLifecycleState::Stopped))
ApplyLifecycleFailure("Video backend resources released before lifecycle completed.");
ApplyLifecycleTransition(VideoBackendLifecycleState::Stopped, "Video backend resources released.");
}
VideoBackendLifecycleState VideoBackend::LifecycleState() const
{
return mLifecycle.State();
} }
bool VideoBackend::DiscoverDevicesAndModes(const VideoFormatSelection& videoModes, std::string& error) bool VideoBackend::DiscoverDevicesAndModes(const VideoFormatSelection& videoModes, std::string& error)
{ {
return mVideoIODevice->DiscoverDevicesAndModes(videoModes, error); ApplyLifecycleTransition(VideoBackendLifecycleState::Discovering, "Discovering video backend devices and modes.");
if (mVideoIODevice->DiscoverDevicesAndModes(videoModes, error))
return ApplyLifecycleTransition(VideoBackendLifecycleState::Discovered, "Video backend devices and modes discovered.");
ApplyLifecycleFailure(error.empty() ? "Video backend discovery failed." : error);
return false;
} }
bool VideoBackend::SelectPreferredFormats(const VideoFormatSelection& videoModes, bool outputAlphaRequired, std::string& error) bool VideoBackend::SelectPreferredFormats(const VideoFormatSelection& videoModes, bool outputAlphaRequired, std::string& error)
{ {
return mVideoIODevice->SelectPreferredFormats(videoModes, outputAlphaRequired, error); ApplyLifecycleTransition(VideoBackendLifecycleState::Configuring, "Selecting preferred video backend formats.");
if (mVideoIODevice->SelectPreferredFormats(videoModes, outputAlphaRequired, error))
return true;
ApplyLifecycleFailure(error.empty() ? "Video backend format selection failed." : error);
return false;
} }
bool VideoBackend::ConfigureInput(const VideoFormat& inputVideoMode, std::string& error) bool VideoBackend::ConfigureInput(const VideoFormat& inputVideoMode, std::string& error)
{ {
return mVideoIODevice->ConfigureInput( if (mLifecycle.State() != VideoBackendLifecycleState::Configuring)
ApplyLifecycleTransition(VideoBackendLifecycleState::Configuring, "Configuring video backend input.");
if (mInputCaptureDisabled)
{
MutableState().hasInputSource = false;
MutableState().statusMessage = "DeckLink input capture disabled by VST_DISABLE_INPUT_CAPTURE for output timing isolation.";
return true;
}
if (!mVideoIODevice->ConfigureInput(
[this](const VideoIOFrame& frame) { HandleInputFrame(frame); }, [this](const VideoIOFrame& frame) { HandleInputFrame(frame); },
inputVideoMode, inputVideoMode,
error); error))
{
ApplyLifecycleFailure(error.empty() ? "Video backend input configuration failed." : error);
return false;
}
return true;
} }
bool VideoBackend::ConfigureOutput(const VideoFormat& outputVideoMode, bool externalKeyingEnabled, std::string& error) bool VideoBackend::ConfigureOutput(const VideoFormat& outputVideoMode, bool externalKeyingEnabled, std::string& error)
{ {
return mVideoIODevice->ConfigureOutput( mPlayoutPolicy = NormalizeVideoPlayoutPolicy(mPlayoutPolicy);
mOutputProductionController.Configure(mPlayoutPolicy);
mReadyOutputQueue.Configure(mPlayoutPolicy);
if (mLifecycle.State() != VideoBackendLifecycleState::Configuring)
ApplyLifecycleTransition(VideoBackendLifecycleState::Configuring, "Configuring video backend output.");
if (!mVideoIODevice->ConfigureOutput(
[this](const VideoIOCompletion& completion) { HandleOutputFrameCompletion(completion); }, [this](const VideoIOCompletion& completion) { HandleOutputFrameCompletion(completion); },
outputVideoMode, outputVideoMode,
externalKeyingEnabled, externalKeyingEnabled,
error); error))
{
ApplyLifecycleFailure(error.empty() ? "Video backend output configuration failed." : error);
return false;
}
SystemOutputFramePoolConfig poolConfig;
poolConfig.width = mVideoIODevice->OutputFrameWidth();
poolConfig.height = mVideoIODevice->OutputFrameHeight();
poolConfig.pixelFormat = mVideoIODevice->OutputPixelFormat();
poolConfig.rowBytes = mVideoIODevice->OutputFrameRowBytes();
poolConfig.capacity = mPlayoutPolicy.outputFramePoolSize;
mSystemOutputFramePool.Configure(poolConfig);
RecordSystemMemoryPlayoutStats();
return ApplyLifecycleTransition(VideoBackendLifecycleState::Configured, "Video backend configured.");
} }
bool VideoBackend::Start() bool VideoBackend::Start()
{ {
const bool started = mVideoIODevice->Start(); ApplyLifecycleTransition(VideoBackendLifecycleState::Prerolling, "Video backend preroll starting.");
PublishBackendStateChanged(started ? "started" : "start-failed", started ? "Video backend started." : StatusMessage()); if (!mVideoIODevice->PrepareOutputSchedule())
return started; {
ApplyLifecycleFailure(StatusMessage().empty() ? "Video backend output schedule preparation failed." : StatusMessage());
return false;
}
StartOutputCompletionWorker();
StartOutputProducerWorker();
if (!WarmupOutputPreroll())
{
StopOutputProducerWorker();
StopOutputCompletionWorker();
ApplyLifecycleFailure(StatusMessage().empty() ? "Video backend preroll warmup failed." : StatusMessage());
return false;
}
if (!mInputCaptureDisabled && !mVideoIODevice->StartInputStreams())
{
StopOutputProducerWorker();
StopOutputCompletionWorker();
ApplyLifecycleFailure(StatusMessage().empty() ? "Video backend input stream start failed." : StatusMessage());
return false;
}
if (!mVideoIODevice->StartScheduledPlayback())
{
StopOutputProducerWorker();
mVideoIODevice->Stop();
StopOutputCompletionWorker();
ApplyLifecycleFailure(StatusMessage().empty() ? "Video backend scheduled playback start failed." : StatusMessage());
return false;
}
ApplyLifecycleTransition(VideoBackendLifecycleState::Running, "Video backend started.");
return true;
} }
bool VideoBackend::Stop() bool VideoBackend::Stop()
{ {
ApplyLifecycleTransition(VideoBackendLifecycleState::Stopping, "Video backend stopping.");
StopOutputProducerWorker();
const bool stopped = mVideoIODevice->Stop(); const bool stopped = mVideoIODevice->Stop();
PublishBackendStateChanged(stopped ? "stopped" : "stop-failed", stopped ? "Video backend stopped." : StatusMessage()); StopOutputCompletionWorker();
if (stopped)
ApplyLifecycleTransition(VideoBackendLifecycleState::Stopped, "Video backend stopped.");
else
ApplyLifecycleFailure(StatusMessage().empty() ? "Video backend stop failed." : StatusMessage());
return stopped; return stopped;
} }
@@ -94,9 +194,9 @@ bool VideoBackend::ScheduleOutputFrame(const VideoIOOutputFrame& frame)
return mVideoIODevice->ScheduleOutputFrame(frame); return mVideoIODevice->ScheduleOutputFrame(frame);
} }
void VideoBackend::AccountForCompletionResult(VideoIOCompletionResult result) VideoPlayoutRecoveryDecision VideoBackend::AccountForCompletionResult(VideoIOCompletionResult result, uint64_t readyQueueDepth)
{ {
mVideoIODevice->AccountForCompletionResult(result); return mVideoIODevice->AccountForCompletionResult(result, readyQueueDepth);
} }
bool VideoBackend::HasInputDevice() const bool VideoBackend::HasInputDevice() const
@@ -106,6 +206,8 @@ bool VideoBackend::HasInputDevice() const
bool VideoBackend::HasInputSource() const bool VideoBackend::HasInputSource() const
{ {
if (mInputCaptureDisabled)
return false;
return mVideoIODevice->HasInputSource(); return mVideoIODevice->HasInputSource();
} }
@@ -179,6 +281,12 @@ const std::string& VideoBackend::StatusMessage() const
return mVideoIODevice->StatusMessage(); return mVideoIODevice->StatusMessage();
} }
bool VideoBackend::ShouldPrioritizeOutputOverPreview() const
{
const RenderOutputQueueMetrics metrics = mReadyOutputQueue.GetMetrics();
return metrics.depth < static_cast<std::size_t>(mPlayoutPolicy.targetReadyFrames);
}
void VideoBackend::SetStatusMessage(const std::string& message) void VideoBackend::SetStatusMessage(const std::string& message)
{ {
mVideoIODevice->SetStatusMessage(message); mVideoIODevice->SetStatusMessage(message);
@@ -198,7 +306,7 @@ void VideoBackend::PublishStatus(bool externalKeyingConfigured, const std::strin
externalKeyingConfigured, externalKeyingConfigured,
ExternalKeyingActive(), ExternalKeyingActive(),
StatusMessage()); StatusMessage());
PublishBackendStateChanged("status", StatusMessage()); PublishBackendStateChanged(VideoBackendLifecycle::StateName(mLifecycle.State()), StatusMessage());
} }
void VideoBackend::ReportNoInputDeviceSignalStatus() void VideoBackend::ReportNoInputDeviceSignalStatus()
@@ -213,6 +321,9 @@ void VideoBackend::ReportNoInputDeviceSignalStatus()
void VideoBackend::HandleInputFrame(const VideoIOFrame& frame) void VideoBackend::HandleInputFrame(const VideoIOFrame& frame)
{ {
if (mInputCaptureDisabled)
return;
const VideoIOState& state = mVideoIODevice->State(); const VideoIOState& state = mVideoIODevice->State();
mHealthTelemetry.TryReportSignalStatus(!frame.hasNoInputSource, state.inputFrameSize.width, state.inputFrameSize.height, state.inputDisplayModeName); mHealthTelemetry.TryReportSignalStatus(!frame.hasNoInputSource, state.inputFrameSize.width, state.inputFrameSize.height, state.inputDisplayModeName);
PublishInputSignalChanged(frame, state); PublishInputSignalChanged(frame, state);
@@ -224,30 +335,477 @@ void VideoBackend::HandleInputFrame(const VideoIOFrame& frame)
void VideoBackend::HandleOutputFrameCompletion(const VideoIOCompletion& completion) void VideoBackend::HandleOutputFrameCompletion(const VideoIOCompletion& completion)
{ {
RecordFramePacing(completion.result);
PublishOutputFrameCompleted(completion);
VideoIOOutputFrame outputFrame;
if (!BeginOutputFrame(outputFrame))
return;
const VideoIOState& state = mVideoIODevice->State();
bool rendered = true;
if (mBridge)
rendered = mBridge->RenderScheduledFrame(state, completion, outputFrame);
EndOutputFrame(outputFrame);
AccountForCompletionResult(completion.result);
if (!rendered)
{ {
PublishBackendStateChanged("output-render-failed", "Output frame render request failed; skipping schedule for this frame."); std::lock_guard<std::mutex> lock(mOutputCompletionMutex);
if (!mOutputCompletionWorkerRunning || mOutputCompletionWorkerStopping)
return; return;
mPendingOutputCompletions.push_back(completion);
}
mOutputCompletionCondition.notify_one();
} }
// Schedule the next frame after render work is complete so device-side void VideoBackend::StartOutputCompletionWorker()
// bookkeeping stays with the backend seam and the bridge stays render-only. {
if (ScheduleOutputFrame(outputFrame)) {
std::lock_guard<std::mutex> lock(mOutputCompletionMutex);
if (mOutputCompletionWorkerRunning)
return;
mPendingOutputCompletions.clear();
mReadyOutputQueue.Clear();
mNextReadyOutputFrameIndex = 0;
mHasReadyQueueDepthBaseline = false;
mMinReadyQueueDepth = 0;
mMaxReadyQueueDepth = 0;
mReadyQueueZeroDepthCount = 0;
mOutputRenderMilliseconds = 0.0;
mSmoothedOutputRenderMilliseconds = 0.0;
mMaxOutputRenderMilliseconds = 0.0;
mOutputFrameAcquireMilliseconds = 0.0;
mOutputFrameRenderRequestMilliseconds = 0.0;
mOutputFrameEndAccessMilliseconds = 0.0;
mLastLateStreak = 0;
mLastDropStreak = 0;
mOutputCompletionWorkerStopping = false;
mOutputCompletionWorkerRunning = true;
mOutputCompletionWorker = std::thread(&VideoBackend::OutputCompletionWorkerMain, this);
}
}
void VideoBackend::StopOutputCompletionWorker()
{
StopOutputProducerWorker();
bool shouldJoin = false;
{
std::lock_guard<std::mutex> lock(mOutputCompletionMutex);
if (mOutputCompletionWorkerRunning)
mOutputCompletionWorkerStopping = true;
shouldJoin = mOutputCompletionWorker.joinable();
}
mOutputCompletionCondition.notify_one();
if (shouldJoin)
mOutputCompletionWorker.join();
}
void VideoBackend::StartOutputProducerWorker()
{
std::lock_guard<std::mutex> lock(mOutputProducerMutex);
if (mOutputProducerWorkerRunning)
return;
const double frameBudgetMilliseconds = State().frameBudgetMilliseconds;
const auto frameDuration = frameBudgetMilliseconds > 0.0
? std::chrono::duration_cast<RenderCadenceController::Duration>(
std::chrono::duration<double, std::milli>(frameBudgetMilliseconds))
: std::chrono::milliseconds(16);
mRenderCadenceController.Configure(frameDuration, std::chrono::steady_clock::now());
mLastOutputProductionCompletion = VideoIOCompletion();
mLastOutputProductionTime = std::chrono::steady_clock::time_point();
mOutputProducerWorkerStopping = false;
mOutputProducerWorkerRunning = true;
mOutputProducerWorker = std::thread(&VideoBackend::OutputProducerWorkerMain, this);
mOutputProducerCondition.notify_one();
}
void VideoBackend::StopOutputProducerWorker()
{
bool shouldJoin = false;
{
std::lock_guard<std::mutex> lock(mOutputProducerMutex);
if (mOutputProducerWorkerRunning)
mOutputProducerWorkerStopping = true;
shouldJoin = mOutputProducerWorker.joinable();
}
mOutputProducerCondition.notify_one();
if (shouldJoin)
mOutputProducerWorker.join();
}
void VideoBackend::NotifyOutputProducer()
{
mOutputProducerCondition.notify_one();
}
bool VideoBackend::WarmupOutputPreroll()
{
const VideoPlayoutPolicy policy = NormalizeVideoPlayoutPolicy(mPlayoutPolicy);
const std::size_t targetPrerollFrames = static_cast<std::size_t>(policy.targetPrerollFrames);
if (targetPrerollFrames == 0)
return true;
const double frameBudgetMilliseconds = State().frameBudgetMilliseconds > 0.0 ? State().frameBudgetMilliseconds : 16.0;
const auto estimatedCadenceTime = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::duration<double, std::milli>(frameBudgetMilliseconds * static_cast<double>(targetPrerollFrames + 2)));
const auto timeout = (std::max)(std::chrono::milliseconds(1000), estimatedCadenceTime + std::chrono::milliseconds(500));
const auto deadline = std::chrono::steady_clock::now() + timeout;
while (std::chrono::steady_clock::now() < deadline)
{
ScheduleReadyOutputFramesToTarget();
const SystemOutputFramePoolMetrics metrics = mSystemOutputFramePool.GetMetrics();
RecordSystemMemoryPlayoutStats();
if (metrics.scheduledCount >= targetPrerollFrames)
return true;
NotifyOutputProducer();
const auto waitDuration = (std::min)(OutputProducerWakeInterval(), std::chrono::milliseconds(5));
std::unique_lock<std::mutex> lock(mOutputProducerMutex);
mOutputProducerCondition.wait_for(lock, waitDuration);
if (mOutputProducerWorkerStopping)
return false;
}
SetStatusMessage("Timed out warming up DeckLink preroll from rendered system-memory frames.");
return false;
}
void VideoBackend::OutputCompletionWorkerMain()
{
for (;;)
{
VideoIOCompletion completion;
{
std::unique_lock<std::mutex> lock(mOutputCompletionMutex);
mOutputCompletionCondition.wait(lock, [this]() {
return mOutputCompletionWorkerStopping || !mPendingOutputCompletions.empty();
});
if (mPendingOutputCompletions.empty())
{
if (mOutputCompletionWorkerStopping)
{
mOutputCompletionWorkerRunning = false;
return;
}
continue;
}
completion = mPendingOutputCompletions.front();
mPendingOutputCompletions.pop_front();
}
ProcessOutputFrameCompletion(completion);
}
}
void VideoBackend::OutputProducerWorkerMain()
{
for (;;)
{
{
std::lock_guard<std::mutex> lock(mOutputProducerMutex);
if (mOutputProducerWorkerStopping)
{
mOutputProducerWorkerRunning = false;
return;
}
}
ScheduleReadyOutputFramesToTarget();
const RenderOutputQueueMetrics metrics = mReadyOutputQueue.GetMetrics();
RecordReadyQueueDepthSample(metrics);
const auto now = std::chrono::steady_clock::now();
RenderCadenceDecision cadenceDecision = mRenderCadenceController.Tick(now);
if (cadenceDecision.action == RenderCadenceAction::Wait)
{
const auto waitDuration = (std::min)(
std::chrono::duration_cast<std::chrono::milliseconds>(cadenceDecision.waitDuration),
OutputProducerWakeInterval());
std::unique_lock<std::mutex> lock(mOutputProducerMutex);
mOutputProducerCondition.wait_for(lock, waitDuration);
if (mOutputProducerWorkerStopping)
{
mOutputProducerWorkerRunning = false;
return;
}
continue;
}
VideoIOCompletion completion;
{
std::lock_guard<std::mutex> lock(mOutputProducerMutex);
if (mOutputProducerWorkerStopping)
continue;
completion = mLastOutputProductionCompletion;
}
const std::size_t producedFrames = ProduceReadyOutputFrames(completion, 1);
if (producedFrames > 0)
{
mLastOutputProductionTime = std::chrono::steady_clock::now();
ScheduleReadyOutputFramesToTarget();
continue;
}
{
std::unique_lock<std::mutex> lock(mOutputProducerMutex);
mOutputProducerCondition.wait_for(lock, OutputProducerWakeInterval());
if (mOutputProducerWorkerStopping)
{
mOutputProducerWorkerRunning = false;
return;
}
}
}
}
std::chrono::milliseconds VideoBackend::OutputProducerWakeInterval() const
{
const double frameBudgetMilliseconds = State().frameBudgetMilliseconds;
if (frameBudgetMilliseconds <= 0.0)
return std::chrono::milliseconds(8);
const int intervalMilliseconds = (std::max)(1, static_cast<int>(std::floor(frameBudgetMilliseconds * 0.75)));
return std::chrono::milliseconds(intervalMilliseconds);
}
void VideoBackend::ProcessOutputFrameCompletion(const VideoIOCompletion& completion)
{
if (completion.outputFrameBuffer != nullptr)
mSystemOutputFramePool.ReleaseSlotByBuffer(completion.outputFrameBuffer);
RecordFramePacing(completion.result);
PublishOutputFrameCompleted(completion);
const RenderOutputQueueMetrics initialQueueMetrics = mReadyOutputQueue.GetMetrics();
RecordReadyQueueDepthSample(initialQueueMetrics);
const VideoPlayoutRecoveryDecision recoveryDecision = AccountForCompletionResult(completion.result, initialQueueMetrics.depth);
{
std::lock_guard<std::mutex> lock(mOutputMetricsMutex);
mLastLateStreak = recoveryDecision.lateStreak;
mLastDropStreak = recoveryDecision.dropStreak;
}
{
std::lock_guard<std::mutex> lock(mOutputProducerMutex);
mLastOutputProductionCompletion = completion;
}
NotifyOutputProducer();
RecordBackendPlayoutHealth(completion.result, recoveryDecision);
RecordSystemMemoryPlayoutStats();
}
std::size_t VideoBackend::ScheduleReadyOutputFramesToTarget()
{
const std::size_t targetScheduledFrames = static_cast<std::size_t>(mPlayoutPolicy.targetPrerollFrames);
std::size_t scheduledFrames = 0;
for (;;)
{
const SystemOutputFramePoolMetrics poolMetrics = mSystemOutputFramePool.GetMetrics();
if (poolMetrics.scheduledCount >= targetScheduledFrames)
break;
if (!ScheduleReadyOutputFrame())
break;
++scheduledFrames;
}
return scheduledFrames;
}
void VideoBackend::RecordBackendPlayoutHealth(VideoIOCompletionResult result, const VideoPlayoutRecoveryDecision& recoveryDecision)
{
const RenderOutputQueueMetrics queueMetrics = mReadyOutputQueue.GetMetrics();
std::size_t minReadyQueueDepth = 0;
std::size_t maxReadyQueueDepth = 0;
uint64_t readyQueueZeroDepthCount = 0;
double outputRenderMilliseconds = 0.0;
double smoothedOutputRenderMilliseconds = 0.0;
double maxOutputRenderMilliseconds = 0.0;
double outputFrameAcquireMilliseconds = 0.0;
double outputFrameRenderRequestMilliseconds = 0.0;
double outputFrameEndAccessMilliseconds = 0.0;
{
std::lock_guard<std::mutex> lock(mOutputMetricsMutex);
minReadyQueueDepth = mMinReadyQueueDepth;
maxReadyQueueDepth = mMaxReadyQueueDepth;
readyQueueZeroDepthCount = mReadyQueueZeroDepthCount;
outputRenderMilliseconds = mOutputRenderMilliseconds;
smoothedOutputRenderMilliseconds = mSmoothedOutputRenderMilliseconds;
maxOutputRenderMilliseconds = mMaxOutputRenderMilliseconds;
outputFrameAcquireMilliseconds = mOutputFrameAcquireMilliseconds;
outputFrameRenderRequestMilliseconds = mOutputFrameRenderRequestMilliseconds;
outputFrameEndAccessMilliseconds = mOutputFrameEndAccessMilliseconds;
}
mHealthTelemetry.TryRecordBackendPlayoutHealth(
VideoBackendLifecycle::StateName(mLifecycle.State()),
CompletionResultName(result),
queueMetrics.depth,
queueMetrics.capacity,
queueMetrics.pushedCount,
minReadyQueueDepth,
maxReadyQueueDepth,
readyQueueZeroDepthCount,
queueMetrics.poppedCount,
queueMetrics.droppedCount,
queueMetrics.underrunCount,
outputRenderMilliseconds,
smoothedOutputRenderMilliseconds,
maxOutputRenderMilliseconds,
outputFrameAcquireMilliseconds,
outputFrameRenderRequestMilliseconds,
outputFrameEndAccessMilliseconds,
recoveryDecision.completedFrameIndex,
recoveryDecision.scheduledFrameIndex,
recoveryDecision.scheduledLeadFrames,
recoveryDecision.measuredLagFrames,
recoveryDecision.catchUpFrames,
recoveryDecision.lateStreak,
recoveryDecision.dropStreak,
mLateFrameCount,
mDroppedFrameCount,
mFlushedFrameCount,
mLifecycle.State() == VideoBackendLifecycleState::Degraded,
StatusMessage());
}
std::size_t VideoBackend::ProduceReadyOutputFrames(const VideoIOCompletion& completion, std::size_t maxFrames)
{
if (maxFrames == 0)
return 0;
std::lock_guard<std::mutex> productionLock(mOutputProductionMutex);
RenderOutputQueueMetrics metrics = mReadyOutputQueue.GetMetrics();
std::size_t producedFrames = 0;
while (producedFrames < maxFrames)
{
if (!RenderReadyOutputFrame(mVideoIODevice->State(), completion))
break;
++producedFrames;
metrics = mReadyOutputQueue.GetMetrics();
RecordReadyQueueDepthSample(metrics);
}
return producedFrames;
}
OutputProductionPressure VideoBackend::BuildOutputProductionPressure(const RenderOutputQueueMetrics& metrics) const
{
OutputProductionPressure pressure;
pressure.readyQueueDepth = metrics.depth;
pressure.readyQueueCapacity = metrics.capacity;
pressure.readyQueueUnderrunCount = metrics.underrunCount;
{
std::lock_guard<std::mutex> lock(mOutputMetricsMutex);
pressure.lateStreak = mLastLateStreak;
pressure.dropStreak = mLastDropStreak;
}
return pressure;
}
bool VideoBackend::RenderReadyOutputFrame(const VideoIOState& state, const VideoIOCompletion& completion)
{
const auto renderStart = std::chrono::steady_clock::now();
OutputFrameSlot outputSlot;
VideoIOOutputFrame outputFrame;
const auto acquireStart = std::chrono::steady_clock::now();
if (!mSystemOutputFramePool.AcquireFreeSlot(outputSlot))
{
if (!mReadyOutputQueue.DropOldestFrame() || !mSystemOutputFramePool.AcquireFreeSlot(outputSlot))
return false;
}
outputFrame = outputSlot.frame;
const auto acquireEnd = std::chrono::steady_clock::now();
bool rendered = true;
const auto renderRequestStart = std::chrono::steady_clock::now();
if (mBridge)
rendered = mBridge->RenderScheduledFrame(state, completion, outputFrame);
const auto renderRequestEnd = std::chrono::steady_clock::now();
const auto endAccessStart = std::chrono::steady_clock::now();
const bool publishedReady = mSystemOutputFramePool.PublishReadySlot(outputSlot);
const auto endAccessEnd = std::chrono::steady_clock::now();
const double acquireMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(acquireEnd - acquireStart).count();
const double renderRequestMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(renderRequestEnd - renderRequestStart).count();
const double endAccessMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(endAccessEnd - endAccessStart).count();
if (!rendered)
{
mSystemOutputFramePool.ReleaseSlot(outputSlot);
ApplyLifecycleTransition(VideoBackendLifecycleState::Degraded, "Output frame render request failed; skipping schedule for this frame.");
const double renderMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
std::chrono::steady_clock::now() - renderStart).count();
RecordOutputRenderDuration(renderMilliseconds, acquireMilliseconds, renderRequestMilliseconds, endAccessMilliseconds);
return false;
}
if (!publishedReady)
{
mSystemOutputFramePool.ReleaseSlot(outputSlot);
return false;
}
const double renderMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
std::chrono::steady_clock::now() - renderStart).count();
RecordOutputRenderDuration(renderMilliseconds, acquireMilliseconds, renderRequestMilliseconds, endAccessMilliseconds);
RenderOutputFrame readyFrame;
readyFrame.frame = outputFrame;
readyFrame.frameIndex = ++mNextReadyOutputFrameIndex;
readyFrame.releaseFrame = [this](VideoIOOutputFrame& frame) {
mSystemOutputFramePool.ReleaseSlotByBuffer(frame.bytes);
};
const bool pushed = mReadyOutputQueue.Push(readyFrame);
if (!pushed)
mSystemOutputFramePool.ReleaseSlot(outputSlot);
RecordSystemMemoryPlayoutStats();
return pushed;
}
bool VideoBackend::ScheduleReadyOutputFrame()
{
std::lock_guard<std::mutex> schedulingLock(mOutputSchedulingMutex);
RenderOutputFrame readyFrame;
if (!mReadyOutputQueue.TryPop(readyFrame))
return false;
RecordReadyQueueDepthSample(mReadyOutputQueue.GetMetrics());
if (!mSystemOutputFramePool.MarkScheduledByBuffer(readyFrame.frame.bytes))
{
if (readyFrame.releaseFrame)
readyFrame.releaseFrame(readyFrame.frame);
return false;
}
if (!ScheduleOutputFrame(readyFrame.frame))
{
RecordDeckLinkBufferTelemetry();
mSystemOutputFramePool.ReleaseSlotByBuffer(readyFrame.frame.bytes);
return false;
}
RecordDeckLinkBufferTelemetry();
PublishOutputFrameScheduled(readyFrame.frame);
RecordSystemMemoryPlayoutStats();
return true;
}
bool VideoBackend::ScheduleBlackUnderrunFrame()
{
VideoIOOutputFrame outputFrame;
if (!BeginOutputFrame(outputFrame))
{
ApplyLifecycleTransition(VideoBackendLifecycleState::Degraded, "Output underrun: no output frame was available for fallback scheduling.");
return false;
}
if (outputFrame.bytes != nullptr && outputFrame.rowBytes > 0 && outputFrame.height > 0)
std::memset(outputFrame.bytes, 0, static_cast<std::size_t>(outputFrame.rowBytes) * outputFrame.height);
EndOutputFrame(outputFrame);
if (!ScheduleOutputFrame(outputFrame))
{
RecordDeckLinkBufferTelemetry();
ApplyLifecycleTransition(VideoBackendLifecycleState::Degraded, "Output underrun: black fallback frame scheduling failed.");
return false;
}
RecordDeckLinkBufferTelemetry();
ApplyLifecycleTransition(VideoBackendLifecycleState::Degraded, "Output underrun: scheduled black fallback frame.");
PublishOutputFrameScheduled(outputFrame); PublishOutputFrameScheduled(outputFrame);
return true;
} }
void VideoBackend::RecordFramePacing(VideoIOCompletionResult completionResult) void VideoBackend::RecordFramePacing(VideoIOCompletionResult completionResult)
@@ -283,6 +841,98 @@ void VideoBackend::RecordFramePacing(VideoIOCompletionResult completionResult)
PublishTimingSample("VideoBackend", "smoothedCompletionInterval", mSmoothedCompletionIntervalMilliseconds, "ms"); PublishTimingSample("VideoBackend", "smoothedCompletionInterval", mSmoothedCompletionIntervalMilliseconds, "ms");
} }
void VideoBackend::RecordReadyQueueDepthSample(const RenderOutputQueueMetrics& metrics)
{
std::lock_guard<std::mutex> lock(mOutputMetricsMutex);
if (!mHasReadyQueueDepthBaseline)
{
mHasReadyQueueDepthBaseline = true;
mMinReadyQueueDepth = metrics.depth;
mMaxReadyQueueDepth = metrics.depth;
}
else
{
mMinReadyQueueDepth = (std::min)(mMinReadyQueueDepth, metrics.depth);
mMaxReadyQueueDepth = (std::max)(mMaxReadyQueueDepth, metrics.depth);
}
if (metrics.depth == 0)
++mReadyQueueZeroDepthCount;
}
void VideoBackend::RecordDeckLinkBufferTelemetry()
{
if (!mVideoIODevice)
return;
const VideoIOState& state = mVideoIODevice->State();
mHealthTelemetry.TryRecordDeckLinkBufferTelemetry(
state.actualDeckLinkBufferedFramesAvailable,
state.actualDeckLinkBufferedFrames,
static_cast<std::size_t>(mPlayoutPolicy.targetPrerollFrames),
state.deckLinkScheduleCallMilliseconds,
state.deckLinkScheduleFailureCount);
}
void VideoBackend::RecordSystemMemoryPlayoutStats()
{
const SystemOutputFramePoolMetrics poolMetrics = mSystemOutputFramePool.GetMetrics();
const RenderOutputQueueMetrics queueMetrics = mReadyOutputQueue.GetMetrics();
RecordDeckLinkBufferTelemetry();
mHealthTelemetry.TryRecordSystemMemoryPlayoutStats(
poolMetrics.freeCount,
poolMetrics.readyCount,
poolMetrics.scheduledCount,
poolMetrics.readyUnderrunCount,
0,
queueMetrics.droppedCount,
0.0,
0.0);
}
void VideoBackend::RecordOutputRenderDuration(double renderMilliseconds, double acquireMilliseconds, double renderRequestMilliseconds, double endAccessMilliseconds)
{
std::lock_guard<std::mutex> lock(mOutputMetricsMutex);
mOutputRenderMilliseconds = (std::max)(renderMilliseconds, 0.0);
if (mSmoothedOutputRenderMilliseconds <= 0.0)
mSmoothedOutputRenderMilliseconds = mOutputRenderMilliseconds;
else
mSmoothedOutputRenderMilliseconds = mSmoothedOutputRenderMilliseconds * 0.9 + mOutputRenderMilliseconds * 0.1;
mMaxOutputRenderMilliseconds = (std::max)(mMaxOutputRenderMilliseconds, mOutputRenderMilliseconds);
mOutputFrameAcquireMilliseconds = (std::max)(acquireMilliseconds, 0.0);
mOutputFrameRenderRequestMilliseconds = (std::max)(renderRequestMilliseconds, 0.0);
mOutputFrameEndAccessMilliseconds = (std::max)(endAccessMilliseconds, 0.0);
PublishTimingSample("VideoBackend", "outputRender", mOutputRenderMilliseconds, "ms");
PublishTimingSample("VideoBackend", "smoothedOutputRender", mSmoothedOutputRenderMilliseconds, "ms");
}
bool VideoBackend::ApplyLifecycleTransition(VideoBackendLifecycleState state, const std::string& message)
{
const VideoBackendLifecycleTransition transition = mLifecycle.TransitionTo(state, message);
if (!transition.accepted)
{
PublishBackendStateChanged(VideoBackendLifecycle::StateName(transition.current), transition.errorMessage);
return false;
}
PublishBackendStateChanged(VideoBackendLifecycle::StateName(transition.current), message);
return true;
}
bool VideoBackend::ApplyLifecycleFailure(const std::string& message)
{
const VideoBackendLifecycleTransition transition = mLifecycle.Fail(message);
if (!transition.accepted)
{
PublishBackendStateChanged(VideoBackendLifecycle::StateName(transition.current), transition.errorMessage);
return false;
}
PublishBackendStateChanged(VideoBackendLifecycle::StateName(transition.current), message);
return true;
}
void VideoBackend::PublishBackendStateChanged(const std::string& state, const std::string& message) void VideoBackend::PublishBackendStateChanged(const std::string& state, const std::string& message)
{ {
try try
@@ -428,3 +1078,18 @@ std::string VideoBackend::PixelFormatName(VideoIOPixelFormat pixelFormat)
{ {
return std::string(VideoIOPixelFormatName(pixelFormat)); return std::string(VideoIOPixelFormatName(pixelFormat));
} }
bool VideoBackend::IsEnvironmentFlagEnabled(const char* name)
{
if (name == nullptr || name[0] == '\0')
return false;
char* value = nullptr;
std::size_t valueSize = 0;
if (_dupenv_s(&value, &valueSize, name) != 0 || value == nullptr)
return false;
const std::string flag(value);
std::free(value);
return flag == "1" || flag == "true" || flag == "TRUE" || flag == "yes" || flag == "on";
}

View File

@@ -1,11 +1,21 @@
#pragma once #pragma once
#include "OutputProductionController.h"
#include "RenderCadenceController.h"
#include "RenderOutputQueue.h"
#include "SystemOutputFramePool.h"
#include "VideoBackendLifecycle.h"
#include "VideoIOTypes.h" #include "VideoIOTypes.h"
#include "VideoPlayoutPolicy.h"
#include <chrono> #include <chrono>
#include <condition_variable>
#include <cstdint> #include <cstdint>
#include <deque>
#include <memory> #include <memory>
#include <mutex>
#include <string> #include <string>
#include <thread>
class HealthTelemetry; class HealthTelemetry;
class OpenGLVideoIOBridge; class OpenGLVideoIOBridge;
@@ -20,6 +30,7 @@ public:
~VideoBackend(); ~VideoBackend();
void ReleaseResources(); void ReleaseResources();
VideoBackendLifecycleState LifecycleState() const;
bool DiscoverDevicesAndModes(const VideoFormatSelection& videoModes, std::string& error); bool DiscoverDevicesAndModes(const VideoFormatSelection& videoModes, std::string& error);
bool SelectPreferredFormats(const VideoFormatSelection& videoModes, bool outputAlphaRequired, std::string& error); bool SelectPreferredFormats(const VideoFormatSelection& videoModes, bool outputAlphaRequired, std::string& error);
bool ConfigureInput(const VideoFormat& inputVideoMode, std::string& error); bool ConfigureInput(const VideoFormat& inputVideoMode, std::string& error);
@@ -32,7 +43,8 @@ public:
bool BeginOutputFrame(VideoIOOutputFrame& frame); bool BeginOutputFrame(VideoIOOutputFrame& frame);
void EndOutputFrame(VideoIOOutputFrame& frame); void EndOutputFrame(VideoIOOutputFrame& frame);
bool ScheduleOutputFrame(const VideoIOOutputFrame& frame); bool ScheduleOutputFrame(const VideoIOOutputFrame& frame);
void AccountForCompletionResult(VideoIOCompletionResult result); VideoPlayoutRecoveryDecision AccountForCompletionResult(VideoIOCompletionResult result, uint64_t readyQueueDepth);
void RecordBackendPlayoutHealth(VideoIOCompletionResult result, const VideoPlayoutRecoveryDecision& recoveryDecision);
bool HasInputDevice() const; bool HasInputDevice() const;
bool HasInputSource() const; bool HasInputSource() const;
@@ -50,6 +62,7 @@ public:
bool KeyerInterfaceAvailable() const; bool KeyerInterfaceAvailable() const;
bool ExternalKeyingActive() const; bool ExternalKeyingActive() const;
const std::string& StatusMessage() const; const std::string& StatusMessage() const;
bool ShouldPrioritizeOutputOverPreview() const;
void SetStatusMessage(const std::string& message); void SetStatusMessage(const std::string& message);
void PublishStatus(bool externalKeyingConfigured, const std::string& statusMessage = std::string()); void PublishStatus(bool externalKeyingConfigured, const std::string& statusMessage = std::string());
void ReportNoInputDeviceSignalStatus(); void ReportNoInputDeviceSignalStatus();
@@ -57,7 +70,29 @@ public:
private: private:
void HandleInputFrame(const VideoIOFrame& frame); void HandleInputFrame(const VideoIOFrame& frame);
void HandleOutputFrameCompletion(const VideoIOCompletion& completion); void HandleOutputFrameCompletion(const VideoIOCompletion& completion);
void StartOutputCompletionWorker();
void StopOutputCompletionWorker();
void OutputCompletionWorkerMain();
void StartOutputProducerWorker();
void StopOutputProducerWorker();
void OutputProducerWorkerMain();
void NotifyOutputProducer();
bool WarmupOutputPreroll();
std::chrono::milliseconds OutputProducerWakeInterval() const;
void ProcessOutputFrameCompletion(const VideoIOCompletion& completion);
std::size_t ProduceReadyOutputFrames(const VideoIOCompletion& completion, std::size_t maxFrames);
OutputProductionPressure BuildOutputProductionPressure(const RenderOutputQueueMetrics& metrics) const;
bool RenderReadyOutputFrame(const VideoIOState& state, const VideoIOCompletion& completion);
std::size_t ScheduleReadyOutputFramesToTarget();
bool ScheduleReadyOutputFrame();
bool ScheduleBlackUnderrunFrame();
void RecordFramePacing(VideoIOCompletionResult completionResult); void RecordFramePacing(VideoIOCompletionResult completionResult);
void RecordReadyQueueDepthSample(const RenderOutputQueueMetrics& metrics);
void RecordDeckLinkBufferTelemetry();
void RecordSystemMemoryPlayoutStats();
void RecordOutputRenderDuration(double renderMilliseconds, double acquireMilliseconds, double renderRequestMilliseconds, double endAccessMilliseconds);
bool ApplyLifecycleTransition(VideoBackendLifecycleState state, const std::string& message);
bool ApplyLifecycleFailure(const std::string& message);
void PublishBackendStateChanged(const std::string& state, const std::string& message); void PublishBackendStateChanged(const std::string& state, const std::string& message);
void PublishInputSignalChanged(const VideoIOFrame& frame, const VideoIOState& state); void PublishInputSignalChanged(const VideoIOFrame& frame, const VideoIOState& state);
void PublishInputFrameArrived(const VideoIOFrame& frame); void PublishInputFrameArrived(const VideoIOFrame& frame);
@@ -66,11 +101,36 @@ private:
void PublishTimingSample(const std::string& subsystem, const std::string& metric, double value, const std::string& unit); void PublishTimingSample(const std::string& subsystem, const std::string& metric, double value, const std::string& unit);
static std::string CompletionResultName(VideoIOCompletionResult result); static std::string CompletionResultName(VideoIOCompletionResult result);
static std::string PixelFormatName(VideoIOPixelFormat pixelFormat); static std::string PixelFormatName(VideoIOPixelFormat pixelFormat);
static bool IsEnvironmentFlagEnabled(const char* name);
HealthTelemetry& mHealthTelemetry; HealthTelemetry& mHealthTelemetry;
RuntimeEventDispatcher& mRuntimeEventDispatcher; RuntimeEventDispatcher& mRuntimeEventDispatcher;
VideoBackendLifecycle mLifecycle;
VideoPlayoutPolicy mPlayoutPolicy;
OutputProductionController mOutputProductionController;
RenderCadenceController mRenderCadenceController;
RenderOutputQueue mReadyOutputQueue;
SystemOutputFramePool mSystemOutputFramePool;
std::unique_ptr<VideoIODevice> mVideoIODevice; std::unique_ptr<VideoIODevice> mVideoIODevice;
std::unique_ptr<OpenGLVideoIOBridge> mBridge; std::unique_ptr<OpenGLVideoIOBridge> mBridge;
std::mutex mOutputCompletionMutex;
std::condition_variable mOutputCompletionCondition;
std::deque<VideoIOCompletion> mPendingOutputCompletions;
std::thread mOutputCompletionWorker;
std::mutex mOutputProducerMutex;
std::condition_variable mOutputProducerCondition;
std::thread mOutputProducerWorker;
VideoIOCompletion mLastOutputProductionCompletion;
std::chrono::steady_clock::time_point mLastOutputProductionTime;
std::mutex mOutputProductionMutex;
std::mutex mOutputSchedulingMutex;
mutable std::mutex mOutputMetricsMutex;
bool mOutputCompletionWorkerRunning = false;
bool mOutputCompletionWorkerStopping = false;
bool mOutputProducerWorkerRunning = false;
bool mOutputProducerWorkerStopping = false;
bool mInputCaptureDisabled = false;
uint64_t mNextReadyOutputFrameIndex = 0;
uint64_t mInputFrameIndex = 0; uint64_t mInputFrameIndex = 0;
uint64_t mOutputFrameScheduleIndex = 0; uint64_t mOutputFrameScheduleIndex = 0;
uint64_t mOutputFrameCompletionIndex = 0; uint64_t mOutputFrameCompletionIndex = 0;
@@ -83,6 +143,18 @@ private:
double mCompletionIntervalMilliseconds = 0.0; double mCompletionIntervalMilliseconds = 0.0;
double mSmoothedCompletionIntervalMilliseconds = 0.0; double mSmoothedCompletionIntervalMilliseconds = 0.0;
double mMaxCompletionIntervalMilliseconds = 0.0; double mMaxCompletionIntervalMilliseconds = 0.0;
bool mHasReadyQueueDepthBaseline = false;
std::size_t mMinReadyQueueDepth = 0;
std::size_t mMaxReadyQueueDepth = 0;
uint64_t mReadyQueueZeroDepthCount = 0;
double mOutputRenderMilliseconds = 0.0;
double mSmoothedOutputRenderMilliseconds = 0.0;
double mMaxOutputRenderMilliseconds = 0.0;
double mOutputFrameAcquireMilliseconds = 0.0;
double mOutputFrameRenderRequestMilliseconds = 0.0;
double mOutputFrameEndAccessMilliseconds = 0.0;
uint64_t mLastLateStreak = 0;
uint64_t mLastDropStreak = 0;
uint64_t mLateFrameCount = 0; uint64_t mLateFrameCount = 0;
uint64_t mDroppedFrameCount = 0; uint64_t mDroppedFrameCount = 0;
uint64_t mFlushedFrameCount = 0; uint64_t mFlushedFrameCount = 0;

View File

@@ -0,0 +1,123 @@
#include "VideoBackendLifecycle.h"
// Returns the current lifecycle state.
VideoBackendLifecycleState VideoBackendLifecycle::State() const
{
return mState;
}
// Returns the reason recorded by the last accepted Fail() call.
// Cleared whenever a transition lands on any non-Failed state.
const std::string& VideoBackendLifecycle::FailureReason() const
{
return mFailureReason;
}
// Attempts to move the lifecycle to `next`, returning a record of the
// outcome. A rejected transition leaves the state untouched and fills in
// an explanatory error message instead.
VideoBackendLifecycleTransition VideoBackendLifecycle::TransitionTo(VideoBackendLifecycleState next, const std::string& reason)
{
    VideoBackendLifecycleTransition outcome;
    outcome.previous = mState;
    outcome.reason = reason;
    outcome.accepted = CanTransition(mState, next);

    if (!outcome.accepted)
    {
        // Rejected: report the unchanged state and describe the bad request.
        outcome.current = mState;
        outcome.errorMessage = std::string("Invalid video backend lifecycle transition from ") +
            StateName(mState) + " to " + StateName(next) + ".";
        return outcome;
    }

    mState = next;
    outcome.current = mState;

    // Landing anywhere other than Failed clears the stored failure reason.
    if (mState != VideoBackendLifecycleState::Failed)
        mFailureReason.clear();
    return outcome;
}
// Forces the lifecycle into the Failed state, remembering `reason` so
// FailureReason() can report it later. The reason is only stored when the
// transition to Failed is actually accepted.
VideoBackendLifecycleTransition VideoBackendLifecycle::Fail(const std::string& reason)
{
    const VideoBackendLifecycleTransition outcome = TransitionTo(VideoBackendLifecycleState::Failed, reason);
    if (outcome.accepted)
        mFailureReason = reason;
    return outcome;
}
// Returns whether the lifecycle graph permits moving from `current` to
// `next`. Self-transitions are always allowed, and every non-Failed state
// may move to Failed, so those two cases are hoisted out of the switch.
bool VideoBackendLifecycle::CanTransition(VideoBackendLifecycleState current, VideoBackendLifecycleState next)
{
    using S = VideoBackendLifecycleState;

    // Staying in the same state is always permitted.
    if (current == next)
        return true;

    // Any state may declare failure (Failed -> Failed is caught above,
    // so `current` is never Failed here when `next` is Failed).
    if (next == S::Failed)
        return true;

    switch (current)
    {
    case S::Uninitialized:
        return next == S::Discovering || next == S::Stopped;
    case S::Discovering:
        return next == S::Discovered;
    case S::Discovered:
        return next == S::Configuring || next == S::Stopped;
    case S::Configuring:
        return next == S::Configured;
    case S::Configured:
        return next == S::Prerolling || next == S::Stopped;
    case S::Prerolling:
        return next == S::Running || next == S::Stopping;
    case S::Running:
        return next == S::Degraded || next == S::Stopping;
    case S::Degraded:
        return next == S::Running || next == S::Stopping;
    case S::Stopping:
        return next == S::Stopped;
    case S::Stopped:
        return next == S::Discovering;
    case S::Failed:
        return next == S::Stopped || next == S::Discovering;
    default:
        return false;
    }
}
// Maps a lifecycle state to the lowercase name used in status/telemetry
// strings. Unrecognized values map to "unknown".
const char* VideoBackendLifecycle::StateName(VideoBackendLifecycleState state)
{
    using S = VideoBackendLifecycleState;
    switch (state)
    {
    case S::Uninitialized: return "uninitialized";
    case S::Discovering:   return "discovering";
    case S::Discovered:    return "discovered";
    case S::Configuring:   return "configuring";
    case S::Configured:    return "configured";
    case S::Prerolling:    return "prerolling";
    case S::Running:       return "running";
    case S::Degraded:      return "degraded";
    case S::Stopping:      return "stopping";
    case S::Stopped:       return "stopped";
    case S::Failed:        return "failed";
    default:               return "unknown";
    }
}

View File

@@ -0,0 +1,43 @@
#pragma once
#include <string>
// Phases of the video backend lifecycle. Legal phase changes are defined by
// VideoBackendLifecycle::CanTransition; every state may transition to Failed.
enum class VideoBackendLifecycleState
{
Uninitialized, // initial state; no device discovery attempted yet
Discovering,   // device/mode discovery in progress
Discovered,    // discovery finished; ready to configure
Configuring,   // input/output configuration in progress
Configured,    // configuration finished; ready to preroll
Prerolling,    // priming output frames before playback starts
Running,       // normal playout
Degraded,      // running with reduced health; may recover to Running
Stopping,      // shutdown in progress
Stopped,       // fully stopped; may re-enter Discovering
Failed         // unrecoverable error; reason kept in VideoBackendLifecycle
};
// Result record for a single lifecycle transition attempt.
struct VideoBackendLifecycleTransition
{
VideoBackendLifecycleState previous = VideoBackendLifecycleState::Uninitialized; // state before the attempt
VideoBackendLifecycleState current = VideoBackendLifecycleState::Uninitialized;  // state after the attempt (unchanged if rejected)
bool accepted = false;    // true when the transition was legal and applied
std::string reason;       // caller-supplied reason for the attempt
std::string errorMessage; // populated only when the transition was rejected
};
// Small state machine tracking the video backend's lifecycle phase and,
// when failed, the reason for the failure. Not thread-safe on its own;
// callers are expected to serialize access externally.
class VideoBackendLifecycle
{
public:
// Current state (starts Uninitialized).
VideoBackendLifecycleState State() const;
// Reason recorded by the last accepted Fail(); empty otherwise.
const std::string& FailureReason() const;
// Attempts a transition; see VideoBackendLifecycleTransition for the outcome.
VideoBackendLifecycleTransition TransitionTo(VideoBackendLifecycleState next, const std::string& reason);
// Convenience: transition to Failed and store `reason` on success.
VideoBackendLifecycleTransition Fail(const std::string& reason);
// Pure query of the transition graph; no state is touched.
static bool CanTransition(VideoBackendLifecycleState current, VideoBackendLifecycleState next);
// Lowercase display/telemetry name for a state.
static const char* StateName(VideoBackendLifecycleState state);
private:
VideoBackendLifecycleState mState = VideoBackendLifecycleState::Uninitialized;
std::string mFailureReason;
};

View File

@@ -50,6 +50,10 @@ struct VideoIOState
bool keyerInterfaceAvailable = false; bool keyerInterfaceAvailable = false;
bool externalKeyingActive = false; bool externalKeyingActive = false;
double frameBudgetMilliseconds = 0.0; double frameBudgetMilliseconds = 0.0;
bool actualDeckLinkBufferedFramesAvailable = false;
uint64_t actualDeckLinkBufferedFrames = 0;
double deckLinkScheduleCallMilliseconds = 0.0;
uint64_t deckLinkScheduleFailureCount = 0;
}; };
struct VideoIOFrame struct VideoIOFrame
@@ -76,6 +80,7 @@ struct VideoIOOutputFrame
struct VideoIOCompletion struct VideoIOCompletion
{ {
VideoIOCompletionResult result = VideoIOCompletionResult::Completed; VideoIOCompletionResult result = VideoIOCompletionResult::Completed;
void* outputFrameBuffer = nullptr;
}; };
struct VideoIOScheduleTime struct VideoIOScheduleTime
@@ -86,6 +91,19 @@ struct VideoIOScheduleTime
uint64_t frameIndex = 0; uint64_t frameIndex = 0;
}; };
struct VideoPlayoutRecoveryDecision
{
VideoIOCompletionResult result = VideoIOCompletionResult::Completed;
uint64_t completedFrameIndex = 0;
uint64_t scheduledFrameIndex = 0;
uint64_t readyQueueDepth = 0;
uint64_t scheduledLeadFrames = 0;
uint64_t measuredLagFrames = 0;
uint64_t catchUpFrames = 0;
uint64_t lateStreak = 0;
uint64_t dropStreak = 0;
};
class VideoIODevice class VideoIODevice
{ {
public: public:
@@ -98,6 +116,9 @@ public:
virtual bool SelectPreferredFormats(const VideoFormatSelection& videoModes, bool outputAlphaRequired, std::string& error) = 0; virtual bool SelectPreferredFormats(const VideoFormatSelection& videoModes, bool outputAlphaRequired, std::string& error) = 0;
virtual bool ConfigureInput(InputFrameCallback callback, const VideoFormat& inputVideoMode, std::string& error) = 0; virtual bool ConfigureInput(InputFrameCallback callback, const VideoFormat& inputVideoMode, std::string& error) = 0;
virtual bool ConfigureOutput(OutputFrameCallback callback, const VideoFormat& outputVideoMode, bool externalKeyingEnabled, std::string& error) = 0; virtual bool ConfigureOutput(OutputFrameCallback callback, const VideoFormat& outputVideoMode, bool externalKeyingEnabled, std::string& error) = 0;
virtual bool PrepareOutputSchedule() = 0;
virtual bool StartInputStreams() = 0;
virtual bool StartScheduledPlayback() = 0;
virtual bool Start() = 0; virtual bool Start() = 0;
virtual bool Stop() = 0; virtual bool Stop() = 0;
virtual const VideoIOState& State() const = 0; virtual const VideoIOState& State() const = 0;
@@ -105,7 +126,7 @@ public:
virtual bool BeginOutputFrame(VideoIOOutputFrame& frame) = 0; virtual bool BeginOutputFrame(VideoIOOutputFrame& frame) = 0;
virtual void EndOutputFrame(VideoIOOutputFrame& frame) = 0; virtual void EndOutputFrame(VideoIOOutputFrame& frame) = 0;
virtual bool ScheduleOutputFrame(const VideoIOOutputFrame& frame) = 0; virtual bool ScheduleOutputFrame(const VideoIOOutputFrame& frame) = 0;
virtual void AccountForCompletionResult(VideoIOCompletionResult result) = 0; virtual VideoPlayoutRecoveryDecision AccountForCompletionResult(VideoIOCompletionResult result, uint64_t readyQueueDepth) = 0;
bool HasInputDevice() const { return State().hasInputDevice; } bool HasInputDevice() const { return State().hasInputDevice; }
bool HasInputSource() const { return State().hasInputSource; } bool HasInputSource() const { return State().hasInputSource; }

View File

@@ -0,0 +1,37 @@
#pragma once
#include <cstdint>
// What the playout path should schedule when no rendered frame is ready in
// time for the next output slot.
enum class VideoUnderrunBehavior
{
    ReuseLastCompletedFrame,
    BlackFrame
};

// Tunable buffering policy for scheduled video playout.
struct VideoPlayoutPolicy
{
    unsigned outputFramePoolSize = 10;     // device output frames allocated up front
    unsigned targetPrerollFrames = 4;      // frames scheduled before playback starts
    unsigned targetReadyFrames = 2;        // desired steady-state ready-queue depth
    unsigned maxReadyFrames = 4;           // hard cap on the ready queue
    unsigned minimumSpareDeviceFrames = 1; // headroom kept free in the frame pool
    uint64_t lateOrDropCatchUpFrames = 0;  // max schedule-index skip after a late/dropped frame
    VideoUnderrunBehavior underrunBehavior = VideoUnderrunBehavior::ReuseLastCompletedFrame;
    bool adaptiveHeadroomEnabled = false;
};

// Returns a copy of `policy` with every field coerced into a usable range:
// frame counts are at least 1, the ready-queue cap covers the target depth,
// and the device pool is large enough to hold preroll + ready frames plus
// the requested spare headroom.
inline VideoPlayoutPolicy NormalizeVideoPlayoutPolicy(VideoPlayoutPolicy policy)
{
    const auto atLeastOne = [](unsigned value) { return value == 0 ? 1u : value; };
    policy.outputFramePoolSize = atLeastOne(policy.outputFramePoolSize);
    policy.targetPrerollFrames = atLeastOne(policy.targetPrerollFrames);
    policy.targetReadyFrames = atLeastOne(policy.targetReadyFrames);
    if (policy.maxReadyFrames < policy.targetReadyFrames)
        policy.maxReadyFrames = policy.targetReadyFrames;
    const unsigned requiredPoolSize =
        policy.targetPrerollFrames + policy.maxReadyFrames + policy.minimumSpareDeviceFrames;
    if (policy.outputFramePoolSize < requiredPoolSize)
        policy.outputFramePoolSize = requiredPoolSize;
    return policy;
}

View File

@@ -1,15 +1,24 @@
#include "VideoPlayoutScheduler.h" #include "VideoPlayoutScheduler.h"
void VideoPlayoutScheduler::Configure(int64_t frameDuration, int64_t timeScale) void VideoPlayoutScheduler::Configure(int64_t frameDuration, int64_t timeScale)
{
Configure(frameDuration, timeScale, VideoPlayoutPolicy());
}
void VideoPlayoutScheduler::Configure(int64_t frameDuration, int64_t timeScale, const VideoPlayoutPolicy& policy)
{ {
mFrameDuration = frameDuration; mFrameDuration = frameDuration;
mTimeScale = timeScale; mTimeScale = timeScale;
mPolicy = NormalizeVideoPlayoutPolicy(policy);
Reset(); Reset();
} }
void VideoPlayoutScheduler::Reset() void VideoPlayoutScheduler::Reset()
{ {
mScheduledFrameIndex = 0; mScheduledFrameIndex = 0;
mCompletedFrameIndex = 0;
mLateStreak = 0;
mDropStreak = 0;
} }
VideoIOScheduleTime VideoPlayoutScheduler::NextScheduleTime() VideoIOScheduleTime VideoPlayoutScheduler::NextScheduleTime()
@@ -23,10 +32,38 @@ VideoIOScheduleTime VideoPlayoutScheduler::NextScheduleTime()
return time; return time;
} }
void VideoPlayoutScheduler::AccountForCompletionResult(VideoIOCompletionResult result) VideoPlayoutRecoveryDecision VideoPlayoutScheduler::AccountForCompletionResult(VideoIOCompletionResult result, uint64_t readyQueueDepth)
{ {
if (result == VideoIOCompletionResult::DisplayedLate || result == VideoIOCompletionResult::Dropped) ++mCompletedFrameIndex;
mScheduledFrameIndex += 2; if (result == VideoIOCompletionResult::DisplayedLate)
++mLateStreak;
else
mLateStreak = 0;
if (result == VideoIOCompletionResult::Dropped)
++mDropStreak;
else
mDropStreak = 0;
const uint64_t measuredLagFrames = MeasureLag(result, readyQueueDepth);
const uint64_t catchUpFrames = measuredLagFrames < mPolicy.lateOrDropCatchUpFrames
? measuredLagFrames
: mPolicy.lateOrDropCatchUpFrames;
if (catchUpFrames > 0)
mScheduledFrameIndex += catchUpFrames;
VideoPlayoutRecoveryDecision decision;
decision.result = result;
decision.completedFrameIndex = mCompletedFrameIndex;
decision.scheduledFrameIndex = mScheduledFrameIndex;
decision.readyQueueDepth = readyQueueDepth;
decision.scheduledLeadFrames = mScheduledFrameIndex > mCompletedFrameIndex
? mScheduledFrameIndex - mCompletedFrameIndex
: 0;
decision.measuredLagFrames = measuredLagFrames;
decision.catchUpFrames = catchUpFrames;
decision.lateStreak = mLateStreak;
decision.dropStreak = mDropStreak;
return decision;
} }
double VideoPlayoutScheduler::FrameBudgetMilliseconds() const double VideoPlayoutScheduler::FrameBudgetMilliseconds() const
@@ -35,3 +72,26 @@ double VideoPlayoutScheduler::FrameBudgetMilliseconds() const
? (static_cast<double>(mFrameDuration) * 1000.0) / static_cast<double>(mTimeScale) ? (static_cast<double>(mFrameDuration) * 1000.0) / static_cast<double>(mTimeScale)
: 0.0; : 0.0;
} }
// Estimates how many frames the playout schedule is behind after a
// completion. On-time completions report zero lag; for late or dropped
// frames the estimate is the LARGEST of several independent signals, so
// catch-up is sized by the worst indicator rather than their sum.
uint64_t VideoPlayoutScheduler::MeasureLag(VideoIOCompletionResult result, uint64_t readyQueueDepth) const
{
    const bool late = result == VideoIOCompletionResult::DisplayedLate;
    const bool dropped = result == VideoIOCompletionResult::Dropped;
    if (!late && !dropped)
        return 0;

    // Baseline: a single late/dropped completion means at least one frame of lag.
    uint64_t lag = 1;

    // A streak of consecutive late frames suggests lag of at least the streak
    // length; dropped-frame streaks are weighted double.
    if (late && mLateStreak > lag)
        lag = mLateStreak;
    if (dropped && mDropStreak * 2 > lag)
        lag = mDropStreak * 2;

    // Completions overtaking the schedule index measure lag directly.
    if (mCompletedFrameIndex >= mScheduledFrameIndex)
    {
        const uint64_t scheduleLag = mCompletedFrameIndex - mScheduledFrameIndex + 1;
        if (scheduleLag > lag)
            lag = scheduleLag;
    }

    // A starved ready queue implies at least its deficit worth of lag.
    if (readyQueueDepth < mPolicy.targetReadyFrames)
    {
        const uint64_t deficit = mPolicy.targetReadyFrames - readyQueueDepth;
        if (deficit > lag)
            lag = deficit;
    }
    return lag;
}

View File

@@ -1,6 +1,7 @@
#pragma once #pragma once
#include "VideoIOTypes.h" #include "VideoIOTypes.h"
#include "VideoPlayoutPolicy.h"
#include <cstdint> #include <cstdint>
@@ -8,15 +9,26 @@ class VideoPlayoutScheduler
{ {
public: public:
void Configure(int64_t frameDuration, int64_t timeScale); void Configure(int64_t frameDuration, int64_t timeScale);
void Configure(int64_t frameDuration, int64_t timeScale, const VideoPlayoutPolicy& policy);
void Reset(); void Reset();
VideoIOScheduleTime NextScheduleTime(); VideoIOScheduleTime NextScheduleTime();
void AccountForCompletionResult(VideoIOCompletionResult result); VideoPlayoutRecoveryDecision AccountForCompletionResult(VideoIOCompletionResult result, uint64_t readyQueueDepth = 0);
double FrameBudgetMilliseconds() const; double FrameBudgetMilliseconds() const;
uint64_t ScheduledFrameIndex() const { return mScheduledFrameIndex; } uint64_t ScheduledFrameIndex() const { return mScheduledFrameIndex; }
uint64_t CompletedFrameIndex() const { return mCompletedFrameIndex; }
uint64_t LateStreak() const { return mLateStreak; }
uint64_t DropStreak() const { return mDropStreak; }
int64_t TimeScale() const { return mTimeScale; } int64_t TimeScale() const { return mTimeScale; }
const VideoPlayoutPolicy& Policy() const { return mPolicy; }
private: private:
uint64_t MeasureLag(VideoIOCompletionResult result, uint64_t readyQueueDepth) const;
int64_t mFrameDuration = 0; int64_t mFrameDuration = 0;
int64_t mTimeScale = 0; int64_t mTimeScale = 0;
uint64_t mScheduledFrameIndex = 0; uint64_t mScheduledFrameIndex = 0;
uint64_t mCompletedFrameIndex = 0;
uint64_t mLateStreak = 0;
uint64_t mDropStreak = 0;
VideoPlayoutPolicy mPolicy;
}; };

View File

@@ -1,8 +1,8 @@
#include "DeckLinkSession.h" #include "DeckLinkSession.h"
#include "GlRenderConstants.h"
#include <atlbase.h> #include <atlbase.h>
#include <atomic>
#include <chrono>
#include <cstdio> #include <cstdio>
#include <cstring> #include <cstring>
#include <new> #include <new>
@@ -12,6 +12,75 @@
namespace namespace
{ {
// COM wrapper that exposes caller-owned system memory as an
// IDeckLinkVideoBuffer so it can back a DeckLink video frame without a copy.
// The wrapper does NOT own `bytes`: the caller must keep that memory alive
// until the final Release() — NOTE(review): the DeckLink device holds a
// reference while the frame is scheduled, so the backing memory must outlive
// playout of the frame; confirm against the scheduling path.
class SystemMemoryDeckLinkVideoBuffer : public IDeckLinkVideoBuffer
{
public:
// Created with an initial reference count of 1 (caller owns the first ref).
SystemMemoryDeckLinkVideoBuffer(void* bytes, unsigned long long sizeBytes) :
mBytes(bytes),
mSizeBytes(sizeBytes),
mRefCount(1)
{
}
// Standard COM identity: answers only IUnknown and IDeckLinkVideoBuffer.
HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID* ppv) override
{
if (ppv == nullptr)
return E_POINTER;
if (iid == IID_IUnknown || iid == IID_IDeckLinkVideoBuffer)
{
*ppv = static_cast<IDeckLinkVideoBuffer*>(this);
AddRef();
return S_OK;
}
*ppv = nullptr;
return E_NOINTERFACE;
}
// Atomic increment; returns the new count per COM convention.
ULONG STDMETHODCALLTYPE AddRef() override
{
return ++mRefCount;
}
// Atomic decrement; self-deletes on the final release.
ULONG STDMETHODCALLTYPE Release() override
{
const ULONG refCount = --mRefCount;
if (refCount == 0)
delete this;
return refCount;
}
// Hands out the raw pointer; E_FAIL if constructed with null bytes.
HRESULT STDMETHODCALLTYPE GetBytes(void** buffer) override
{
if (buffer == nullptr)
return E_POINTER;
*buffer = mBytes;
return mBytes != nullptr ? S_OK : E_FAIL;
}
HRESULT STDMETHODCALLTYPE GetSize(unsigned long long* bufferSize) override
{
if (bufferSize == nullptr)
return E_POINTER;
*bufferSize = mSizeBytes;
return S_OK;
}
// Plain system memory needs no access bracketing: both are no-ops.
HRESULT STDMETHODCALLTYPE StartAccess(BMDBufferAccessFlags) override
{
return S_OK;
}
HRESULT STDMETHODCALLTYPE EndAccess(BMDBufferAccessFlags) override
{
return S_OK;
}
private:
void* mBytes = nullptr;                // borrowed pointer; not freed here
unsigned long long mSizeBytes = 0;     // size of the borrowed buffer in bytes
std::atomic<ULONG> mRefCount;          // COM reference count (starts at 1)
};
std::string BstrToUtf8(BSTR value) std::string BstrToUtf8(BSTR value)
{ {
if (value == nullptr) if (value == nullptr)
@@ -210,7 +279,7 @@ bool DeckLinkSession::DiscoverDevicesAndModes(const VideoFormatSelection& videoM
BMDTimeValue frameDuration = 0; BMDTimeValue frameDuration = 0;
BMDTimeScale frameTimescale = 0; BMDTimeScale frameTimescale = 0;
outputMode->GetFrameRate(&frameDuration, &frameTimescale); outputMode->GetFrameRate(&frameDuration, &frameTimescale);
mScheduler.Configure(frameDuration, frameTimescale); mScheduler.Configure(frameDuration, frameTimescale, mPlayoutPolicy);
mState.frameBudgetMilliseconds = mScheduler.FrameBudgetMilliseconds(); mState.frameBudgetMilliseconds = mScheduler.FrameBudgetMilliseconds();
mState.inputFrameRowBytes = mState.inputFrameSize.width * 2u; mState.inputFrameRowBytes = mState.inputFrameSize.width * 2u;
@@ -379,7 +448,9 @@ bool DeckLinkSession::ConfigureOutput(OutputFrameCallback callback, const VideoF
mState.statusMessage = "Selected DeckLink output supports external keying. Set enableExternalKeying to true in runtime-host.json to request it."; mState.statusMessage = "Selected DeckLink output supports external keying. Set enableExternalKeying to true in runtime-host.json to request it.";
} }
for (int i = 0; i < 10; i++) const VideoPlayoutPolicy policy = NormalizeVideoPlayoutPolicy(mPlayoutPolicy);
mPlayoutPolicy = policy;
for (unsigned i = 0; i < policy.outputFramePoolSize; i++)
{ {
CComPtr<IDeckLinkMutableVideoFrame> outputFrame; CComPtr<IDeckLinkMutableVideoFrame> outputFrame;
@@ -423,7 +494,6 @@ bool DeckLinkSession::AcquireNextOutputVideoFrame(CComPtr<IDeckLinkMutableVideoF
return false; return false;
outputVideoFrame = outputVideoFrameQueue.front(); outputVideoFrame = outputVideoFrameQueue.front();
outputVideoFrameQueue.push_back(outputVideoFrame);
outputVideoFrameQueue.pop_front(); outputVideoFrameQueue.pop_front();
return outputVideoFrame != nullptr; return outputVideoFrame != nullptr;
} }
@@ -448,6 +518,7 @@ bool DeckLinkSession::PopulateOutputFrame(IDeckLinkMutableVideoFrame* outputVide
frame.width = mState.outputFrameSize.width; frame.width = mState.outputFrameSize.width;
frame.height = mState.outputFrameSize.height; frame.height = mState.outputFrameSize.height;
frame.pixelFormat = mState.outputPixelFormat; frame.pixelFormat = mState.outputPixelFormat;
outputVideoFrame->AddRef();
frame.nativeFrame = outputVideoFrame; frame.nativeFrame = outputVideoFrame;
frame.nativeBuffer = outputVideoFrameBuffer.Detach(); frame.nativeBuffer = outputVideoFrameBuffer.Detach();
return true; return true;
@@ -456,8 +527,62 @@ bool DeckLinkSession::PopulateOutputFrame(IDeckLinkMutableVideoFrame* outputVide
bool DeckLinkSession::ScheduleFrame(IDeckLinkMutableVideoFrame* outputVideoFrame) bool DeckLinkSession::ScheduleFrame(IDeckLinkMutableVideoFrame* outputVideoFrame)
{ {
const VideoIOScheduleTime scheduleTime = mScheduler.NextScheduleTime(); const VideoIOScheduleTime scheduleTime = mScheduler.NextScheduleTime();
return outputVideoFrame != nullptr && if (outputVideoFrame == nullptr || output == nullptr)
output->ScheduleVideoFrame(outputVideoFrame, scheduleTime.streamTime, scheduleTime.duration, scheduleTime.timeScale) == S_OK; {
++mState.deckLinkScheduleFailureCount;
return false;
}
const auto scheduleStart = std::chrono::steady_clock::now();
const HRESULT result = output->ScheduleVideoFrame(outputVideoFrame, scheduleTime.streamTime, scheduleTime.duration, scheduleTime.timeScale);
const auto scheduleEnd = std::chrono::steady_clock::now();
mState.deckLinkScheduleCallMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(scheduleEnd - scheduleStart).count();
if (result != S_OK)
++mState.deckLinkScheduleFailureCount;
RefreshBufferedVideoFrameCount();
return result == S_OK;
}
// Schedules a frame whose pixels live in caller-owned system memory by
// wrapping the buffer (zero-copy) in a SystemMemoryDeckLinkVideoBuffer and
// creating a DeckLink frame around it. Returns false on any validation,
// allocation, creation, or scheduling failure.
bool DeckLinkSession::ScheduleSystemMemoryFrame(const VideoIOOutputFrame& frame)
{
if (output == nullptr || frame.bytes == nullptr || frame.rowBytes <= 0 || frame.height == 0)
return false;
// Attach adopts the wrapper's initial refcount (no extra AddRef).
CComPtr<IDeckLinkVideoBuffer> videoBuffer;
videoBuffer.Attach(new (std::nothrow) SystemMemoryDeckLinkVideoBuffer(
frame.bytes,
static_cast<unsigned long long>(frame.rowBytes) * static_cast<unsigned long long>(frame.height)));
if (videoBuffer == nullptr)
return false;
CComPtr<IDeckLinkMutableVideoFrame> outputVideoFrame;
const BMDPixelFormat pixelFormat = DeckLinkPixelFormatForVideoIO(frame.pixelFormat);
// NOTE(review): presumably CreateVideoFrameWithBuffer takes its own
// reference on videoBuffer; confirm against the DeckLink SDK docs.
if (output->CreateVideoFrameWithBuffer(
frame.width,
frame.height,
frame.rowBytes,
pixelFormat,
bmdFrameFlagFlipVertical,
videoBuffer,
&outputVideoFrame) != S_OK)
{
return false;
}
IDeckLinkVideoFrame* scheduledFrame = outputVideoFrame;
// Register the frame -> buffer mapping BEFORE scheduling so the playout
// completion callback can always resolve the buffer, even if the frame
// completes immediately after ScheduleFrame returns.
{
std::lock_guard<std::mutex> lock(mScheduledSystemFrameMutex);
mScheduledSystemFrameBuffers[scheduledFrame] = frame.bytes;
}
if (ScheduleFrame(outputVideoFrame))
return true;
// Scheduling failed: roll back the registration so the map does not
// accumulate entries for frames that will never complete.
{
std::lock_guard<std::mutex> lock(mScheduledSystemFrameMutex);
mScheduledSystemFrameBuffers.erase(scheduledFrame);
}
return false;
}
bool DeckLinkSession::ScheduleBlackFrame(IDeckLinkMutableVideoFrame* outputVideoFrame) bool DeckLinkSession::ScheduleBlackFrame(IDeckLinkMutableVideoFrame* outputVideoFrame)
@@ -480,6 +605,26 @@ bool DeckLinkSession::ScheduleBlackFrame(IDeckLinkMutableVideoFrame* outputVideo
return ScheduleFrame(outputVideoFrame); return ScheduleFrame(outputVideoFrame);
} }
// Samples the device's buffered-video-frame count into telemetry state.
// When the query cannot be made (no output device, or the call fails) the
// availability flag is cleared so stale counts are not reported as live.
void DeckLinkSession::RefreshBufferedVideoFrameCount()
{
    if (output == nullptr)
    {
        mState.actualDeckLinkBufferedFramesAvailable = false;
        return;
    }

    unsigned int buffered = 0;
    const bool queried = output->GetBufferedVideoFrameCount(&buffered) == S_OK;
    mState.actualDeckLinkBufferedFramesAvailable = queried;
    if (queried)
        mState.actualDeckLinkBufferedFrames = buffered;
}
bool DeckLinkSession::BeginOutputFrame(VideoIOOutputFrame& frame) bool DeckLinkSession::BeginOutputFrame(VideoIOOutputFrame& frame)
{ {
CComPtr<IDeckLinkMutableVideoFrame> outputVideoFrame; CComPtr<IDeckLinkMutableVideoFrame> outputVideoFrame;
@@ -498,20 +643,62 @@ void DeckLinkSession::EndOutputFrame(VideoIOOutputFrame& frame)
frame.bytes = nullptr; frame.bytes = nullptr;
} }
void DeckLinkSession::AccountForCompletionResult(VideoIOCompletionResult completionResult) VideoPlayoutRecoveryDecision DeckLinkSession::AccountForCompletionResult(VideoIOCompletionResult completionResult, uint64_t readyQueueDepth)
{ {
mScheduler.AccountForCompletionResult(completionResult); return mScheduler.AccountForCompletionResult(completionResult, readyQueueDepth);
} }
bool DeckLinkSession::ScheduleOutputFrame(const VideoIOOutputFrame& frame) bool DeckLinkSession::ScheduleOutputFrame(const VideoIOOutputFrame& frame)
{ {
if (frame.nativeFrame == nullptr)
return ScheduleSystemMemoryFrame(frame);
IDeckLinkMutableVideoFrame* outputVideoFrame = static_cast<IDeckLinkMutableVideoFrame*>(frame.nativeFrame); IDeckLinkMutableVideoFrame* outputVideoFrame = static_cast<IDeckLinkMutableVideoFrame*>(frame.nativeFrame);
return ScheduleFrame(outputVideoFrame); const bool scheduled = ScheduleFrame(outputVideoFrame);
if (outputVideoFrame != nullptr)
outputVideoFrame->Release();
return scheduled;
}
// Resets playout scheduling state ahead of (re)starting output and takes a
// fresh buffered-frame telemetry baseline. The reset/refresh always run;
// success simply reports whether an output device exists to schedule on.
bool DeckLinkSession::PrepareOutputSchedule()
{
    mScheduler.Reset();
    RefreshBufferedVideoFrameCount();
    const bool hasOutputDevice = output != nullptr;
    return hasOutputDevice;
}
// Starts capture on the input device when one is configured. A missing
// input is not an error — output-only sessions report success. On failure
// the user is notified via a blocking message box.
bool DeckLinkSession::StartInputStreams()
{
    if (input == nullptr)
        return true;
    if (input->StartStreams() == S_OK)
        return true;
    MessageBoxA(NULL, "Could not start the DeckLink input stream.", "DeckLink start failed", MB_OK | MB_ICONERROR);
    return false;
}
bool DeckLinkSession::StartScheduledPlayback()
{
if (!output)
{
MessageBoxA(NULL, "Cannot start playout because no DeckLink output device is available.", "DeckLink start failed", MB_OK | MB_ICONERROR);
return false;
}
if (output->StartScheduledPlayback(0, mScheduler.TimeScale(), 1.0) != S_OK)
{
MessageBoxA(NULL, "Could not start DeckLink scheduled playback.", "DeckLink start failed", MB_OK | MB_ICONERROR);
return false;
}
RefreshBufferedVideoFrameCount();
return true;
} }
bool DeckLinkSession::Start() bool DeckLinkSession::Start()
{ {
mScheduler.Reset();
if (!output) if (!output)
{ {
MessageBoxA(NULL, "Cannot start playout because no DeckLink output device is available.", "DeckLink start failed", MB_OK | MB_ICONERROR); MessageBoxA(NULL, "Cannot start playout because no DeckLink output device is available.", "DeckLink start failed", MB_OK | MB_ICONERROR);
@@ -523,7 +710,12 @@ bool DeckLinkSession::Start()
return false; return false;
} }
for (unsigned i = 0; i < kPrerollFrameCount; i++) const VideoPlayoutPolicy policy = NormalizeVideoPlayoutPolicy(mPlayoutPolicy);
mPlayoutPolicy = policy;
if (!PrepareOutputSchedule())
return false;
for (unsigned i = 0; i < policy.targetPrerollFrames; i++)
{ {
CComPtr<IDeckLinkMutableVideoFrame> outputVideoFrame; CComPtr<IDeckLinkMutableVideoFrame> outputVideoFrame;
if (!AcquireNextOutputVideoFrame(outputVideoFrame)) if (!AcquireNextOutputVideoFrame(outputVideoFrame))
@@ -538,21 +730,7 @@ bool DeckLinkSession::Start()
} }
} }
if (input) return StartInputStreams() && StartScheduledPlayback();
{
if (input->StartStreams() != S_OK)
{
MessageBoxA(NULL, "Could not start the DeckLink input stream.", "DeckLink start failed", MB_OK | MB_ICONERROR);
return false;
}
}
if (output->StartScheduledPlayback(0, mScheduler.TimeScale(), 1.0) != S_OK)
{
MessageBoxA(NULL, "Could not start DeckLink scheduled playback.", "DeckLink start failed", MB_OK | MB_ICONERROR);
return false;
}
return true;
} }
bool DeckLinkSession::Stop() bool DeckLinkSession::Stop()
@@ -614,13 +792,42 @@ void DeckLinkSession::HandleVideoInputFrame(IDeckLinkVideoInputFrame* inputFrame
inputFrameBuffer->EndAccess(bmdBufferAccessRead); inputFrameBuffer->EndAccess(bmdBufferAccessRead);
} }
void DeckLinkSession::HandlePlayoutFrameCompleted(IDeckLinkVideoFrame*, BMDOutputFrameCompletionResult completionResult) void DeckLinkSession::HandlePlayoutFrameCompleted(IDeckLinkVideoFrame* completedFrame, BMDOutputFrameCompletionResult completionResult)
{ {
RefreshBufferedVideoFrameCount();
void* completedSystemBuffer = nullptr;
if (completedFrame != nullptr)
{
bool externalSystemFrame = false;
{
std::lock_guard<std::mutex> lock(mScheduledSystemFrameMutex);
auto externalFrame = mScheduledSystemFrameBuffers.find(completedFrame);
if (externalFrame != mScheduledSystemFrameBuffers.end())
{
completedSystemBuffer = externalFrame->second;
mScheduledSystemFrameBuffers.erase(externalFrame);
externalSystemFrame = true;
}
}
if (!externalSystemFrame)
{
CComPtr<IDeckLinkMutableVideoFrame> reusableFrame;
if (completedFrame->QueryInterface(IID_IDeckLinkMutableVideoFrame, reinterpret_cast<void**>(&reusableFrame)) == S_OK &&
reusableFrame != nullptr)
{
outputVideoFrameQueue.push_back(reusableFrame);
}
}
}
if (!mOutputFrameCallback) if (!mOutputFrameCallback)
return; return;
VideoIOCompletion completion; VideoIOCompletion completion;
completion.result = TranslateCompletionResult(completionResult); completion.result = TranslateCompletionResult(completionResult);
completion.outputFrameBuffer = completedSystemBuffer;
mOutputFrameCallback(completion); mOutputFrameCallback(completion);
} }

View File

@@ -6,11 +6,14 @@
#include "DeckLinkVideoIOFormat.h" #include "DeckLinkVideoIOFormat.h"
#include "VideoIOFormat.h" #include "VideoIOFormat.h"
#include "VideoIOTypes.h" #include "VideoIOTypes.h"
#include "VideoPlayoutPolicy.h"
#include "VideoPlayoutScheduler.h" #include "VideoPlayoutScheduler.h"
#include <atlbase.h> #include <atlbase.h>
#include <deque> #include <deque>
#include <mutex>
#include <string> #include <string>
#include <unordered_map>
class OpenGLComposite; class OpenGLComposite;
@@ -25,6 +28,9 @@ public:
bool SelectPreferredFormats(const VideoFormatSelection& videoModes, bool outputAlphaRequired, std::string& error) override; bool SelectPreferredFormats(const VideoFormatSelection& videoModes, bool outputAlphaRequired, std::string& error) override;
bool ConfigureInput(InputFrameCallback callback, const VideoFormat& inputVideoMode, std::string& error) override; bool ConfigureInput(InputFrameCallback callback, const VideoFormat& inputVideoMode, std::string& error) override;
bool ConfigureOutput(OutputFrameCallback callback, const VideoFormat& outputVideoMode, bool externalKeyingEnabled, std::string& error) override; bool ConfigureOutput(OutputFrameCallback callback, const VideoFormat& outputVideoMode, bool externalKeyingEnabled, std::string& error) override;
bool PrepareOutputSchedule() override;
bool StartInputStreams() override;
bool StartScheduledPlayback() override;
bool Start() override; bool Start() override;
bool Stop() override; bool Stop() override;
@@ -58,7 +64,7 @@ public:
const VideoIOState& State() const override { return mState; } const VideoIOState& State() const override { return mState; }
VideoIOState& MutableState() override { return mState; } VideoIOState& MutableState() override { return mState; }
double FrameBudgetMilliseconds() const; double FrameBudgetMilliseconds() const;
void AccountForCompletionResult(VideoIOCompletionResult completionResult) override; VideoPlayoutRecoveryDecision AccountForCompletionResult(VideoIOCompletionResult completionResult, uint64_t readyQueueDepth) override;
bool BeginOutputFrame(VideoIOOutputFrame& frame) override; bool BeginOutputFrame(VideoIOOutputFrame& frame) override;
void EndOutputFrame(VideoIOOutputFrame& frame) override; void EndOutputFrame(VideoIOOutputFrame& frame) override;
bool ScheduleOutputFrame(const VideoIOOutputFrame& frame) override; bool ScheduleOutputFrame(const VideoIOOutputFrame& frame) override;
@@ -69,7 +75,9 @@ private:
bool AcquireNextOutputVideoFrame(CComPtr<IDeckLinkMutableVideoFrame>& outputVideoFrame); bool AcquireNextOutputVideoFrame(CComPtr<IDeckLinkMutableVideoFrame>& outputVideoFrame);
bool PopulateOutputFrame(IDeckLinkMutableVideoFrame* outputVideoFrame, VideoIOOutputFrame& frame); bool PopulateOutputFrame(IDeckLinkMutableVideoFrame* outputVideoFrame, VideoIOOutputFrame& frame);
bool ScheduleFrame(IDeckLinkMutableVideoFrame* outputVideoFrame); bool ScheduleFrame(IDeckLinkMutableVideoFrame* outputVideoFrame);
bool ScheduleSystemMemoryFrame(const VideoIOOutputFrame& frame);
bool ScheduleBlackFrame(IDeckLinkMutableVideoFrame* outputVideoFrame); bool ScheduleBlackFrame(IDeckLinkMutableVideoFrame* outputVideoFrame);
void RefreshBufferedVideoFrameCount();
static VideoIOCompletionResult TranslateCompletionResult(BMDOutputFrameCompletionResult completionResult); static VideoIOCompletionResult TranslateCompletionResult(BMDOutputFrameCompletionResult completionResult);
CComPtr<CaptureDelegate> captureDelegate; CComPtr<CaptureDelegate> captureDelegate;
@@ -78,7 +86,10 @@ private:
CComPtr<IDeckLinkOutput> output; CComPtr<IDeckLinkOutput> output;
CComPtr<IDeckLinkKeyer> keyer; CComPtr<IDeckLinkKeyer> keyer;
std::deque<CComPtr<IDeckLinkMutableVideoFrame>> outputVideoFrameQueue; std::deque<CComPtr<IDeckLinkMutableVideoFrame>> outputVideoFrameQueue;
std::mutex mScheduledSystemFrameMutex;
std::unordered_map<IDeckLinkVideoFrame*, void*> mScheduledSystemFrameBuffers;
VideoIOState mState; VideoIOState mState;
VideoPlayoutPolicy mPlayoutPolicy;
VideoPlayoutScheduler mScheduler; VideoPlayoutScheduler mScheduler;
InputFrameCallback mInputFrameCallback; InputFrameCallback mInputFrameCallback;
OutputFrameCallback mOutputFrameCallback; OutputFrameCallback mOutputFrameCallback;

View File

@@ -9,9 +9,10 @@ Phase checklist:
- [x] Split `RuntimeHost` - [x] Split `RuntimeHost`
- [x] Finish live-state and service-facing coordination - [x] Finish live-state and service-facing coordination
- [x] Make the render thread the sole GL owner - [x] Make the render thread the sole GL owner
- [ ] Refactor live state layering into an explicit composition model - [x] Refactor live state layering into an explicit composition model
- [ ] Move persistence onto a background snapshot writer - [x] Move persistence onto a background snapshot writer
- [ ] Make DeckLink/backend lifecycle explicit with a state machine - [x] Make DeckLink/backend lifecycle explicit with a state machine
- [ ] Make playout timing proactive and deadline-aware
- [ ] Add structured health, telemetry, and operational reporting - [ ] Add structured health, telemetry, and operational reporting
Checklist note: Checklist note:
@@ -20,7 +21,8 @@ Checklist note:
- The checked Phase 2 item means the internal event model substrate is complete enough for later phases: the typed event vocabulary, app-owned dispatcher, coalesced event pump, reload bridge events, production bridges, and pure event tests are in place. Remaining items in [PHASE_2_INTERNAL_EVENT_MODEL_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_2_INTERNAL_EVENT_MODEL_DESIGN.md) are narrow follow-ups, mainly completion/failure observations and later replacement of the runtime-store poll fallback with real file-watch events. - The checked Phase 2 item means the internal event model substrate is complete enough for later phases: the typed event vocabulary, app-owned dispatcher, coalesced event pump, reload bridge events, production bridges, and pure event tests are in place. Remaining items in [PHASE_2_INTERNAL_EVENT_MODEL_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_2_INTERNAL_EVENT_MODEL_DESIGN.md) are narrow follow-ups, mainly completion/failure observations and later replacement of the runtime-store poll fallback with real file-watch events.
- The checked Phase 3 item means the render-facing state path now has named live-state, composition, frame-state, resolver, and service-bridge boundaries. `OpenGLComposite::renderEffect()` is reduced to runtime work, frame input construction, and frame rendering. - The checked Phase 3 item means the render-facing state path now has named live-state, composition, frame-state, resolver, and service-bridge boundaries. `OpenGLComposite::renderEffect()` is reduced to runtime work, frame input construction, and frame rendering.
- The checked Phase 4 item means normal runtime GL work is now owned by a dedicated `RenderEngine` render thread. Input upload, output render, preview, screenshot capture, render-local resets, and shader application enter through render-thread queue/request paths instead of caller-thread context borrowing. The remaining output timing risk is callback-coupled synchronous output production, which is intentionally tracked for the later DeckLink/backend lifecycle and playout-queue work. - The checked Phase 4 item means normal runtime GL work is now owned by a dedicated `RenderEngine` render thread. Input upload, output render, preview, screenshot capture, render-local resets, and shader application enter through render-thread queue/request paths instead of caller-thread context borrowing. The remaining output timing risk is callback-coupled synchronous output production, which is intentionally tracked for the later DeckLink/backend lifecycle and playout-queue work.
- It does not mean the whole app is fully extracted. Deeper live-state layering, background persistence, backend lifecycle/playout queue policy, and richer telemetry continue through later phases. - The checked Phase 5 item means persisted, committed/session, transient automation, and render-local state are explicitly named. `CommittedLiveState` physically owns current session layer state, `RuntimeLiveState` owns transient OSC overlays, `RenderStateComposer` consumes a layered input contract, and reset/reload/preset overlay invalidation is centralized and covered by non-GL tests.
- It does not mean the whole app is fully extracted. Backend lifecycle/playout queue policy and richer telemetry continue through later phases.
## Timing Review ## Timing Review
@@ -67,7 +69,7 @@ Relevant code:
- [RenderEngine.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/RenderEngine.cpp:36) - [RenderEngine.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/RenderEngine.cpp:36)
- [OpenGLVideoIOBridge.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLVideoIOBridge.cpp:11) - [OpenGLVideoIOBridge.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLVideoIOBridge.cpp:11)
- [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLComposite.cpp:168) - [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLComposite.cpp:168)
This removes cross-thread GL context borrowing as the central correctness model. The remaining timing risk is that output frame production is still synchronous from the DeckLink completion path, so a render/readback spike can still reduce playout headroom. This removes cross-thread GL context borrowing as the central correctness model. The remaining timing risk is that output frame production is still synchronous from the DeckLink completion path, so a render/readback spike can still reduce playout headroom.
@@ -112,7 +114,7 @@ Failures are often surfaced via `MessageBoxA`, while background services mainly
Relevant code: Relevant code:
- [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLComposite.cpp:314) - [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLComposite.cpp:314)
- [DeckLinkSession.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/videoio/decklink/DeckLinkSession.cpp:478) - [DeckLinkSession.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/videoio/decklink/DeckLinkSession.cpp:478)
- [RuntimeServices.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/control/RuntimeServices.cpp:205) - [RuntimeServices.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/control/RuntimeServices.cpp:205)
@@ -125,30 +127,31 @@ Recommended direction:
- prefer degraded runtime states over modal failure handling where possible - prefer degraded runtime states over modal failure handling where possible
- add a rolling log file for operational troubleshooting - add a rolling log file for operational troubleshooting
### 5. Live OSC overlay and persisted state are still separate concepts without a formal model ### 5. Live OSC overlay and persisted state now have an explicit layering model
The current design works better now, but it still relies on hand-managed reconciliation between: Phase 5 formalized the previous hand-managed reconciliation between:
- persisted/committed parameter state in `RuntimeStore` - base persisted state owned by `RuntimeStore` serialization/preset IO
- transient OSC overlay state in `RenderEngine` - committed session state owned by `CommittedLiveState`
- transient OSC overlay state owned by `RuntimeLiveState`
- render-local temporal, feedback, preview, screenshot, and playout state owned by `RenderEngine`
Relevant code: Relevant code:
- [RenderEngine.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/RenderEngine.h:18) - [CommittedLiveState.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/live/CommittedLiveState.h:1)
- [RuntimeLiveState.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/live/RuntimeLiveState.h:1)
- [RenderStateComposer.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/live/RenderStateComposer.h:1)
- [RuntimeStateLayerModel.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/live/RuntimeStateLayerModel.h:1)
Recommended direction: Current direction:
Formalize three layers of state: - render resolves values with a named composition rule:
- base persisted state
- operator/UI committed state
- transient live automation overlay
Then render can always resolve:
- `final = base + committed + transient` - `final = base + committed + transient`
That avoids special-case sync behavior becoming scattered across the code. - settled OSC commits are session-only by default and do not request persistence unless policy explicitly opts in
- reset, reload, preset load, and shader compatibility changes prune or clear transient overlays at the live-state boundary
- render-local temporal and feedback resources remain outside the parameter layering model
### 6. DeckLink lifecycle could be modeled more explicitly ### 6. DeckLink lifecycle could be modeled more explicitly
@@ -258,17 +261,22 @@ Recommended direction:
### 7. Persistence should be more asynchronous and debounced ### 7. Persistence should be more asynchronous and debounced
`SavePersistentState()` is still called directly from many update paths. Status: addressed by Phase 6.
Relevant code: Relevant code:
- `RuntimeHost.cpp` - `RuntimeCoordinator.cpp`
- `RuntimeUpdateController.cpp`
- `RuntimeStore.cpp`
- `PersistenceWriter.cpp`
Recent OSC work already reduced this problem for live automation, but the broader architecture would still benefit from: Runtime-state persistence now flows from accepted coordinator mutations to typed persistence events, then into a debounced background writer. The store still owns serialization and preset IO, while the writer owns temp-file replacement, coalescing, result reporting, and shutdown flushing.
- a debounced persistence queue The remaining architecture concern is broader persistence policy, not direct mutation-path disk writes:
- atomic write-behind snapshots
- clear separation between state mutation and disk flush - whether preset saves should stay synchronous
- whether runtime config writes should share the persistence writer
- whether failed writes should retry automatically or wait for the next request
This improves both resilience and timing safety. This improves both resilience and timing safety.
@@ -314,7 +322,7 @@ The desktop preview is rate-limited, but still presented from inside the render
Relevant code: Relevant code:
- [OpenGLRenderPipeline.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLRenderPipeline.cpp:54) - [OpenGLRenderPipeline.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLRenderPipeline.cpp:54)
- [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLComposite.cpp:235) - [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLComposite.cpp:235)
This means preview presentation can still consume time on the same path that is trying to meet output deadlines. This means preview presentation can still consume time on the same path that is trying to meet output deadlines.
@@ -542,11 +550,14 @@ Dedicated design note:
- [PHASE_5_LIVE_STATE_LAYERING_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_5_LIVE_STATE_LAYERING_DESIGN.md) - [PHASE_5_LIVE_STATE_LAYERING_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_5_LIVE_STATE_LAYERING_DESIGN.md)
Recommended layers: Status:
- base persisted state - complete for the current architecture
- operator-committed live state - `RuntimeStateLayerModel` names the state categories
- transient automation overlay - `CommittedLiveState` physically owns committed/session layer state
- `RenderStateComposer` consumes `LayeredRenderStateInput`
- `RuntimeLiveState` owns transient overlay smoothing, generation, commit settlement, and compatibility pruning
- settled OSC commits update session state without requesting persistence by default
Render should derive final values from a clear composition rule such as: Render should derive final values from a clear composition rule such as:
@@ -565,18 +576,19 @@ Expected benefits:
### Phase 6. Move persistence onto a background snapshot writer ### Phase 6. Move persistence onto a background snapshot writer
After the state model is explicit, persistence should become a background concern rather than a synchronous side effect of mutations. Status: complete. Runtime-state persistence is now a background concern rather than a synchronous side effect of mutations.
Dedicated design note: Dedicated design note:
- [PHASE_6_BACKGROUND_PERSISTENCE_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_6_BACKGROUND_PERSISTENCE_DESIGN.md) - [PHASE_6_BACKGROUND_PERSISTENCE_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_6_BACKGROUND_PERSISTENCE_DESIGN.md)
Target behavior: Implemented behavior:
- mutations update authoritative in-memory stored state - mutations update authoritative in-memory stored state
- persistence requests are queued - persistence requests are queued
- disk writes are debounced and coalesced - disk writes are debounced and coalesced
- writes are atomic and versioned where practical - writes use temp-file replacement where practical
- shutdown flush behavior is explicit and tested
Why this phase comes after state splitting: Why this phase comes after state splitting:
@@ -621,6 +633,21 @@ Expected benefits:
- easier handling of missing input, dropped frames, or reconfiguration - easier handling of missing input, dropped frames, or reconfiguration
- a clearer place to own playout headroom policy, output queue sizing, and late-frame recovery behavior - a clearer place to own playout headroom policy, output queue sizing, and late-frame recovery behavior
### Phase 7.5. Make playout timing proactive and deadline-aware
Phase 7 made backend lifecycle, ready-frame queueing, measured recovery, and backend playout health visible. The remaining timing-specific work is to make output production proactive instead of demand-filled by completion pressure.
Dedicated design note:
- [PHASE_7_5_PROACTIVE_PLAYOUT_TIMING_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_7_5_PROACTIVE_PLAYOUT_TIMING_DESIGN.md)
Expected benefits:
- output frames are produced ahead based on queue pressure or cadence
- DeckLink completion handling normally consumes already-ready frames
- preview and synchronous readback fallback become explicitly subordinate to playout deadlines
- queue depth, readback misses, preview skips, and render timing explain why headroom drains
### Phase 8. Add structured health, telemetry, and operational reporting ### Phase 8. Add structured health, telemetry, and operational reporting
This phase should happen after the main ownership changes so the telemetry can reflect the final architecture instead of a transient one. This phase should happen after the main ownership changes so the telemetry can reflect the final architecture instead of a transient one.
@@ -669,7 +696,8 @@ If this is approached as a serious architecture program rather than opportunisti
5. Formalize live state layering and composition. 5. Formalize live state layering and composition.
6. Move persistence to a background snapshot writer. 6. Move persistence to a background snapshot writer.
7. Refactor DeckLink/backend lifecycle into an explicit state machine. 7. Refactor DeckLink/backend lifecycle into an explicit state machine.
8. Add structured telemetry, health reporting, and operational diagnostics. 8. Make playout timing proactive and deadline-aware.
9. Add structured telemetry, health reporting, and operational diagnostics.
## Why This Order Makes Sense ## Why This Order Makes Sense
@@ -678,7 +706,7 @@ This order tries to avoid doing foundational work twice.
- The event model comes before major subsystem extraction so coordination patterns stabilize early. - The event model comes before major subsystem extraction so coordination patterns stabilize early.
- runtime state ownership is split before render isolation so the render thread does not inherit a monolithic state model. - runtime state ownership is split before render isolation so the render thread does not inherit a monolithic state model.
- Live state layering is formalized only after render ownership is clearer. - Live state layering is formalized only after render ownership is clearer.
- Persistence is moved later so it can target the final state model rather than the current one. - Persistence moved after the state model split so it could target the durable snapshot model rather than an older mixed-responsibility runtime object.
- Telemetry is intentionally late so it instruments the architecture that survives the refactor. - Telemetry is intentionally late so it instruments the architecture that survives the refactor.
## Short Version ## Short Version
@@ -690,8 +718,9 @@ The app is in a much better place than it was before the OSC timing work. The sh
3. split state ownership 3. split state ownership
4. isolate rendering 4. isolate rendering
5. formalize layered live state 5. formalize layered live state
6. background persistence 6. complete background persistence
7. explicit backend lifecycle 7. explicit backend lifecycle
8. health and telemetry 8. proactive playout timing
9. health and telemetry
That sequence gives each later phase a cleaner foundation than the current app has today. That sequence gives each later phase a cleaner foundation than the current app has today.

View File

@@ -0,0 +1,529 @@
# Current System Architecture
This document describes how the application currently works.
It replaces the phase-by-phase design trail as the best entry point for understanding the repo. The older phase documents remain useful history, but they mix implementation notes, experiments, and target designs. This document is organized by current runtime behavior and subsystem ownership instead.
## Application Shape
The app is a live OpenGL compositor with DeckLink input/output, runtime control services, persistent layer-stack state, live state overlays, health telemetry, and a small internal event model.
At runtime the major subsystems are:
- `OpenGLComposite`
- `RuntimeStore`
- `RuntimeCoordinator`
- `RuntimeSnapshotProvider`
- `RuntimeServices`
- `RuntimeUpdateController`
- `RenderEngine`
- `VideoBackend`
- `DeckLinkSession`
- `HealthTelemetry`
- `RuntimeEventDispatcher`
- `PersistenceWriter`
The key architectural rules are:
- runtime/control subsystems decide what state should exist
- render subsystems decide how to draw that state
- video subsystems decide how frames move to and from hardware
- telemetry observes behavior without becoming a control plane
## Process Startup
The Win32 app creates the window, chooses a pixel format, creates an OpenGL context, initializes COM, and constructs `OpenGLComposite`.
`OpenGLComposite` owns the high-level assembly of the runtime:
- runtime store
- runtime coordinator
- runtime services
- runtime update controller
- render engine
- video backend
Startup proceeds broadly as:
1. COM and OpenGL are initialized by the Win32 app.
2. `OpenGLComposite::InitDeckLink()` discovers/configures DeckLink and runtime state.
3. Runtime services are started.
4. Shader programs and GL resources are initialized.
5. The render thread is started.
6. The video backend starts output preroll and playback.
The normal VS Code debug launch currently sets:
```text
VST_DISABLE_INPUT_CAPTURE=1
```
That disables DeckLink input capture for output-timing isolation while keeping the output path active.
## Runtime State
### `RuntimeStore`
`RuntimeStore` owns durable runtime data and file-backed state.
It owns:
- runtime host configuration
- stored layer stack data
- persisted parameter values
- stack presets
- shader package catalog metadata
- runtime state presentation data
- persistence requests
It does not own render-thread resources, DeckLink timing, control ingress, or mutation policy.
### `CommittedLiveState`
`CommittedLiveState` owns current session/operator layer state that is live but not necessarily persisted as the durable base state.
It gives the renderer and snapshot builder a named read model for current committed layer state.
### `RuntimeCoordinator`
`RuntimeCoordinator` is the mutation policy boundary.
It validates and applies runtime mutations, classifies whether changes are persisted/committed/transient, emits persistence requests, and produces render reset/reload decisions.
It keeps mutation decisions out of:
- the render engine
- control services
- video backend
- telemetry
### `RuntimeSnapshotProvider`
`RuntimeSnapshotProvider` publishes render-facing snapshots.
It owns the currently published render snapshot and gives the render path a stable read boundary. Rendering does not read mutable store objects directly.
## Live State And Layering
The current render state is built from named layers of state:
- persisted layer/package/default state from the runtime store
- committed live/session state
- transient live overlays from OSC/control input
- render-local state owned by the renderer
`RuntimeStateLayerModel` names these categories. `RenderStateComposer` and `RuntimeLiveState` combine live values into render-facing state.
`RenderFrameInput` and `RenderFrameState` are the frame contract:
- `RenderFrameInput` describes what kind of frame is being built
- `RenderFrameState` describes the resolved state used to draw that frame
The renderer should not ask global state systems which snapshot or layer state to use midway through drawing.
## Control And Events
### `RuntimeServices`
`RuntimeServices` owns runtime-facing services such as OSC/control integration and service lifecycle.
It connects control ingress to the coordinator and live-state bridge.
### `ControlServices`
`ControlServices` handles OSC/control ingress, buffering, and polling/wake behavior.
It does not own runtime mutation policy. It normalizes ingress and asks the coordinator/runtime services to apply changes.
### `RuntimeEventDispatcher`
The app uses typed runtime events for internal coordination and observation.
Events are used for:
- runtime state broadcast requests
- shader build lifecycle
- backend state changes
- input/output frame observations
- timing samples
- health and queue observations
Events say what happened. Commands/request methods still exist where a caller needs an immediate success/failure answer.
## Persistence
Persistence is handled by `PersistenceWriter`.
Runtime mutations can enqueue persistence requests without blocking the render/output path. Shutdown performs a bounded persistence flush.
The store owns durable state; the writer owns background write execution.
## Render System
### `RenderEngine`
`RenderEngine` owns normal runtime OpenGL work.
It starts a dedicated render thread and binds the GL context on that thread. Runtime GL work enters through render-thread requests or render command queues.
The render thread handles:
- output frame rendering
- input frame upload
- preview present
- screenshot capture
- render-local resets
- shader/rebuild application
- temporal history and shader feedback resources
Startup initialization still happens before the render thread starts, while the app thread explicitly owns the GL context. Normal runtime work is routed through `RenderEngine`.
### Current Render-Thread Limitation
The current render thread is a shared GL executor, not a pure output-only cadence thread.
This means output render can still be delayed by:
- input upload work
- preview present requests
- screenshot capture
- render reset commands
- shader/resource update work
- synchronous render-thread request queue wait
For output-timing diagnosis, input capture can be disabled with:
```text
VST_DISABLE_INPUT_CAPTURE=1
```
When that variable is set, the backend skips DeckLink input configuration/start and `HasInputSource()` reports false.
### `OpenGLRenderPipeline`
`OpenGLRenderPipeline` draws the frame and performs output packing/readback.
The current output path:
1. binds the composite framebuffer
2. calls the render effect callback
3. blits/composes into the output framebuffer
4. packs the output for the configured pixel format
5. flushes GL
6. reads output into the provided system-memory output frame
7. records render/readback timing
For BGRA8 output, the pipeline uses a BGRA-compatible pack framebuffer and async PBO readback by default.
## Video Backend
### `VideoBackend`
`VideoBackend` owns app-level video device lifecycle, output production, system-memory frame slots, and backend playout health.
It owns:
- backend lifecycle state
- output production worker
- output completion worker
- system-memory output frame pool
- ready/completed output queue
- render cadence controller
- playout policy
- output frame scheduling into `VideoIODevice`
- backend timing and queue telemetry
It does not own GL drawing. It asks `OpenGLVideoIOBridge` / `RenderEngine` to render into system-memory output frames.
### Lifecycle
The current backend lifecycle includes:
- discovery
- configuring
- configured
- prerolling
- running
- degraded
- stopping
- stopped
- failed
Startup now separates output schedule preparation from scheduled playback:
1. prepare the DeckLink output schedule
2. start output completion worker
3. start output producer worker
4. warm up rendered system-memory preroll frames
5. optionally start input streams
6. start DeckLink scheduled playback
### Output Production
The output producer is cadence-driven.
`RenderCadenceController` tracks the selected output frame duration and decides when the producer should render another frame.
The render producer attempts to render one output frame per selected output tick. It does not speed up just because DeckLink is empty.
If render/GPU work is late enough, the cadence controller can skip late ticks according to policy.
### System-Memory Frame Pool
`SystemOutputFramePool` owns reusable system-memory output slots.
Slots have four states:
- `Free`
- `Rendering`
- `Completed`
- `Scheduled`
Completed-but-unscheduled frames are treated as a latest-N cache. If render cadence needs space and old completed frames have not been scheduled, the oldest unscheduled completed frame can be recycled.
Scheduled frames are protected until DeckLink reports completion.
### Output Queue
`RenderOutputQueue` holds completed unscheduled output frames waiting to be scheduled.
It is bounded and latest-N:
- pushing beyond capacity releases/drops the oldest ready frame
- `DropOldestFrame()` is used when the frame pool needs to recycle old completed work
### Scheduling
`VideoBackend::ScheduleReadyOutputFramesToTarget()` schedules completed system-memory frames up to the configured preroll/scheduled target.
DeckLink scheduling is capped by the current app-owned scheduled count. Real DeckLink buffered-frame telemetry is also recorded.
### Completion Handling
DeckLink completion callbacks do not render.
The callback path reports completion into `VideoBackend`, which processes completions on a backend worker. Completion processing:
- releases the system-memory slot by buffer pointer
- records pacing
- accounts for late/drop/flushed/completed result
- records telemetry
- wakes the output producer
## DeckLink Integration
### `DeckLinkSession`
`DeckLinkSession` is the DeckLink implementation of `VideoIODevice`.
It owns:
- DeckLink discovery
- input/output mode selection
- DeckLink input/output interfaces
- keyer configuration
- capture and playout delegates
- schedule-time generation through `VideoPlayoutScheduler`
- DeckLink frame scheduling
- actual buffered-frame telemetry
For output, system-memory frames are scheduled through DeckLink `CreateVideoFrameWithBuffer()`.
When a system-memory frame is scheduled, `DeckLinkSession` records a map from the DeckLink frame object back to the app-owned system-memory buffer pointer. On completion, the buffer pointer is returned so `VideoBackend` can release the matching slot.
### Actual DeckLink Buffer Telemetry
`DeckLinkSession` calls `GetBufferedVideoFrameCount()` after schedule/completion where available.
Telemetry separates:
- actual DeckLink buffered frames
- app-owned scheduled system-memory slots
- synthetic schedule/completion counters
- late/drop/flushed completion results
## Output Timing Experiments And Current Finding
The repo includes `DeckLinkRenderCadenceProbe`, a small standalone test app under:
```text
apps/DeckLinkRenderCadenceProbe
```
The probe does not use the main runtime, shader system, preview path, input upload path, or shared render engine. It uses:
- one OpenGL render thread with its own hidden GL context
- simple BGRA8 motion rendering
- async PBO readback
- latest-N system-memory frame slots
- a playout thread that feeds DeckLink
- real rendered warmup before scheduled playback
The first hardware result was smooth at roughly 59.94/60 fps with:
- `renderFps` near 59.9
- `scheduleFps` near 59.9
- DeckLink actual buffered frames stable at 4
- no late frames
- no dropped frames
- no PBO misses
- no completed-frame drops
That proves the clean architecture can work on the test machine. Remaining timing issues in the main app are therefore likely integration/ownership issues rather than a fundamental DeckLink/OpenGL/BGRA8 limitation.
The highest-value current suspects are:
- input upload sharing the output render thread
- shared render-thread task queue contention
- preview/screenshot work
- runtime/render-state work on the output path
## Health Telemetry
`HealthTelemetry` owns app-visible health and timing observations.
It records:
- signal/input status
- performance/render timing
- event queue timing
- backend lifecycle/playout state
- output render queue wait
- output render/readback timing
- system-memory frame counts
- actual DeckLink buffer depth
- late/drop/flushed/completed frame counters
- schedule-call timing/failure counts
Several hot-path telemetry calls use try-lock variants so observation does not become a major timing dependency.
Runtime state presentation exposes telemetry through the runtime JSON/open API surface.
## Preview And Screenshot
Preview is best-effort.
`OpenGLComposite::paintGL()` skips preview when the backend reports output pressure. Preview presentation is requested through the render thread.
Screenshot capture is also a render-thread request. It reads pixels from the output framebuffer and writes PNG asynchronously after capture.
Both preview and screenshot share GL execution with output render, so they are secondary to output timing.
## Output Readback Modes
The output readback path supports environment-selected modes:
```text
VST_OUTPUT_READBACK_MODE=async_pbo
VST_OUTPUT_READBACK_MODE=sync
VST_OUTPUT_READBACK_MODE=cached_only
```
Default behavior is `async_pbo`.
Experiment findings:
- direct synchronous readback was slower on the sampled machine
- cached-only recovered timing but is visually invalid for live motion
- BGRA8 pack framebuffer plus async PBO removed the earlier large readback stall
## Current Debug/Experiment Launches
VS Code launch configurations include:
- `Debug LoopThroughWithOpenGLCompositing`
- `Debug LoopThroughWithOpenGLCompositing - sync readback experiment`
- `Debug LoopThroughWithOpenGLCompositing - cached output experiment`
- `Debug DeckLinkRenderCadenceProbe`
The default main-app debug launch currently disables input capture with `VST_DISABLE_INPUT_CAPTURE=1` so output timing can be tested without input upload interference.
## Current Ownership Summary
| Area | Current Owner |
| --- | --- |
| Durable runtime config/state | `RuntimeStore` |
| Current committed live layer state | `CommittedLiveState` |
| Mutation validation/policy | `RuntimeCoordinator` |
| Render snapshot publication | `RuntimeSnapshotProvider` |
| OSC/control ingress | `RuntimeServices` / `ControlServices` |
| Internal event dispatch | `RuntimeEventDispatcher` |
| Background persistence writes | `PersistenceWriter` |
| GL context and normal GL work | `RenderEngine` render thread |
| Render-pass execution and output readback | `OpenGLRenderPipeline` |
| Device lifecycle and output production | `VideoBackend` |
| DeckLink API integration | `DeckLinkSession` |
| Operational health/timing | `HealthTelemetry` |
## Current Runtime Flow Summary
### Control Mutation
```text
OSC/API/control input
-> RuntimeServices / ControlServices
-> RuntimeCoordinator
-> RuntimeStore / CommittedLiveState / RuntimeLiveState
-> RuntimeSnapshotProvider publication or live overlay update
-> RuntimeEventDispatcher observations
```
### Output Render
```text
VideoBackend output producer
-> RenderCadenceController tick
-> SystemOutputFramePool acquire rendering slot
-> OpenGLVideoIOBridge::RenderScheduledFrame
-> RenderEngine::RequestOutputFrame
-> render thread
-> OpenGLRenderPipeline::RenderFrame
-> system-memory output slot
-> RenderOutputQueue completed frame
```
### DeckLink Playout
```text
RenderOutputQueue completed frame
-> VideoBackend schedules to target
-> DeckLinkSession::ScheduleOutputFrame
-> CreateVideoFrameWithBuffer
-> ScheduleVideoFrame
-> DeckLink playback
-> completion callback
-> VideoBackend completion worker
-> release scheduled system-memory slot
```
### Input Capture
When input capture is enabled:
```text
DeckLink input callback
-> VideoBackend::HandleInputFrame
-> OpenGLVideoIOBridge::UploadInputFrame
-> RenderEngine::QueueInputFrame
-> render thread upload
```
When `VST_DISABLE_INPUT_CAPTURE=1`, this flow is skipped.
## Known Current Constraints
- The main app render thread still handles multiple kinds of GL work.
- Output render still uses a synchronous request/response call into the render thread.
- Input upload can contend with output render when input capture is enabled.
- Preview and screenshot share the render thread.
- Phase/experiment documents still exist as historical notes, but this document is the current architecture summary.
## Practical Rules
- Keep one owner for each kind of state.
- Keep GL work on the render thread.
- Keep DeckLink completion callbacks passive.
- Treat completed unscheduled output frames as latest-N cache entries.
- Protect scheduled output frames until DeckLink completion.
- Keep output timing more important than preview/screenshot.
- Measure timing by domain instead of adding fallback branches blindly.

View File

@@ -0,0 +1,377 @@
# DeckLink / OpenGL Lessons Learned
This document summarizes the practical lessons from the Phase 3–7.7 refactor work, especially the DeckLink playout timing experiments.
It is intentionally broader than the phase design docs. The goal is to preserve what we now know about the system so future architecture choices start from evidence instead of rediscovering the same constraints.
## High-Level Lesson
The application is not just a renderer with a video output attached.
It is a real-time playout system with several independent clocks:
- the selected output cadence, for example 59.94 fps
- the GPU render/readback timeline
- the DeckLink scheduled playback clock
- the Windows thread scheduler
- the input capture callback cadence
- the preview/window message loop
- the runtime/control update cadence
Stable playback depends on assigning one owner to each timing domain and keeping those domains loosely coupled.
## What Worked
### Named State Contracts Helped
`RenderFrameInput` and `RenderFrameState` made the render path easier to reason about.
Before that, frame rendering depended on scattered choices about snapshots, cache state, layer state, input source state, and runtime service state. Naming the frame contract made it possible to move logic out of `RenderEngine` and toward explicit frame construction.
Lesson:
- keep frame inputs explicit
- keep render-frame state immutable for the duration of a frame
- avoid making the renderer ask global systems which state it should use mid-frame
### Render-Thread Ownership Helped
Moving GL work behind a render-thread boundary reduced wrong-thread GL access risk and made ownership clearer.
The current render thread is still shared by output render, input upload, preview, screenshot, resize, and reset work, so it is not yet a pure output cadence thread. But the ownership direction is right.
Lesson:
- GL context ownership should be explicit
- public methods should enqueue or request work
- render-thread methods should own GL bodies
- synchronous calls should be reserved for places that genuinely need a result
### Background Persistence Was Worth It
Moving persistence away from hot render/control paths reduced incidental latency risk and made state writes easier to reason about.
Lesson:
- runtime/control persistence should not sit on output render timing
- shutdown flushing is fine, steady-state blocking is not
### Lifecycle State Was Worth It
The backend lifecycle model gave us better failure and shutdown vocabulary.
This became important once startup stopped being a single `Start()` call and became:
- prepare output schedule
- start render cadence
- warm up real frames
- start input streams
- start scheduled playback
Lesson:
- playout startup needs phases
- degradation should be explicit
- shutdown order should be deliberate and testable
## What Did Not Work
### Completion-Driven Rendering Was Too Fragile
Rendering on or near DeckLink completion can average the target frame rate, but it leaves no headroom.
When the callback asks for a frame just-in-time, any small delay in render, readback, scheduling, or Windows wake timing becomes visible as a buffer dip or stutter.
Lesson:
- DeckLink completion should release scheduled resources and wake scheduling
- it should not render
- it should not decide visual fallback policy in steady state
### Black Fallback Hid The Real Timing Problem
Scheduling black on app-ready underrun made the pipeline appear to keep moving while producing visible black flicker.
It also made diagnosis harder because DeckLink could have scheduled frames while the app visibly failed.
Lesson:
- black is a startup/error/degraded-state policy, not normal steady-state recovery
- steady-state underruns should be measured as timing failures
### Synthetic Schedule Lead Was Misleading
The synthetic scheduled/completed index could report a large buffer while DeckLink still showed low actual device buffer depth.
Real DeckLink `GetBufferedVideoFrameCount()` telemetry was necessary to separate:
- app-owned scheduled slots
- synthetic schedule lead
- actual hardware/device buffer depth
Lesson:
- measure actual device buffer depth
- keep synthetic counters only as diagnostics
- do not infer device health from internal stream indexes alone
### More Buffer Is Not Automatically Smoother
Increasing DeckLink scheduled frames sometimes made the reported device buffer look healthier while visible motion still stuttered.
The problem was not only "how many frames are scheduled"; it was also whether the scheduled frames represented a stable render cadence.
Lesson:
- buffer depth absorbs jitter, but it cannot fix bad cadence ownership
- a full buffer of poorly timed or repeated frames can still look wrong
### Speed-Up Catch-Up Was The Wrong Instinct
Letting the producer sprint to refill the buffer created new timing artifacts.
The render side should behave like a stable game/render loop: render at the selected cadence, record lateness, and only skip ticks when render/GPU work itself overruns.
Lesson:
- the render thread should not render faster because DeckLink is empty
- buffer drain is a failure signal, not a sprint signal
- warmup should fill buffers before playback starts
## GPU Readback Lessons
### The Original Readback Path Was The Major Collapse
Early Phase 7.5 telemetry showed `glReadPixels(..., nullptr)` into the PBO costing roughly 8–14 ms on representative samples. That was enough to collapse ready depth and cause long freezes.
Direct synchronous readback was worse on the sampled machine.
Cached-output mode, while visually invalid for live output, immediately recovered timing. That proved ongoing GPU-to-CPU transfer was the major cost in that version of the path.
Lesson:
- isolate readback cost from render cost
- use intentionally invalid cached-output experiments when diagnosing throughput
- do not assume async PBO is actually cheap on every format/driver path
### BGRA8 Packing Changed The Problem
Changing the output path so readback matched the DeckLink BGRA8 format made `asyncQueueReadPixelsMs` drop dramatically in sampled runs.
Long pauses disappeared and the remaining issue became short stutters/cadence gaps.
Lesson:
- output/readback format matters
- avoid format conversions on the readback path when possible
- BGRA8 is a good current format target for experiments
- v210/YUV packing can be deferred until cadence is stable
### DeckLink SDK Fast Transfer Was Not Available On The Test GPU
The SDK OpenGL fast-transfer path depends on hardware/extension support that was not present on the RTX 4060 Ti test machine:
- NVIDIA DVP path was gated around Quadro-style support
- `GL_AMD_pinned_memory` was not exposed
Lesson:
- SDK fast-transfer samples are useful references but not a universal fix
- unsupported fast-transfer code should not be central to the architecture
- the default path must work with ordinary consumer GPUs
## DeckLink Lessons
### DeckLink Wants Scheduled System-Memory Frames
Using `CreateVideoFrameWithBuffer()` lets DeckLink schedule frames backed by our system-memory slots.
That is the right ownership model for this app:
- render/readback writes into a slot
- DeckLink schedules a frame that references that slot
- the slot is protected until DeckLink completion
Lesson:
- system-memory slots are the contract between render and playout
- scheduled slots must not be recycled early
- completed-but-unscheduled slots can be latest-N cache entries
### Startup Needs Real Preroll
Starting scheduled playback before real rendered frames exist creates avoidable startup fragility.
The better startup shape is:
- prepare the DeckLink schedule
- start render cadence
- render warmup frames at normal cadence
- schedule those frames as preroll
- start DeckLink scheduled playback
Lesson:
- do not use black preroll as the normal startup path
- do not render faster during warmup
- if warmup cannot fill in a bounded time, fail/degrade visibly
## Buffering Lessons
### There Are Two Different Buffers
The app has at least two important frame stores:
- system-memory completed/latest-N frames
- DeckLink scheduled/device buffer
They have different ownership rules.
Completed-but-unscheduled frames are disposable if a newer frame is available and cadence needs the slot.
Scheduled frames are not disposable because DeckLink may still read them.
Lesson:
- latest-N completed frames are a cache
- scheduled frames are owned by DeckLink until completion
- keep metrics for both
### Consume-Before-Render Is The Wrong Model For Completed Frames
If the render cadence waits for completed frames to be consumed, DeckLink timing can indirectly slow the renderer.
That couples the clocks again.
Lesson:
- render cadence should keep rendering at selected cadence
- if completed cache is full, recycle/drop the oldest unscheduled completed frame
- only scheduled/in-flight saturation should prevent rendering to a safe slot
## Render Thread Lessons
### The Current Render Thread Is Still Shared
The GL render thread currently handles:
- output rendering
- input upload
- preview present
- screenshot capture
- render reset commands
- shader/resource operations
Output render can therefore be delayed by queued or inline work.
Lesson:
- "one GL thread" is not the same as "one output cadence thread"
- output render should become the highest-priority GL operation
- non-output GL work needs budgets, coalescing, or deferral
### Input Upload Is A Suspect Timing Coupling
Output render currently processes input upload work immediately before rendering the output frame.
That keeps input fresh but can steal time from the exact frame we are trying to render on cadence.
Lesson:
- measure input upload count and time immediately before output render
- test policies such as `one_before_output` or `skip_before_output`
- prefer latest-input semantics over draining every pending upload
### Preview And Screenshot Must Stay Secondary
Preview is useful, but DeckLink output is the real-time path.
Screenshot and preview share GL resources and can block or queue work on the same render thread.
Lesson:
- preview should be skipped when output is under pressure
- screenshot capture should be treated as disruptive unless proven otherwise
- forced preview/screenshot should be visible in telemetry
## Telemetry Lessons
The useful telemetry has been the telemetry that separates domains:
- output render queue wait
- render/draw time
- readback queue time
- readback fence/map/copy time
- app ready/completed queue depth
- system-memory free/rendering/completed/scheduled counts
- actual DeckLink buffered-frame count
- DeckLink schedule-call time/failures
- late/drop completion counts
Lesson:
- averages are not enough
- timing spikes matter more than steady low values
- count ownership states, not just queue depth
- keep experiment logs short and evidence-based
## Current Architectural Direction
The current direction is still sound:
```text
Render cadence loop
renders at selected output cadence
writes latest-N completed system-memory frames
never sprints to refill DeckLink
Frame store
owns free / rendering / completed / scheduled slots
recycles unscheduled completed frames when needed
protects scheduled frames until completion
DeckLink playout scheduler
consumes completed frames
tops up actual device buffer
never renders
Completion callback
releases scheduled slots
records completion result
wakes scheduler
```
## Rewrite Lesson
A full restart is not obviously the right next move.
The current repo now contains:
- working runtime/control architecture
- useful phase docs
- non-GL tests around key state machines
- real telemetry
- a clearer understanding of DeckLink and OpenGL timing
The better next step is likely a contained "V2 spine" inside the current app:
- harden the render cadence loop
- harden the frame store
- separate DeckLink scheduling
- demote preview/screenshot/input upload below output cadence
- delete old compatibility branches as they become unnecessary
A full rewrite becomes attractive only if the current GL ownership model cannot be made deterministic without excessive surgery, or if the project switches rendering API.
## Practical Rules Going Forward
- One timing authority per domain.
- Render cadence is time-driven, not completion-driven.
- DeckLink scheduling is device-buffer-driven, not render-driven.
- Completion callbacks release and report; they do not render.
- System-memory completed frames are latest-N cache entries.
- Scheduled frames are protected until DeckLink completion.
- Startup uses real rendered warmup/preroll.
- Black fallback is degraded/error behavior, not steady-state behavior.
- Output render has priority over preview, screenshot, and bulk input upload.
- Measure before adding recovery branches.

View File

@@ -1,714 +0,0 @@
# Phase 1 Design: Subsystem Boundaries and Target Architecture
This document expands Phase 1 of [ARCHITECTURE_RESILIENCE_REVIEW.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/ARCHITECTURE_RESILIENCE_REVIEW.md) into a concrete target design. Its purpose is to define the long-term subsystem split before later phases introduce a full event model and move rendering onto a sole-owner render thread.
The main goal of Phase 1 is not to immediately rewrite the app. It is to establish clear ownership boundaries so later refactors all move toward the same architecture instead of solving local problems in conflicting ways.
## Status
Phase 1 has two different meanings in this repo, and they should not be collapsed:
- Phase 1 design package: complete.
- Phase 1 runtime implementation foothold: complete.
The completed design package includes the agreed subsystem names, responsibilities, dependency rules, state categories, and current-to-target migration map. The runtime code now has concrete subsystem folders, collaborators, read models, and tests for those boundaries, and the compiled runtime path no longer depends on `RuntimeHost`. That is different from saying every target boundary is fully extracted across the whole app: later roadmap phases are still responsible for the event model, sole-owner render thread, explicit live-state layering, background persistence, backend state machine, and fuller telemetry.
## Why Phase 1 Exists
At the start of this phase the app worked, but too many responsibilities converged in a few places:
- `RuntimeHost` owned persistence, live layer state, shader package access, status reporting, and mutation entrypoints.
- `OpenGLComposite` coordinated runtime setup, render state retrieval, shader rebuild handling, transient OSC overlay behavior, and video backend integration.
- DeckLink callback-driven playout still reached directly into render-facing work.
- Background services relied on polling and shared mutable state more than explicit subsystem contracts.
Those are exactly the kinds of overlaps that make timing issues, state regressions, and recovery edge cases harder to solve cleanly.
Phase 1 creates a map for where each responsibility should eventually live.
## Design Goals
The target architecture should optimize for:
- live timing isolation
- explicit state ownership
- predictable recovery behavior
- clear boundaries between persistent state and transient live state
- easier testing of non-GL and non-hardware logic
- fewer cross-thread shared mutable objects
- a playout model that can evolve toward producer/consumer scheduling
## Non-Goals
Phase 1 does not itself require:
- replacing every direct call with events immediately
- moving all rendering to a new thread yet
- redesigning the shader contract again
- changing DeckLink behavior in place
- removing all existing classes before replacements exist
This phase is the target design and the dependency rules. Later phases perform the actual extraction.
## Current Pressure Points
The following current code paths are the strongest evidence for the split proposed here:
- `RuntimeHost` was both store and live authority:
- `RuntimeHost.h`
- `RuntimeHost.cpp`
- `OpenGLComposite` is both app orchestrator and render/runtime coordinator:
- [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLComposite.cpp:106)
- [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLComposite.cpp:283)
- `RuntimeServices` mixes service orchestration with polling and deferred state work:
- [RuntimeServices.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/control/RuntimeServices.h:46)
- [RuntimeServices.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/control/RuntimeServices.cpp:194)
- Playout is still callback-coupled to render-facing work:
- [OpenGLVideoIOBridge.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLVideoIOBridge.cpp:68)
## Target Subsystems
The long-term architecture should converge on seven primary subsystems:
1. `RuntimeStore`
2. `RuntimeCoordinator`
3. `RuntimeSnapshotProvider`
4. `ControlServices`
5. `RenderEngine`
6. `VideoBackend`
7. `HealthTelemetry`
The split below is intentionally sharper than the current code. The point is to make ownership obvious.
Subsystem-specific design notes that elaborate these boundaries live under [docs/subsystems](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems).
## Phase 1 Document Set
This document is the parent note for the Phase 1 subsystem package. The bundle index and subsystem notes live here:
- [Subsystem Design Index](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/README.md)
- [RuntimeStore.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeStore.md)
- [RuntimeCoordinator.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeCoordinator.md)
- [RuntimeSnapshotProvider.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeSnapshotProvider.md)
- [ControlServices.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/ControlServices.md)
- [RenderEngine.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RenderEngine.md)
- [VideoBackend.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/VideoBackend.md)
- [HealthTelemetry.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/HealthTelemetry.md)
## Current Implementation Foothold
The codebase now has a Phase 1 runtime implementation foothold in place:
- `RuntimeStore`
- [RuntimeStore.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/store/RuntimeStore.h)
- [RuntimeStore.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/store/RuntimeStore.cpp)
- `RuntimeConfigStore`
- [RuntimeConfigStore.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/store/RuntimeConfigStore.h)
- [RuntimeConfigStore.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/store/RuntimeConfigStore.cpp)
- `ShaderPackageCatalog`
- [ShaderPackageCatalog.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/store/ShaderPackageCatalog.h)
- [ShaderPackageCatalog.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/store/ShaderPackageCatalog.cpp)
- `RuntimeCoordinator`
- [RuntimeCoordinator.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/coordination/RuntimeCoordinator.h)
- [RuntimeCoordinator.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/coordination/RuntimeCoordinator.cpp)
- `RuntimeSnapshotProvider`
- [RuntimeSnapshotProvider.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/snapshot/RuntimeSnapshotProvider.h)
- [RuntimeSnapshotProvider.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/snapshot/RuntimeSnapshotProvider.cpp)
- `RenderSnapshotBuilder`
- [RenderSnapshotBuilder.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/snapshot/RenderSnapshotBuilder.h)
- [RenderSnapshotBuilder.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/snapshot/RenderSnapshotBuilder.cpp)
- `ControlServices`
- [ControlServices.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/control/ControlServices.h)
- [ControlServices.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/control/ControlServices.cpp)
- `HealthTelemetry`
- [HealthTelemetry.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/telemetry/HealthTelemetry.h)
- [HealthTelemetry.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/runtime/telemetry/HealthTelemetry.cpp)
- `RenderEngine`
- [RenderEngine.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/RenderEngine.h)
- [RenderEngine.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/RenderEngine.cpp)
- `VideoBackend`
- [VideoBackend.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/videoio/VideoBackend.h)
- [VideoBackend.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/videoio/VideoBackend.cpp)
The runtime seams are now concrete code boundaries. Some app-level flows still delegate through compatibility helpers, `OpenGLComposite`, `DeckLinkSession`, and the existing bridge/pipeline classes, but runtime responsibilities have moved behind named collaborators:
- UI/runtime control calls in `OpenGLCompositeRuntimeControls.cpp` now route through `RuntimeCoordinator`
- runtime startup now initializes path resolution and config loading through `RuntimeConfigStore`, with shader package scan and lookup delegated to `ShaderPackageCatalog`
- runtime/UI state JSON composition now routes through `RuntimeStatePresenter` and `RuntimeStateJson` instead of living in `RuntimeHost` or `RuntimeStore`
- regular stored layer mutations and stack preset save/load now route through `RuntimeStore` into `LayerStackStore` instead of `RuntimeHost` public APIs
- persisted OSC-by-control-key commits now route through `RuntimeCoordinator` before applying store changes
- mutation and reload policy now routes through `RuntimeCoordinator`
- parameter target resolution, value normalization, trigger classification, and move no-op classification now live under `RuntimeCoordinator`
- render-state and shader-build reads in `OpenGLComposite.cpp`, `OpenGLShaderPrograms.cpp`, and `ShaderBuildQueue.cpp` now route through `RuntimeSnapshotProvider`
- `RuntimeSnapshotProvider` now depends on `RenderSnapshotBuilder` rather than on `RuntimeStore` friendship or shared `RuntimeHost` access
- render-state assembly, cached parameter refresh, dynamic frame-field application, and render snapshot versions now live in `RenderSnapshotBuilder` instead of `RuntimeStore`
- `RuntimeSnapshotProvider` now publishes versioned render snapshot objects and serves matching consumers from the last published snapshot
- service ingress and polling coordination now route through `ControlServices`
- `ControlServices` now queues coordinator results for OSC commit and file-poll outcomes instead of directly deciding runtime/store policy
- timing and status writes now route through `HealthTelemetry`
- `HealthTelemetry` now owns the live signal, video-I/O, and performance snapshots directly instead of `RuntimeHost` keeping those backing fields
- render-side frame advancement and render-performance reporting now flow through `RuntimeSnapshotProvider` and `HealthTelemetry` instead of directly through `RuntimeHost`
- `RuntimeStore` now owns its durable/session backing fields directly instead of wrapping a compatibility `RuntimeHost` object
- `RuntimeConfigStore` now owns runtime config parsing, path resolution, configured ports/formats, runtime roots, and shader compiler paths instead of leaving those responsibilities inside `RuntimeStore`
- `ShaderPackageCatalog` now owns shader package scanning, package status/order/lookup, and package asset/source change comparison instead of leaving those responsibilities inside `RuntimeStore`
- `LayerStackStore` now owns durable layer state, layer CRUD/reorder, parameter persistence, and stack preset value serialization/load instead of leaving those responsibilities inside `RuntimeStore`
- `RuntimeStatePresenter` and `RuntimeStateJson` now own runtime-state JSON assembly and layer-stack presentation serialization instead of leaving those responsibilities inside storage classes
- `RuntimeCoordinator` now uses explicit `RuntimeStore` query APIs/read models instead of friendship or direct store-internal access
- live OSC overlay state and smoothing/commit decisions now live under `RenderEngine` instead of `OpenGLComposite`
- coordinator result application, shader-build requests, ready-build application, and runtime-state broadcasts now route through `RuntimeUpdateController` instead of being interpreted directly by `OpenGLComposite`
- `OpenGLComposite` now owns a `RenderEngine` seam for renderer, pipeline, render-pass, and shader-program responsibilities
- `OpenGLComposite` now owns a `VideoBackend` seam for device/session ownership and callback wiring
- `OpenGLVideoIOBridge` now acts as an explicit compatibility adapter between `VideoBackend` and `RenderEngine`, instead of `OpenGLComposite` directly owning both sides
- `RuntimeSubsystemTests` now cover the new runtime seams around layer-stack storage, preset round-trips, mutation classification, and runtime-state JSON serialization
That means Phase 2 can focus on eventing and coordination mechanics rather than inventing the runtime boundary vocabulary.
Later-phase extraction work includes:
- moving persistence to an asynchronous writer in a later phase
- replacing polling/shared-object coordination with the planned internal event model
- making the render thread the sole GL owner
- formalizing committed-live versus transient-overlay layering
- making backend lifecycle and telemetry richer and more explicit
## Subsystem Responsibilities
### `RuntimeStore`
`RuntimeStore` owns persisted and operator-authored state.
It is the source of truth for:
- runtime config loaded from disk
- persisted layer stack structure
- persisted parameter values
- stack preset serialization/deserialization
- shader/package metadata that must survive across renders
It should not be responsible for:
- render-thread timing
- GL resource lifetime
- live transient overlays
- hardware callback coordination
- UI/websocket broadcasting policy
Design rules:
- disk I/O belongs here or in its dedicated writer helper
- values here are authoritative for saved state
- writes may be debounced later, but the data model itself belongs here
### `RuntimeCoordinator`
`RuntimeCoordinator` is the mutation and policy layer.
It is responsible for:
- receiving valid mutation requests from controls, services, or automation
- validating requested changes against shader definitions and config rules
- resolving how persisted state, committed live state, and transient overlays should interact
- requesting snapshot publication when state changes affect render
- requesting persistence when stored state changes
It should not be responsible for:
- direct disk serialization details
- direct GL work
- hardware device lifecycle
- polling loops
Design rules:
- all non-render mutations should eventually flow through this layer
- this layer decides whether a change is persisted, transient, or both
- this layer owns state policy, not device policy
### `RuntimeSnapshotProvider`
`RuntimeSnapshotProvider` publishes render-facing snapshots.
It is responsible for:
- building immutable or near-immutable render snapshots
- translating runtime state into render-ready structures
- publishing versioned snapshots
- serving the render side without large mutable shared locks
It should not be responsible for:
- deciding whether a mutation is allowed
- directly applying UI/OSC requests
- persistence
- shader compilation orchestration
Design rules:
- render consumes snapshots, not live mutable store objects
- snapshots should be cheap to read and explicit about version changes
- dynamic frame-only values may still be attached later, but the snapshot shape should stay stable
### `ControlServices`
`ControlServices` is the ingress boundary for non-render control sources.
It is responsible for:
- OSC receive and route resolution
- REST/websocket/control UI ingress
- file-watch or reload request ingress
- translating external inputs into typed internal actions/events
- low-cost buffering/coalescing where appropriate
It should not be responsible for:
- persistence decisions
- render snapshot building
- hardware playout policy
- direct long-lived state ownership beyond ingress-specific queues
Design rules:
- external inputs enter here and are normalized before they touch core state
- service-specific timing concerns stay here unless they affect whole-app policy
- no service should directly mutate render-facing state structures
### `RenderEngine`
`RenderEngine` is the owner of live rendering behavior.
It is responsible for:
- sole ownership of GL work in the target architecture
- shader program lifecycle once compilation outputs are available
- texture upload scheduling
- render-pass execution
- temporal history and shader feedback resources
- transient render-only overlays
- preview production as a subordinate output
- output-frame production for the video backend
It should not be responsible for:
- persistence
- user-facing control normalization
- hardware discovery/configuration
- high-level runtime mutation policy
Design rules:
- render consumes snapshots plus render-local transient state
- render-local state is allowed if it stays render-local
- preview must be treated as best-effort relative to playout
### `VideoBackend`
`VideoBackend` owns input/output device lifecycle and playout policy.
It is responsible for:
- input device configuration and callbacks
- output device configuration and callbacks
- frame scheduling policy
- buffer-pool ownership
- playout headroom policy
- input signal status
- backend state transitions and recovery logic
It should not be responsible for:
- composing frames
- owning GL contexts long-term
- validating shader parameter changes
- persistence
Design rules:
- this subsystem is the consumer of rendered output frames, not the owner of frame composition policy
- it should evolve toward producer/consumer playout rather than callback-driven rendering
- backend state should be explicit and reportable
### `HealthTelemetry`
`HealthTelemetry` owns structured operational visibility.
It is responsible for:
- logging
- warning/error counters
- timing traces
- subsystem health state
- degraded-mode reporting
- operator-visible health summaries
It should not be responsible for:
- deciding core app behavior
- owning render or backend state
- persistence policy
Design rules:
- all major subsystems publish health information here
- health visibility should outlive UI connection state
- modal dialogs should not be the main operational surface
## Target Dependency Rules
The architecture should follow these rules as closely as possible.
Allowed dependency directions:
- `ControlServices -> RuntimeCoordinator`
- `RuntimeCoordinator -> RuntimeStore`
- `RuntimeCoordinator -> RuntimeSnapshotProvider`
- `RuntimeCoordinator -> HealthTelemetry`
- `RuntimeSnapshotProvider -> RenderSnapshotBuilder`
- `RenderSnapshotBuilder -> RuntimeStore`
- `RenderEngine -> RuntimeSnapshotProvider`
- `RenderEngine -> HealthTelemetry`
- `VideoBackend -> RenderEngine`
- `VideoBackend -> HealthTelemetry`
Conditionally allowed during migration:
- `ControlServices -> HealthTelemetry`
- `ControlServices -> RuntimeStore` only through temporary compatibility shims
Not allowed in the target design:
- `RenderEngine -> RuntimeStore`
- `RenderEngine -> ControlServices`
- `VideoBackend -> RuntimeStore`
- `ControlServices -> RenderEngine` for direct mutation
- `RuntimeStore -> RenderEngine`
- `HealthTelemetry -> any subsystem` for control flow
The key principle is:
- store owns durable data
- coordinator owns mutation policy
- snapshot provider owns render-facing state publication
- render owns live GPU execution
- backend owns device timing
- telemetry observes all of them
## State Ownership Model
The app has several different kinds of state, and Phase 1 should name them explicitly.
### Persisted State
Owned by `RuntimeStore`.
Examples:
- layer stack structure
- selected shader ids
- saved parameter values
- runtime host config
- stack presets
### Committed Live State
Owned logically by `RuntimeCoordinator`, stored in the store or a live-state companion depending on future implementation.
Examples:
- current operator-selected parameter values
- current bypass state
- current selected shader for each layer
This is state that should normally survive until explicitly changed and can be persisted if policy says so.
### Transient Live Overlay State
Owned by the subsystem that consumes it, not by the persisted store.
Examples:
- active OSC overlay targets while automation is flowing
- shader feedback buffers
- temporal history textures
- queued input frames
- in-flight preview state
- playout queue state
This is where many current issues come from. The design rule is:
- transient state may influence output
- transient state should not masquerade as persisted truth
### Health and Timing State
Owned by `HealthTelemetry`.
Examples:
- frame pacing stats
- render timing
- late/dropped frame counters
- queue depths
- warning states
## Target Runtime Flow
This section describes the intended long-term flow once later phases are in place.
### Control Mutation Flow
1. OSC/UI/file-watch input enters `ControlServices`.
2. `ControlServices` normalizes it into an internal action or event.
3. `RuntimeCoordinator` validates and classifies the action.
4. If the action changes durable state, `RuntimeStore` is updated.
5. If the action changes render-facing state, `RuntimeSnapshotProvider` publishes a new snapshot.
6. If the action requires persistence, a persistence request is queued.
7. Health/timing observations are emitted separately.
### Render Flow
1. `RenderEngine` consumes the latest published snapshot.
2. `RenderEngine` combines that snapshot with render-local transient state.
3. `RenderEngine` performs uploads, pass execution, feedback/history maintenance, and output production.
4. `RenderEngine` produces:
- preview-ready output
- video-backend-ready output frames
- render timing and warning signals
### Video Output Flow
Target long-term flow:
1. `RenderEngine` produces completed output frames ahead of demand.
2. `VideoBackend` consumes those frames from a bounded queue or ring buffer.
3. Device callbacks only drive dequeue/schedule/accounting behavior.
4. `HealthTelemetry` records queue depth, lateness, underruns, and recovery events.
### Reload / Shader Rebuild Flow
1. file-watch or manual reload enters through `ControlServices`
2. `RuntimeCoordinator` classifies the reload request
3. `RuntimeStore` and shader/package metadata are refreshed if needed
4. `RuntimeSnapshotProvider` republishes affected snapshot state
5. `RenderEngine` rebuilds render-local resources from the new snapshot/build outputs
The important boundary here is that reload is not "a render concern that also touches persistence." It is a coordinated runtime concern with a render-local execution phase.
## Suggested Public Interfaces
These are not final class signatures, but they show the shape the architecture should move toward.
### `RuntimeStore`
Core responsibilities:
- `LoadConfig()`
- `LoadPersistentState()`
- `SavePersistentStateSnapshot(...)`
- `GetStoredLayerStack()`
- `SetStoredLayerStack(...)`
- `GetStackPresetNames()`
- `SaveStackPreset(...)`
- `LoadStackPreset(...)`
### `RuntimeCoordinator`
Core responsibilities:
- `ApplyControlMutation(...)`
- `ApplyAutomationTarget(...)`
- `ResetLayer(...)`
- `RequestReload(...)`
- `CommitOverlayState(...)`
- `PublishSnapshotIfNeeded()`
- `RequestPersistenceIfNeeded()`
### `RuntimeSnapshotProvider`
Core responsibilities:
- `BuildSnapshot(...)`
- `GetLatestSnapshot()`
- `GetSnapshotVersion()`
- `PublishSnapshot(...)`
### `ControlServices`
Core responsibilities:
- `StartOscIngress(...)`
- `StartWebControlIngress(...)`
- `StartFileWatchIngress(...)`
- `EnqueueControlAction(...)`
- `DrainServiceEvents(...)`
### `RenderEngine`
Core responsibilities:
- `StartRenderLoop(...)`
- `ConsumeSnapshot(...)`
- `EnqueueInputFrame(...)`
- `ProduceOutputFrame(...)`
- `ResetRenderLocalState(...)`
- `HandleRebuildOutputs(...)`
### `VideoBackend`
Core responsibilities:
- `ConfigureInput(...)`
- `ConfigureOutput(...)`
- `StartPlayout(...)`
- `StopPlayout(...)`
- `ConsumeRenderedFrame(...)`
- `ReportBackendState(...)`
### `HealthTelemetry`
Core responsibilities:
- `RecordTimingSample(...)`
- `RecordCounterDelta(...)`
- `RaiseWarning(...)`
- `ClearWarning(...)`
- `AppendLogEntry(...)`
- `BuildHealthSnapshot()`
## Mapping From Current Code to Target Subsystems
This is not a one-to-one rename plan. It is a responsibility migration map.
### Previous `RuntimeHost`
Should eventually split across:
- `RuntimeStore`
- `RuntimeCoordinator`
- `RuntimeSnapshotProvider`
- parts of `HealthTelemetry`
Likely examples:
- config loading/path resolution -> `RuntimeConfigStore`
- persistent state saving -> `RuntimeStore`
- layer stack mutation validation -> `RuntimeCoordinator`
- render state building/versioning -> `RenderSnapshotBuilder`
- render snapshot publication/cache -> `RuntimeSnapshotProvider`
- timing/status setters -> `HealthTelemetry`
### Current `RuntimeServices`
Should eventually become mostly:
- `ControlServices`
- a small service-hosting shell
Likely examples:
- OSC ingress/coalescing -> `ControlServices`
- file-watch ingress -> `ControlServices`
- deferred service coordination now done by polling -> split between `ControlServices` and event-driven coordinator calls
### Current `OpenGLComposite`
Should eventually split across:
- application bootstrap shell
- `RenderEngine`
- orchestration glue that wires subsystems together
Likely examples:
- render-pass facing code -> `RenderEngine`
- app/service/backend bootstrap -> composition root
- runtime mutation API surface -> coordinator-facing adapter, not render owner
### Current `OpenGLVideoIOBridge` and `DeckLinkSession`
Should eventually align more clearly under:
- `VideoBackend`
- `RenderEngine`
Likely examples:
- device callback and scheduling policy -> `VideoBackend`
- GL upload/readback/render work -> `RenderEngine`
## Architectural Guardrails
As later phases begin, these rules should be treated as guardrails.
### 1. No new cross-cutting runtime object should be introduced
If a new feature needs durable state, place it conceptually under `RuntimeStore`.
If it needs render-local transient state, place it conceptually under `RenderEngine`.
If it needs timing/status counters, place it conceptually under `HealthTelemetry`.
### 2. Render-local state should stay render-local
Do not push shader feedback, temporal history, preview caches, or playout queues back into the store just to make them easy to reach from other systems.
### 3. Device callbacks should not become a dumping ground for app work
Callback threads should converge toward signaling and queue management, not core rendering, persistence, or control mutation.
### 4. Persistence should not be used as a control synchronization mechanism
Saving state is not how subsystems discover changes. Published snapshots and explicit events should handle that.
### 5. Health reporting should observe, not coordinate
Telemetry systems may record warnings and degraded states, but they should not become the hidden control plane for the app.
## Migration Strategy
Phase 1 is a design phase, but it should support incremental migration.
Recommended order after this document:
1. Introduce names and interfaces before moving logic.
2. Create compatibility adapters around the subsystem facades rather than forcing a flag day.
3. Move read-only render snapshot publication out before moving all mutation logic.
4. Move service ingress boundaries out before removing the old polling shell.
5. Isolate timing/health setters from the core store as early as practical.
This keeps progress measurable while reducing rewrite risk.
## Suggested Deliverables for Completing Phase 1
Phase 1 can reasonably be considered complete once the project has:
- this subsystem-boundary design document
- agreed subsystem names and responsibilities
- agreed allowed dependency directions
- explicit state categories: persisted, committed live, transient overlay, health/timing
- a current-to-target responsibility map for runtime services, `OpenGLComposite`, and backend/render bridge code
- a decision that later phases will build against this target rather than inventing new boundaries ad hoc
By that definition, Phase 1 is complete for runtime: the design package is complete, `RuntimeHost` is retired from the compiled runtime path, runtime seams are represented in code, and runtime subsystem tests cover the new boundaries. App-wide ownership work continues in later phases.
## Open Questions For Later Phases
These do not block Phase 1, but they should remain visible.
- Should shader package registry ownership live entirely in `RuntimeStore`, or should compile-ready derived registry data move into the snapshot provider?
- Should committed live state be stored directly in `RuntimeStore`, or split into store plus live-session state owned by the coordinator?
- How much of shader build orchestration belongs to `RenderEngine` versus a separate build service?
- At what phase should preview become fully decoupled from playout cadence?
- Should persistence become its own `PersistenceWriter` subsystem in Phase 6, or remain an implementation detail under `RuntimeStore`?
## Short Version
Phase 1 should establish one simple rule for the rest of the refactor:
- durable state lives in the store
- mutation policy lives in the coordinator
- render-facing state is published as snapshots
- external control sources enter through services
- GL work belongs to render
- hardware pacing belongs to the backend
- health visibility belongs to telemetry
If later phases keep to that rule, the architecture will become materially more resilient without needing another round of foundational boundary changes.

View File

@@ -1,660 +0,0 @@
# Phase 2 Design: Internal Event Model
This document expands Phase 2 of [ARCHITECTURE_RESILIENCE_REVIEW.md](ARCHITECTURE_RESILIENCE_REVIEW.md) into a concrete design target.
Phase 1 established the subsystem vocabulary and moved the runtime path behind clearer collaborators. Phase 2 should now give those subsystems a safer way to coordinate than direct cross-calls, shared mutable result queues, and coarse polling loops.
## Status
- Phase 2 design package: accepted.
- Phase 2 implementation: substantially complete for the coordination substrate.
- Current alignment: the typed event substrate, app-owned dispatcher, coalesced app pump, reload bridge events, production bridges, and event behavior tests are in place. Remaining items are narrow follow-ups rather than foundation work.
The current repo now has concrete Phase 2 implementation footholds:
- `RuntimeEventType`, typed payload structs, `RuntimeEvent`, `RuntimeEventQueue`, `RuntimeEventDispatcher`, and `RuntimeEventCoalescingQueue` define the event substrate.
- `OpenGLComposite` owns one app-level `RuntimeEventDispatcher` and passes it into `RuntimeServices`, `RuntimeCoordinator`, `RuntimeUpdateController`, `RuntimeSnapshotProvider`, `ShaderBuildQueue`, and `VideoBackend`.
- `ControlServices` publishes typed OSC and runtime-state broadcast events and uses condition-variable wakeups with a fallback compatibility timer.
- `RuntimeCoordinator` publishes accepted, rejected, state-changed, persistence, reload, shader-build, and compile-status follow-up events.
- `RuntimeUpdateController` subscribes to event families for broadcast, shader build, compile status, render reset, and dispatcher health observations.
- `RuntimeSnapshotProvider` publishes render snapshot request/published events.
- `ShaderBuildQueue` and `RuntimeUpdateController` publish shader build lifecycle events with generation matching.
- `VideoBackend` publishes backend observation events and timing samples.
- `HealthTelemetry` receives dispatcher metrics directly and the event vocabulary now includes health observation events.
- Tests cover event type stability, payload mapping, FIFO dispatch, coalescing infrastructure, app-level coalesced broadcast/build behavior, handler failures, mutation follow-up behavior, reload bridge behavior, and shader-build generation behavior.
The implementation is now established in the repo. The remaining Phase 2 follow-up work is small: add completion/failure observations where useful and keep the runtime-store poll fallback explicitly transitional until a later file-watch implementation replaces it.
## Why Phase 2 Exists
The resilience review originally called out three timing and ownership problems that an event model could directly improve:
- background service timing relied on coarse sleeps and polling
- control, reload, persistence, and render-update work traveled through mixed shared state and result queues
- later render/backend refactors need a stable coordination model before they move more work across threads
The goal is not to make the app fully asynchronous in one pass. It is to introduce typed internal events so each subsystem can publish what happened without knowing who will react or how many downstream effects are needed.
## Goals
Phase 2 should establish:
- a small typed event vocabulary for control, runtime, render, backend, persistence, and health coordination
- one app-owned event pump or dispatcher that can route events deterministically
- bounded queues with clear ownership and no unbounded background growth
- wakeup-driven service coordination where practical, replacing coarse polling as the default shape
- explicit event-to-command boundaries so events do not become hidden global mutation APIs
- tests for event ordering, coalescing, rejection, and dispatch side effects
## Non-Goals
Phase 2 should not require:
- a dedicated render thread yet
- a full actor system
- lock-free queues everywhere
- background persistence implementation
- a complete DeckLink state machine
- final live-state layering
- replacing every direct call in one change
Those are later phases. Phase 2 provides the coordination substrate they can build on.
## Current Coordination Shape
The current runtime is much cleaner than before Phase 1, and Phase 2 has moved the main coordination model toward typed publication and app-owned dispatch:
- `ControlServices` publishes OSC value, OSC commit, and runtime-state broadcast events.
- `ControlServices::PollLoop(...)` is wakeup-driven for queued OSC commit work, with a bounded fallback timer for compatibility polling.
- `RuntimeCoordinator` still returns `RuntimeCoordinatorResult` for synchronous callers, but also publishes accepted/rejected/follow-up events.
- `RuntimeUpdateController` subscribes to event families and applies many effects from events rather than only from drained result objects.
- shader-build request, readiness, failure, and application are represented by typed events.
- render snapshot publication and backend observations are represented by typed events.
- dispatcher queue metrics and handler failures feed telemetry and health observation events.
There is still transitional bridge-state:
- `ControlServices` still exposes completed OSC commit notifications for render overlay settlement.
- `RuntimeEventCoalescingQueue` is now wired into the app-owned dispatcher for latest-value event types.
- `FileChangeDetected` and `ManualReloadRequested` are now published as reload ingress bridge events before coordinator reload follow-ups.
- runtime-state broadcast completion/failure events are still a target, not current behavior.
That means Phase 2 is complete enough as the coordination substrate for later phases. The remaining items are refinement work and should not block moving to render ownership, live-state layering, or persistence work.
## Event Model Principles
### Events say what happened
Events should describe facts:
- `OscValueReceived`
- `RuntimeMutationAccepted`
- `RuntimeMutationRejected`
- `ShaderReloadRequested`
- `ShaderBuildPrepared`
- `ShaderBuildFailed`
- `RenderSnapshotPublished`
- `RuntimeStateBroadcastRequested`
They should not be vague commands like "do everything needed now."
### Commands request intent
Some work is still naturally command-shaped:
- "apply this parameter mutation"
- "request shader reload"
- "save this stack preset"
- "start backend output"
Commands enter an owner subsystem. Events leave a subsystem after the owner has accepted, rejected, or completed work.
### One owner mutates each state category
Events must not become a way to bypass Phase 1 ownership:
- `RuntimeCoordinator` remains the owner of mutation policy.
- `RuntimeStore` remains the owner of durable state.
- `RuntimeSnapshotProvider` remains the owner of render snapshot publication.
- `RenderEngine` remains the owner of render-local transient state.
- `VideoBackend` remains the owner of device lifecycle and pacing.
- `HealthTelemetry` observes and reports, but does not coordinate behavior.
### Event handlers should be small
Handlers should translate events into owner calls or follow-up events. They should not accumulate hidden long-lived state unless that state belongs to the handler's subsystem.
### Queues must be bounded or coalesced
High-rate control traffic can arrive faster than the app should process every individual sample. Phase 2 should preserve the useful current behavior of coalescing OSC updates by route, but make the coalescing policy explicit.
## Event Families
### Control Events
Produced by `ControlServices`.
Examples:
- `OscValueReceived`
- `OscValueCoalesced`
- `OscCommitRequested`
- `HttpControlMutationRequested`
- `WebSocketClientConnected`
- `RuntimeStateBroadcastRequested`
- `FileChangeDetected`
- `ManualReloadRequested`
Primary consumers:
- `RuntimeCoordinator`
- `HealthTelemetry`
- later, a persistence writer or diagnostics publisher
### Runtime Events
Produced by `RuntimeCoordinator`, `RuntimeStore`, and snapshot publication code.
Examples:
- `RuntimeMutationAccepted`
- `RuntimeMutationRejected`
- `RuntimeStateChanged`
- `RuntimePersistenceRequested`
- `RuntimeReloadRequested`
- `ShaderPackagesChanged`
- `RenderSnapshotPublishRequested`
- `RuntimeStatePresentationChanged`
Primary consumers:
- `RuntimeSnapshotProvider`
- `RenderEngine`
- `ControlServices`
- `HealthTelemetry`
- later, `PersistenceWriter`
### Shader Build Events
Produced by shader build orchestration and render-side build application.
Examples:
- `ShaderBuildRequested`
- `ShaderBuildPrepared`
- `ShaderBuildApplied`
- `ShaderBuildFailed`
- `CompileStatusChanged`
Primary consumers:
- `RenderEngine`
- `RuntimeCoordinator`
- `ControlServices`
- `HealthTelemetry`
### Render Events
Produced by `RenderEngine` and `RuntimeSnapshotProvider`.
Examples:
- `RenderSnapshotPublished`
- `RenderResetRequested`
- `RenderResetApplied`
- `OscOverlayApplied`
- `OscOverlaySettled`
- `FrameRendered`
- `PreviewFrameAvailable`
Primary consumers:
- `RenderEngine`
- `ControlServices`
- `VideoBackend`
- `HealthTelemetry`
### Backend Events
Produced by `VideoBackend` and backend adapters.
Examples:
- `InputSignalChanged`
- `InputFrameArrived`
- `OutputFrameScheduled`
- `OutputFrameCompleted`
- `OutputLateFrameDetected`
- `OutputDroppedFrameDetected`
- `BackendStateChanged`
Primary consumers:
- `RenderEngine`
- `HealthTelemetry`
- later, backend lifecycle state machine handlers
### Health Events
Produced by all major subsystems.
Examples:
- `SubsystemWarningRaised`
- `SubsystemWarningCleared`
- `SubsystemRecovered`
- `TimingSampleRecorded`
- `QueueDepthChanged`
Primary consumer:
- `HealthTelemetry`
Health events should be observational. They should not be required for core behavior to proceed.
## Event Envelope
A practical initial event envelope can stay simple:
```cpp
enum class RuntimeEventType
{
    OscCommitRequested,
    RuntimeMutationAccepted,
    RuntimeMutationRejected,
    RuntimeReloadRequested,
    ShaderBuildRequested,
    ShaderBuildPrepared,
    ShaderBuildFailed,
    RenderSnapshotPublishRequested,
    RenderSnapshotPublished,
    RuntimeStateBroadcastRequested,
    BackendStateChanged,
    SubsystemWarningRaised
};

struct RuntimeEvent
{
    RuntimeEventType type;
    uint64_t sequence = 0;
    std::chrono::steady_clock::time_point createdAt;
    std::string source;
    std::variant<
        OscCommitRequestedEvent,
        RuntimeMutationEvent,
        ShaderBuildEvent,
        RenderSnapshotEvent,
        BackendEvent,
        HealthEvent> payload;
};
```
The exact C++ names can change. The key design requirements are:
- event type is explicit
- event order is observable
- source subsystem is recorded
- payload is typed, not a bag of optional strings
- timestamps exist for queue-age telemetry
- failures are events too, not just debug strings
## Event Bus Shape
Phase 2 does not need a large framework. A small app-owned dispatcher is enough.
Suggested components:
- `RuntimeEventDispatcher`
- owns queues
- assigns sequence numbers
- exposes `Publish(...)`
- exposes `DispatchPending(...)`
- event handlers
- narrow handler interface or function callback
- registered by subsystem/composition root
- `RuntimeEventQueue`
- bounded FIFO for ordinary events
- `RuntimeEventCoalescingQueue`
- bounded keyed latest-value queue for flows such as high-rate OSC, broadcast requests, file/reload bursts, and queue-depth telemetry
- queue and dispatch metrics
- queue depth
- oldest event age
- dropped/coalesced counts
Initial implementation is single-process and mostly single-dispatch-thread. The important part is that event publication and event handling are explicit.
### Dispatcher Ownership Decision
The first concrete implementation uses one app-owned `RuntimeEventDispatcher`.
Ownership:
- `OpenGLComposite` owns the dispatcher as part of the current composition root.
References:
- `RuntimeServices` receives the dispatcher and passes it to `ControlServices`.
- `RuntimeCoordinator` receives the dispatcher so coordinator outcomes can become explicit events.
- `RuntimeUpdateController` receives the dispatcher so it can become the first effect/apply handler.
- `RuntimeSnapshotProvider`, `ShaderBuildQueue`, and `VideoBackend` receive the dispatcher so snapshot, shader lifecycle, and backend observation events are visible.
This is intentionally a composition-root dependency, not a new subsystem dependency. Subsystems should not construct their own dispatchers, and future tests should use `RuntimeEventTestHarness` rather than creating ad hoc event plumbing.
The dispatcher should move out of `OpenGLComposite` only if a later application-shell/composition-root object replaces `OpenGLComposite` as the owner of subsystem wiring.
## Queue Policy
Not every event deserves the same queue semantics.
### FIFO Events
Use FIFO for events where every item matters:
- mutation accepted/rejected
- shader build completed/failed
- backend state changed
- warning raised/cleared
### Coalesced Events
Use coalescing for high-rate latest-value flows:
- OSC parameter target updates by route
- runtime-state broadcast requests
- file-change reload requests during a burst
- queue-depth telemetry
Coalesced events should record how many updates were collapsed so telemetry can show pressure.
### Synchronous Boundaries
Some calls may remain synchronous during Phase 2:
- UI/API mutation calls that need an immediate success/error response
- startup configuration failures
- shutdown ordering
- tests
The rule is that synchronous calls should still publish events for accepted/rejected/completed work, so the rest of the app does not need to infer side effects from the call path.
## Event Bridge Policy
This section is the implementation rulebook for converting existing direct calls and result queues into events. Future Phase 2 lanes should use this table unless they deliberately update the policy here first.
### Bridge Categories
| Bridge category | Use when | Queue shape | Handler expectation |
| --- | --- | --- | --- |
| `fifo-fact` | every occurrence matters and must be observed in order | bounded FIFO | handler consumes each event exactly once |
| `coalesced-latest` | only the latest value per key matters | bounded coalescing queue | handler consumes the latest event and telemetry records collapsed count |
| `sync-command-with-event` | caller needs an immediate success/error result | direct owner call plus follow-up event publication | handler must not be required for the caller's response |
| `observation-only` | event is telemetry/diagnostic and must not drive core behavior | FIFO or coalesced depending on rate | handler failure must never block app behavior |
| `compatibility-poll` | source cannot yet publish an event directly | temporary poll adapter publishes typed events | poll interval is wakeup-driven with a fallback timer until a later file-watch implementation replaces it |
### Current Bridge Decisions
| Current flow | Phase 2 bridge | Event(s) | Current status |
| --- | --- | --- | --- |
| OSC latest-value updates | `ControlServices` ingress bridge | `OscValueReceived`, optional `OscValueCoalesced` | Event publication exists; source-side pending map and app-level dispatcher coalescing both provide latest-value behavior. |
| OSC commit after settle | `ControlServices -> RuntimeCoordinator` bridge | `OscCommitRequested`, then `RuntimeMutationAccepted` or `RuntimeMutationRejected` | Event publication exists. Coordinator follow-up work now reaches the app path through events rather than a service-result queue. |
| HTTP/UI mutation needing response | direct call into `RuntimeCoordinator` | `RuntimeMutationAccepted` or `RuntimeMutationRejected` after the synchronous response path | Implemented as `sync-command-with-event`; synchronous response remains supported. |
| runtime-state broadcast request | presentation/broadcast bridge | `RuntimeStatePresentationChanged`, `RuntimeStateBroadcastRequested` | Request event exists, is handled, and is coalesced by the app dispatcher. Completion/failure events remain follow-ups. |
| manual reload button | control ingress bridge | `ManualReloadRequested`, then `RuntimeReloadRequested` | Ingress and follow-up events exist and are covered by tests. |
| file watcher changes | file-watch bridge | `FileChangeDetected`, then `RuntimeReloadRequested` | Poll fallback remains, but detected changes now publish ingress and follow-up events and are covered by tests. |
| runtime store poll fallback | compatibility poll adapter | `FileChangeDetected`, `RuntimeReloadRequested`, or warning/compile-status event | Still present by design as a transitional bridge with a condition-variable fallback timer. Detected changes publish ingress and follow-up events. |
| shader build request | runtime/render bridge | `ShaderBuildRequested` | Event publication, handler, and app dispatcher coalescing exist. |
| shader build ready/failure/apply | shader build lifecycle bridge | `ShaderBuildPrepared`, `ShaderBuildFailed`, `ShaderBuildApplied`, `CompileStatusChanged` | Implemented with generation matching. |
| render snapshot publication | snapshot bridge | `RenderSnapshotPublishRequested`, `RenderSnapshotPublished` | Implemented. Publish requests are coalesced by output dimensions in the app dispatcher. |
| render reset request/application | render bridge | `RenderResetRequested`, `RenderResetApplied` | Request handling exists; applied event coverage can be expanded in later render work. |
| input signal changes | backend observation bridge | `InputSignalChanged` | Implemented as backend observation publication. |
| output late/dropped/completed frames | backend timing bridge | `OutputFrameCompleted`, `OutputLateFrameDetected`, `OutputDroppedFrameDetected` | Implemented at the vocabulary and backend publication level. High-rate policy may be refined during backend lifecycle work. |
| warnings and recovery | telemetry bridge | `SubsystemWarningRaised`, `SubsystemWarningCleared`, `SubsystemRecovered` | Vocabulary exists; direct telemetry writes still coexist with event observations. |
| queue depth/timing samples | telemetry metrics bridge | `QueueDepthChanged`, `TimingSampleRecorded` | Implemented for dispatcher/backend observations and coalesced by metric key in the app dispatcher. |
### Bridge Rules
- A bridge may translate an old direct call into an owner command, but it must publish the accepted/rejected/completed event that describes the outcome.
- A bridge must not mutate state owned by another subsystem just because it handles that subsystem's event.
- A coalesced event must have a stable key in code and a documented policy here.
- A FIFO event should be cheap enough that retaining every occurrence is useful. If not, turn it into a coalesced metric before putting it on a hot path.
- A synchronous bridge must treat event publication as a side effect of the owner decision, not as the mechanism that produces the direct caller's response.
- A compatibility poll adapter should be named as temporary in code so it does not become the new long-term coordination model.
- Handler failure should be reported through telemetry and dispatch metrics. It should not throw back across subsystem boundaries.
### First Integration Recommendation
The safest first behavior-changing bridge is `RuntimeStateBroadcastRequested`.
It is low risk because:
- it is already a side effect of many coordinator outcomes
- duplicate requests are naturally coalescable
- the handler can call the existing `ControlServices::BroadcastState()` path
- success can be verified through existing UI behavior and event tests
After that, the next bridge should be `ShaderBuildRequested`, because it already behaves like a queued side effect and has clear follow-up events.
## Target Flow Examples
### OSC Parameter Update
1. `OscServer` decodes a packet.
2. `ControlServices` publishes or coalesces `OscValueReceived`.
3. The dispatcher routes the event to the render-overlay path or coordinator policy, depending on whether the value is transient or committing.
4. `RuntimeCoordinator` publishes `RuntimeMutationAccepted` or `RuntimeMutationRejected` for committed changes.
5. Accepted committed changes publish `RenderSnapshotPublishRequested` and `RuntimePersistenceRequested` as needed.
6. `ControlServices` receives `RuntimeStateBroadcastRequested` or a presentation-changed event and broadcasts at its own cadence.
### File Reload
1. File-watch or manual reload produces `FileChangeDetected` or `ManualReloadRequested`.
2. `ControlServices` coalesces reload bursts into one `RuntimeReloadRequested`.
3. `RuntimeCoordinator` classifies the reload.
4. Package/store refresh produces `ShaderPackagesChanged` if package metadata changed.
5. Coordinator publishes `ShaderBuildRequested`.
6. Shader build completion publishes `ShaderBuildPrepared` or `ShaderBuildFailed`.
7. Render applies the ready build and publishes `ShaderBuildApplied`.
### Runtime State Broadcast
1. A mutation or reload publishes `RuntimeStatePresentationChanged`.
2. `ControlServices` coalesces this into a broadcast request.
3. The broadcast path asks `RuntimeStatePresenter` for the current presentation read model.
4. `HealthTelemetry` records broadcast count, failures, and queue age.
### Backend Signal Change
1. Backend adapter detects input signal change.
2. `VideoBackend` publishes `InputSignalChanged`.
3. `HealthTelemetry` records the new signal status.
4. Later phases may let the backend lifecycle state machine react to the same event.
## Migration Plan
### Step 1. Add Event Types And A Minimal Dispatcher
Status: complete.
Introduce:
- `RuntimeEvent`
- `RuntimeEventType`
- typed payload structs for the smallest useful event family
- `RuntimeEventBus` or equivalent dispatcher
Start with events that do not change behavior:
- `RuntimeStateBroadcastRequested`
- `ShaderBuildRequested`
- `RuntimeMutationRejected`
- simple health/log observations
### Step 2. Convert `RuntimeUpdateController` Into An Event Handler
Status: complete for the Phase 2 target, with synchronous API helpers retained.
`RuntimeUpdateController` is already close to an event effect applier. Phase 2 should narrow it into a handler for:
- coordinator outcome events
- shader build readiness events
- snapshot publication requests
- broadcast requests
The class should stop being the place that polls every source of work.
Current note: `RuntimeUpdateController` now subscribes to the dispatcher and handles broadcast, reload, shader build, compile status, render reset, and health observation paths. It still accepts synchronous `RuntimeCoordinatorResult` values for UI/API calls that need immediate success or error responses.
### Step 3. Replace `ControlServices::PollLoop` Sleep With Wakeups
Status: complete for OSC commit wakeups; runtime-store compatibility polling remains explicitly transitional.
Keep coalescing, but replace the fixed `25 x Sleep(10)` cadence with:
- a condition variable or waitable event
- wakeups when OSC commit work arrives
- wakeups when file/reload work arrives
- a fallback timer only for compatibility polling that cannot yet be evented
This is the most direct Phase 2 timing win.
Current note: `ControlServices` now uses a condition variable and fallback timer. The fallback exists for runtime-store polling until a later file-watch implementation can replace scanning as the change source. Detected reload/file changes publish typed ingress and follow-up events.
### Step 4. Route Shader Build Lifecycle Through Events
Status: mostly complete.
Turn the current request/apply/failure/success path into explicit events:
- `ShaderBuildRequested`
- `ShaderBuildPrepared`
- `ShaderBuildFailed`
- `ShaderBuildApplied`
- `CompileStatusChanged`
This should preserve the current off-frame-path compile behavior while making readiness visible.
Current note: request, prepared, failed, applied, and compile-status events exist. Generation-aware consumption is covered by tests. Request events are coalesced by build dimensions and preserve-feedback policy in the app dispatcher.
### Step 5. Route Runtime Broadcasts Through Events
Status: partially complete.
Replace direct "broadcast now" decisions with:
- `RuntimeStatePresentationChanged`
- `RuntimeStateBroadcastRequested`
- `RuntimeStateBroadcastCompleted`
- `RuntimeStateBroadcastFailed`
This keeps UI delivery in `ControlServices` while keeping presentation ownership in the runtime presentation layer.
Current note: `RuntimeStateBroadcastRequested` exists, is coalesced by the app dispatcher, and is handled. Broadcast completion/failure events have not been added yet.
### Step 6. Add Event Metrics
Status: mostly complete for dispatcher metrics; broader health-event observation continues.
Before using the event system for hotter paths, add metrics:
- event queue depth
- oldest event age
- event dispatch duration
- coalesced event count
- dropped event count
- handler failure count
These should feed `HealthTelemetry`.
Current note: queue depth, oldest-event age, dispatch duration, dropped count, coalesced count, and handler failure counts feed telemetry. Queue/timing events are also published and coalesced by metric key.
## Dependency Rules
Allowed:
- producers publish events to the bus
- the composition root registers handlers
- handlers call owner subsystem APIs
- `HealthTelemetry` observes event metrics and failures
Avoid:
- subsystems subscribing directly to each other in constructors
- event handlers mutating state outside their owner subsystem
- using one global event payload with many nullable fields
- making render hot paths block on the event bus
- requiring health/telemetry event delivery for core behavior
The dispatcher is coordination infrastructure, not a new domain owner.
## Testing Strategy
Phase 2 should add tests that do not require GL, DeckLink, or network sockets.
Implemented tests:
- FIFO events dispatch in sequence order
- coalesced events keep the latest payload and count collapsed updates
- rejected mutations publish rejection events without downstream snapshot/build events
- accepted parameter mutations publish the expected follow-up event set
- handler failures are reported as health/log events
- queue depth and oldest-event-age metrics update predictably
- typed payload mapping covers persistence, render snapshot, backend, timing, queue-depth, and late/dropped output-frame events
- shader build generation matching applies only the expected prepared build
Remaining useful tests before deeper file-watch work:
- file reload bursts collapse into one reload request across a real poll burst
- broadcast completion/failure events are observable once those payloads exist
The existing `RuntimeEventTypeTests` target is now the main pure event behavior harness. `RuntimeEventTestHarness` should remain the shared test helper so future lanes do not invent their own dispatcher plumbing.
## Phase 2 Exit Criteria
Phase 2 can be considered complete once the project can say:
- [x] there is a typed internal event envelope and dispatcher
- [x] `OpenGLComposite` owns the dispatcher as the current composition root
- [x] `ControlServices` emits typed events for OSC commits and broadcast requests
- [x] reload/file-change work publishes typed ingress and follow-up events
- [x] `RuntimeCoordinator` publishes explicit accepted/rejected/follow-up events
- [x] callers no longer need broad compatibility result queues for normal runtime side effects
- [x] `RuntimeUpdateController` handles event-driven broadcast, shader build, compile status, render reset, and health observation paths
- [x] `RuntimeUpdateController` no longer needs compatibility result draining for ordinary service work
- [x] shader build request/readiness/failure/application is represented as events
- [x] shader build requests are coalesced by dimensions and preserve-feedback policy in the app path
- [x] render snapshot publication is represented as request/published events
- [x] render snapshot publish requests are coalesced in the app path where needed
- [x] backend observations publish typed events
- [x] event queues expose depth, age, dropped, coalescing, and failure metrics
- [x] production event paths use coalescing for broadcast requests, shader-build requests, and high-rate metrics
- [x] coarse sleep polling is no longer the default coordination model for OSC commit service work
- [x] runtime-store/file-change compatibility polling is explicitly contained and publishes event-first reload bridge events when changes are detected
Phase 2 closure note:
- The checklist above is complete for the internal event model substrate.
- Broadcast completion/failure events and real file-watch burst tests are useful follow-ups, but they are no longer foundation blockers.
- `RuntimeCoordinatorResult` may remain as a synchronous return type for command APIs; the Phase 2 requirement is that accepted/rejected/follow-up behavior is also published as typed events, which is now true.
## Open Questions For Implementation
- Resolved: the first dispatcher is single-process, app-owned, and pumped through the current app/update path.
- Resolved: event payloads use typed structs carried by `std::variant`.
- Resolved: persistence requests are represented in Phase 2 even though background persistence lands later.
- Resolved: backend callback events are introduced now as observation-only events.
- Still open: should high-rate OSC transient overlay events enter the app dispatcher, or should they remain source-local until the live-state layering phase?
- Resolved for Phase 2: `RuntimeCoordinatorResult` can survive as a synchronous helper for command APIs, as long as event publication remains the coordination path for downstream effects.
- Resolved: app-level coalescing lives inside `RuntimeEventDispatcher`; source-specific bridges can still coalesce before publication when they own useful domain-specific collapse policy.
## Short Version
Phase 2 should give the app a typed nervous system.
- external inputs become typed events
- owner subsystems still make decisions
- decisions publish explicit outcomes
- follow-up work is routed by handlers, not inferred from scattered call paths
- high-rate work is bounded or coalesced
- timing and queue pressure become observable
If this boundary holds, later render-thread, persistence, backend, and telemetry work can move independently without returning to shared-object polling as the default coordination model.

View File

@@ -1,383 +0,0 @@
# Phase 3 Design: Live State And Service Coordination
This document expands Phase 3 of [ARCHITECTURE_RESILIENCE_REVIEW.md](ARCHITECTURE_RESILIENCE_REVIEW.md) into a concrete design target.
Phase 1 split runtime responsibilities into named subsystems. Phase 2 added the typed internal event model those subsystems can coordinate through. Phase 3 should now finish the service-facing and live-state cleanup needed before the app attempts sole-owner GL rendering.
## Status
- Phase 3 design package: accepted.
- Phase 3 implementation: exit criteria satisfied for the current architecture.
- Current alignment: the repo now has the live-state/composer building blocks, a service bridge, and a named frame-state handoff. `OpenGLComposite::renderEffect()` remains the app-level frame entrypoint, but the service drain, layer-state resolution, and OSC commit handoff now sit behind named helpers and frame-state data.
Current footholds:
- `RuntimeStore` is split into durable state collaborators: `RuntimeConfigStore`, `LayerStackStore`, `ShaderPackageCatalog`, `RenderSnapshotBuilder`, presentation read models, and `HealthTelemetry`.
- `RuntimeCoordinator` owns mutation validation/classification and publishes accepted/rejected/follow-up events.
- `RuntimeSnapshotProvider` publishes render snapshots from `RenderSnapshotBuilder`.
- `RuntimeLiveState` owns transient OSC overlay bookkeeping and commit-settlement policy.
- `RenderStateComposer` exists as the first pure composition boundary for combining base layer state with live overlays.
- `RenderFrameInput` / `RenderFrameState` now provide a named frame-facing handoff model for preparing layer state and render inputs before drawing.
- `RenderFrameStateResolver` now owns snapshot cache selection, parameter refresh decisions, and final frame-state resolution before drawing.
- `RenderEngine` owns GL/render resources and delegates frame-state preparation to the resolver.
- `ControlServices` owns OSC ingress, pending OSC updates, completed OSC commit notifications, and service start/stop.
- `RuntimeServiceLiveBridge` translates service OSC queues into render live-state updates and queues settled overlay commit requests.
- `RuntimeEventDispatcher` now routes accepted mutations, reloads, snapshots, shader build events, backend observations, and health observations.
The current architecture is much better than the original `RuntimeHost` shape. Phase 4 has since moved normal runtime GL work onto the `RenderEngine` render thread, so the remaining render-facing risk is no longer shared context ownership; it is the later producer/consumer playout work needed to keep DeckLink callbacks from synchronously waiting on output production.
## Why Phase 3 Exists
The resilience review says render-thread isolation should come after state access and control coordination are no longer centered on a large mutable runtime object. Phase 2 gives us the event substrate; Phase 3 should make the data flowing into render explicit enough that Phase 4 can make the render thread the sole GL owner without dragging service coordination and state reconciliation with it.
The main problems Phase 3 addressed:
- transient OSC overlay state and persisted committed state needed a named reconciliation boundary
- `RenderEngine` needed to move final frame-state selection and value composition out of drawing code
- service-side queues for pending OSC updates and completed OSC commits needed a bridge outside `OpenGLComposite`
- `RuntimeStore` still performs synchronous persistence directly from many state mutation paths
- `RuntimeUpdateController` still exists partly as compatibility glue between synchronous coordinator results and event-driven effects
## Goals
Phase 3 should establish:
- an explicit live-state model separating persisted state, committed runtime state, and transient automation overlay
- service-facing event bridges for OSC overlay updates and overlay commit completions
- a narrower `OpenGLComposite::renderEffect()` that renders a prepared read model instead of orchestrating runtime/service state
- a clear owner for final render-layer state resolution before it reaches GL drawing
- a contained persistence request model that prepares for the later background writer phase
- tests for live-state composition, overlay settlement, and service-to-runtime event behavior without GL or DeckLink
## Non-Goals
Phase 3 should not require:
- a dedicated render thread
- moving all GL calls off the current callback path
- a background persistence writer implementation
- a final DeckLink lifecycle state machine
- replacing every direct synchronous command API
- a final cue/preset/timeline system
Those are later phases. Phase 3 is about making state and service coordination clean enough for those later phases.
## Current Coordination Shape
`OpenGLComposite::renderEffect()` is now the app-level frame entrypoint, but it is intentionally narrow:
1. pumps `RuntimeUpdateController::ProcessRuntimeWork()`
2. builds a `RenderFrameInput`
3. renders the frame through `RuntimeServiceLiveBridge`, `RenderFrameStateResolver`, and `RenderEngine`
The bridge now owns service queue draining, live automation settlement, committed/live state selection, and OSC commit handoff. `RenderFrameStateResolver` owns snapshot cache selection, parameter refresh decisions, and dynamic render-field refresh before handing a prepared frame state to `RenderEngine`.
## Target State Model
Phase 3 should formalize three state categories:
| State category | Owner | Lifetime | Render role |
| --- | --- | --- | --- |
| Persisted layer state | `LayerStackStore` behind `RuntimeStore` | saved durable state | base layer stack and saved parameter values |
| Committed runtime state | `RuntimeCoordinator` / snapshot publication | accepted operator/UI/OSC commits | stable render snapshot selected for rendering |
| Transient automation overlay | new live-state collaborator or narrowed render-side owner | high-rate OSC automation between commits | temporary per-route override blended into final values |
Render should eventually consume:
```text
final render state = published snapshot + committed live selection + transient overlay
```
The important change is not the exact formula name. The important change is that final render-state composition has one named owner and can be tested without GL.
## Phase 3 Collaborators
### `RuntimeLiveState`
Small runtime collaborator for transient automation state.
Responsibilities:
- keep transient OSC overlay values keyed by route
- track overlay generation and pending commit generation
- apply overlay commit completions
- decide when an overlay value has settled enough to request a commit
- build a `LiveStateOverlaySnapshot` for final render-state composition
Non-responsibilities:
- persistent state mutation
- shader package lookup
- GL resources
- OSC socket ownership
### `RenderStateComposer`
Pure or mostly pure collaborator for frame value composition.
Responsibilities:
- combine published render snapshots with live overlay state
- apply smoothing/time-based automation policy
- return final `RuntimeRenderState` values plus any commit requests
- stay testable without OpenGL
Non-responsibilities:
- drawing
- service queue draining
- disk persistence
- OSC packet parsing
### `RuntimeServiceLiveBridge`
`RuntimeServiceLiveBridge` is the current source-local bridge between services, live state, and render-state preparation.
Responsibilities:
- translate service-side OSC ingress into typed events or live-state commands
- publish overlay applied/settled events where useful
- route overlay commit requests to `RuntimeCoordinator`
- keep `OpenGLComposite` out of service queue draining
Non-responsibilities:
- final GL rendering
- persistent store mutation outside coordinator APIs
## Event Bridge Targets
| Current flow | Phase 3 bridge target | Notes |
| --- | --- | --- |
| pending OSC updates drained by `OpenGLComposite` | `OscValueReceived` -> live-state overlay update handler | Phase 2 already has the event type; Phase 3 decides whether transient overlay updates enter the app dispatcher or a source-local bridge. |
| render asks for overlay commit requests | `OscOverlaySettled` or direct coordinator command plus event publication | Commit request creation should leave `renderEffect()` and live near the live-state owner. |
| completed OSC commits drained by `OpenGLComposite` | `RuntimeMutationAccepted` / completion event -> live-state commit completion | Completed commit routing should be event-driven or owned by live-state service bridge. |
| `RenderFrameStateResolver::Resolve(...)` | `RenderStateComposer::BuildFrameState(...)` | Keep final state composition testable without GL. |
| direct persistence writes from store mutations | `RuntimePersistenceRequested` as the durable write trigger | Background writer lands later; Phase 3 should make request boundaries clear. |
| runtime-state broadcast side effects | `RuntimeStateBroadcastRequested` plus optional completed/failed observations | Keep broadcast delivery in services and presentation ownership in runtime presentation. |
## Runtime Store Scope In Phase 3
`RuntimeStore` is already much smaller than the original host, but Phase 3 should keep narrowing it toward durable state and read-model publishing.
Target responsibilities:
- initialize runtime config and persistent state
- expose durable layer/package/config read models
- own saved layer stack and preset serialization until the background writer phase
- publish or support immutable render/presentation snapshots
Avoid adding:
- transient OSC overlay state
- frame-local render composition decisions
- service queue coordination
- background worker policy
## Runtime Coordinator Scope In Phase 3
`RuntimeCoordinator` should remain the command/mutation policy owner.
Keep:
- validation/classification
- accepted/rejected mutation publication
- reload/build/persistence follow-up events
- synchronous command results for UI/API callers that need immediate success or error
Narrow:
- any behavior that looks like render-frame state composition
- any direct service queue interpretation
- any persistence timing policy beyond publishing `RuntimePersistenceRequested`
## Render Engine Scope In Phase 3
`RenderEngine` should move closer to being a GL/render-local owner.
Keep:
- GL resources
- shader programs
- render passes
- preview/output rendering
- temporal history and feedback resources
Move or narrow:
- transient OSC overlay bookkeeping
- final layer-state composition from snapshot plus overlay
- creation of commit requests from smoothed overlay values
Some transient render-only state may remain in `RenderEngine` if it truly belongs to GL or temporal resources. But value composition should be separable from drawing.
## OpenGLComposite Scope In Phase 3
`OpenGLComposite` should remain the current composition root, but not the runtime-service coordinator.
Target:
- wire collaborators
- own app-level lifecycle
- initialize GL/backend/runtime services
- call narrow render/update entrypoints
Avoid:
- draining OSC queues directly
- converting service DTOs into render DTOs
- deciding final layer-state composition
- coordinating commit completion settlement
## Persistence Position
Phase 3 should not implement the background writer, but it should prepare for it.
Target behavior by Phase 3 exit:
- state mutations publish `RuntimePersistenceRequested`
- persistence can be observed and tested as an event side effect
- synchronous `SavePersistentState()` remains allowed as an implementation detail inside `RuntimeStore`
- callers outside the store/coordinator should not infer disk writes from mutation categories
This keeps Phase 6 smaller: the background snapshot writer can subscribe to persistence requests and consume a stored-state snapshot rather than rediscovering mutation policy.
## Migration Plan
### Step 1. Name The Live State Boundary
Introduce `RuntimeLiveState`, `RenderStateComposer`, or an equivalent pair of classes.
Start by moving pure data operations out of frame rendering without changing behavior.
Status: complete for Phase 3. `runtime/live/RuntimeLiveState` and `runtime/live/RenderStateComposer` exist, are included in the build, and have a focused `RuntimeLiveStateTests` target.
### Step 2. Move OSC Overlay Bookkeeping Behind The Boundary
Move these responsibilities out of the current frame orchestration:
- overlay updates by route
- commit completion tracking
- generation matching
- settle/commit request creation
The first implementation can still be called synchronously from the current render path. The important part is that the behavior has a named owner and tests.
Status: complete for Phase 3. `RenderEngine` still exposes compatibility methods used by the service bridge, but it delegates overlay updates, commit completions, smoothing, generation matching, and commit-request creation to `RuntimeLiveState`/`RenderStateComposer`.
### Step 3. Bridge Service Queues To Events Or Live-State Commands
Replace `OpenGLComposite::renderEffect()` queue draining with a bridge that publishes or applies:
- `OscValueReceived`
- `OscOverlayApplied`
- `OscOverlaySettled`
- overlay commit completion observations
This is where the remaining Phase 2 open question about transient OSC overlay event scope should be resolved for the current architecture.
Status: complete for Phase 3. `RuntimeServiceLiveBridge` now drains pending OSC updates and completed OSC commits, applies them to render live state, and queues settled commit requests. It remains a source-local bridge by design until later live-state layering decides whether transient automation should enter the app-level dispatcher.
### Step 4. Narrow `OpenGLComposite::renderEffect()`
Target shape:
```cpp
void OpenGLComposite::renderEffect()
{
mRuntimeUpdateController->ProcessRuntimeWork();
const RenderFrameInput frameInput = BuildRenderFrameInput();
RenderFrame(frameInput);
}
```
The exact names can change. The goal is that `renderEffect()` no longer manually drains services, settles overlay commits, and resolves layer values.
Status: complete for Phase 3. `OpenGLComposite::renderEffect()` now processes runtime work, builds `RenderFrameInput`, and calls a narrow frame-render helper. Service draining, state resolution, and commit handoff sit behind `RuntimeServiceLiveBridge::PrepareLiveRenderFrameState(...)`, `RenderFrameStateResolver`, and `RenderFrameState`.
### Step 5. Add Persistence Boundary Tests
Add behavior tests for:
- accepted persisted mutations publish `RuntimePersistenceRequested`
- transient OSC commits do not force immediate persistence
- preset load/save persistence requests remain explicit
- rejected mutations do not publish persistence work
Status: complete for Phase 3. `RuntimeSubsystemTests` and `RuntimeEventTypeTests` cover accepted mutation persistence requests, rejected mutations, and transient OSC overlay behavior that does not request persistence.
### Step 6. Update Docs And Phase 4 Readiness
Before calling Phase 3 complete, update:
- subsystem docs for new live-state/composer collaborators
- architecture review checklist
- Phase 4 assumptions about render thread input state
Status: complete. The Phase 4 design note started from the `RenderFrameInput` / `RenderFrameState` contract and has now completed the shared-GL ownership migration.
## Testing Strategy
Phase 3 tests should avoid GL, DeckLink, and sockets.
Recommended tests:
- final layer-state composition applies snapshot values when no overlay exists
- transient overlay overrides the matching parameter by route
- smoothing moves toward target values over time
- overlay settle creates one commit request per route/generation
- completed commits clear pending overlay commit state
- stale commit completions are ignored by generation
- accepted mutations publish persistence requests where expected
- rejected mutations do not publish persistence or render follow-ups
- `OpenGLComposite` no longer needs to drain service result queues for runtime effects
Existing useful homes:
- `RuntimeSubsystemTests` for pure state/composer behavior
- `RuntimeEventTypeTests` for event bridge behavior
- `RuntimeLiveStateTests` for the new live-state/composer boundary
## Parallel Work Lanes
The current groundwork is intended to let these lanes proceed in parallel with low overlap:
| Lane | Primary files | Goal |
| --- | --- | --- |
| A. Live-state behavior | `runtime/live/RuntimeLiveState.*`, `tests/RuntimeLiveStateTests.cpp` | Implemented for Phase 3: stale completion, smoothing, trigger behavior, and overlay settle policy are covered by focused tests. |
| B. Render-state composition | `runtime/live/RenderStateComposer.*`, `gl/RenderFrameStateResolver.*`, `gl/RenderEngine.*` | Implemented for Phase 3: value composition and frame-state selection sit outside GL drawing while GL calls remain in `RenderEngine`. |
| C. Service bridge | `control/RuntimeServices.*`, `control/RuntimeServiceLiveBridge.*`, `control/ControlServices.*` | Implemented for Phase 3: `OpenGLComposite::renderEffect()` no longer drains OSC update/completion queues directly. |
| D. App-frame orchestration | `gl/OpenGLComposite.*`, `gl/RuntimeUpdateController.*` | Implemented for Phase 3: render-effect glue is a narrow runtime-work, frame-input, render-frame sequence. |
| E. Persistence boundary | `runtime/coordination/RuntimeCoordinator.*`, `runtime/store/*`, event tests | Implemented for Phase 3: persistence request publication is explicit and ready for a later background writer. |
## Phase 3 Exit Criteria
Phase 3 can be considered complete once the project can say:
- [x] final render-state composition has named owners outside `OpenGLComposite` (`RenderStateComposer` covers live value composition; `RenderFrameStateResolver` covers snapshot/cache selection and frame-state resolution)
- [x] transient OSC overlay state has a named owner and tests
- [x] overlay commit requests and completions no longer require `OpenGLComposite` to drain service queues directly
- [x] `RenderEngine` is closer to GL/render resource ownership and less responsible for value composition
- [x] `RuntimeStore` remains durable-state focused and does not gain live overlay responsibilities
- [x] persistence requests are explicit event outcomes for persisted mutations
- [x] Phase 4 can define a render-thread input contract around immutable or near-immutable frame state
## Open Questions
- Should transient OSC overlay values enter the app-level event dispatcher, or should they use a dedicated source-local latest-value bridge until live-state layering is finalized?
- Should the new live-state owner live under `runtime/`, `gl/`, or a new `renderstate/` boundary?
- Should smoothing policy be owned by live state, render-state composition, or render settings?
- Should overlay commit completion be represented as a new typed event, or derived from existing accepted mutation events with route/generation metadata?
- How much of persistence should remain synchronous until Phase 6?
## Short Version
Phase 3 should make the app's live state boring and explicit.
- persisted state stays in the store
- accepted command policy stays in the coordinator
- transient automation gets a named owner
- final render-state composition becomes testable without GL
- `OpenGLComposite` stops manually reconciling service queues and layer values
Once that is true, Phase 4 can make the render thread the sole GL owner without also having to invent a clean state model at the same time.

View File

@@ -1,408 +0,0 @@
# Phase 4 Design: Render Thread Ownership
This document expands Phase 4 of [ARCHITECTURE_RESILIENCE_REVIEW.md](ARCHITECTURE_RESILIENCE_REVIEW.md) into a concrete design target.
Phase 1 named the subsystems. Phase 2 added the typed event substrate. Phase 3 made render-facing live state explicit through `RuntimeLiveState`, `RenderStateComposer`, `RenderFrameInput`, `RenderFrameState`, `RenderFrameStateResolver`, and `RuntimeServiceLiveBridge`. Phase 4 can now focus on the core timing-risk boundary: making one render thread the only owner of OpenGL work.
## Status
- Phase 4 design package: implemented.
- Phase 4 implementation: complete for GL ownership. `RenderEngine` starts a dedicated render thread, owns the GL context during normal runtime work, and exposes queue/request entrypoints for input upload, output render, preview presentation, screenshot capture, shader rebuild application, and render-local resets.
- Current alignment: normal runtime GL work is routed through the render thread after startup. Startup initialization still runs before the render thread starts while the app explicitly owns the context, and shutdown now stops DeckLink/backend work before destroying render-thread GL resources and deleting the context.
Current GL ownership footholds:
- `RenderEngine` owns GL resources, a dedicated render thread, synchronous request/response for output frames, a small render command mailbox, named render-thread helper methods, and wrong-thread diagnostics for those helpers.
- `RenderFrameInput` / `RenderFrameState` provide the frame-state contract that a render thread can consume.
- `RenderFrameStateResolver` prepares the render-facing layer state before drawing.
- `OpenGLVideoIOBridge` calls `RenderEngine::QueueInputFrame(...)` from the input path and `RenderEngine::RequestOutputFrame(...)` from the output path.
- `OpenGLComposite::paintGL(...)`, screenshot capture, input upload, and output rendering enter render work through explicit `RenderEngine` requests. After `OpenGLComposite::Start()` starts the render thread, those requests do not bind the GL context on the caller thread.
## Why Phase 4 Exists
The resilience review identifies shared GL ownership as the main remaining timing and failure-isolation risk. Today the shared context lock protects correctness, but it does not isolate timing:
- input callbacks can attempt texture upload
- output callbacks can trigger frame rendering and readback
- preview paint can enter the same GL context
- screenshot capture can enter the same GL context
- the DeckLink completion path is still too close to render work
That means brief input, preview, readback, or callback stalls can still collide on the most timing-sensitive path.
Phase 4 should turn GL from a shared resource guarded by a lock into a resource owned by one thread with explicit queues and handoff points.
## Goals
Phase 4 should establish:
- one render thread as the sole long-lived owner of the GL context
- non-render threads enqueue work instead of binding the GL context
- input upload requests are accepted and executed by the render thread
- output frame rendering is requested or scheduled through render-owned work
- preview and screenshot requests become render-thread commands or consumers
- `RenderFrameInput` / `RenderFrameState` become the stable data contract for frame production
- GL context entrypoints are reduced to render-thread-only code paths
- tests for queue semantics and request coalescing without requiring DeckLink hardware, plus explicit lifecycle ordering in code
## Non-Goals
Phase 4 should not require:
- the final producer/consumer playout queue for DeckLink
- the final DeckLink lifecycle state machine
- replacing the async readback policy
- implementing background persistence
- completing Phase 5's deeper live-state layering
- replacing every UI or backend API at once
Those are later phases or follow-on work. Phase 4 is about making GL ownership deterministic first.
## Current GL Entry Points
The current code paths that matter most are:
| Entry point | Current behavior | Phase 4 direction |
| --- | --- | --- |
| `RenderEngine::QueueInputFrame(...)` | copies the latest input frame into the render mailbox and returns without waiting for GL | render thread uploads latest input without callback-owned GL |
| `RenderEngine::RequestOutputFrame(...)` | synchronous output request; after render-thread startup it queues output render work and waits for render-thread completion with timeout/failure reporting | render thread executes output frame production |
| `RenderEngine::TryPresentPreview(...)` | best-effort request; callers queue preview presentation and return | render thread consumes latest completed frame for preview |
| `RenderEngine::RequestScreenshotCapture(...)` | queues screenshot capture and async disk write completion | screenshot capture is a render-thread command |
| `OpenGLVideoIOBridge::UploadInputFrame(...)` | copies the latest input frame into the render mailbox and returns without waiting for GL | render thread uploads the latest queued input frame |
| `OpenGLVideoIOBridge::RenderScheduledFrame(...)` | requests render-thread output production and reports success/failure to the backend | consume render-produced output without callback-owned GL |
## Target Ownership Model
### Render Thread
The render thread should own:
- `wglMakeCurrent(...)` for the rendering context
- all GL resource creation/destruction
- input texture upload
- pass execution
- output pack conversion
- async readback buffers and fences
- preview presentation or preview frame publication
- screenshot readback
- temporal history and feedback resources
### Other Threads
Other threads may:
- enqueue input frames or replace the latest input frame
- publish control/runtime/backend events
- request shader build application
- request render-local resets
- request screenshots
- consume ready output frames or receive completion notifications
Other threads should not:
- call GL directly
- bind or unbind the render context
- wait on GL fences directly
- mutate render-local resource state
## Proposed Collaborators
### `RenderThread`
Owns the OS thread, wakeup primitive, lifecycle, and render-loop execution.
Responsibilities:
- start and stop the render thread
- bind the GL context for the thread lifetime or render-loop lifetime
- drain render commands
- execute frame production work
- publish lifecycle and failure observations
Non-responsibilities:
- runtime mutation policy
- DeckLink scheduling policy
- durable persistence
### `RenderCommandQueue`
Small bounded queue or command mailbox for render-thread work.
Current implementation:
- `RenderCommandQueue` exists as a pure C++ mailbox helper.
- Preview present and screenshot capture requests use latest-value coalescing.
- Input upload requests use latest-value coalescing with owned frame bytes copied at enqueue time.
- Output frame requests use FIFO semantics so scheduled output demand is not collapsed.
- Render-local reset requests coalesce to the strongest pending reset scope.
- Output frame requests use synchronous request/response through the render thread as the remaining transitional playout bridge.
Possible commands:
- `UploadInputFrame`
- `RenderOutputFrame`
- `PrepareFrameState`
- `ApplyShaderBuild`
- `ResetTemporalHistory`
- `ResetShaderFeedback`
- `PresentPreview`
- `CaptureScreenshot`
- `Stop`
High-rate commands should be coalesced where appropriate. Input frames should likely be latest-value rather than unbounded FIFO.
### `RenderFrameCoordinator`
Optional helper that combines Phase 3's frame contract with render-thread execution.
Responsibilities:
- build or receive `RenderFrameInput`
- call `RuntimeServiceLiveBridge` and `RenderFrameStateResolver`
- hand `RenderFrameState` to `RenderEngine`
This can begin as a thin helper. The important part is that it keeps frame-state preparation explicit when `renderEffect()` stops being called directly from the callback path.
### `RenderOutputMailbox`
Optional transitional bridge for output frames.
Responsibilities:
- hold the latest completed output frame or a small bounded set
- let backend code consume output without owning GL
- report underrun/stale-frame reuse observations
This may be a Phase 4 late step or a Phase 7 playout-policy step. Phase 4 should at least avoid designing the render thread in a way that blocks adding this mailbox later.
## Threading Contract
Phase 4 should make thread ownership visible in APIs.
Candidate naming:
- `RenderEngine::StartRenderThread(...)`
- `RenderEngine::StopRenderThread()`
- `RenderEngine::EnqueueInputFrame(...)`
- `RenderEngine::RequestOutputFrame(...)`
- `RenderEngine::RequestPreviewPresent(...)`
- `RenderEngine::RequestScreenshot(...)`
Render-thread-only methods should be private or clearly named:
- `RenderEngine::UploadInputFrameOnRenderThread(...)`
- `RenderEngine::RenderOutputFrameOnRenderThread(...)`
- `RenderEngine::CaptureOutputFrameRgbaTopDownOnRenderThread(...)`
The public runtime entrypoints now use queue/request language. `RequestOutputFrame(...)` remains synchronous so the existing DeckLink callback path can keep producing an output frame while Phase 7's producer/consumer playout queue is still future work.
## Frame Production Shape
A target render-thread frame should look like:
1. wake for input, output demand, preview demand, shader build, reset, screenshot, or stop
2. drain bounded render commands
3. coalesce to the latest input frame and latest control/live state
4. build `RenderFrameInput`
5. prepare `RenderFrameState`
6. upload accepted input frame
7. render layer stack
8. pack output if needed
9. stage readback or output buffer
10. publish preview/screenshot/output completion as needed
11. record timing and queue metrics
The exact cadence can remain demand-driven initially. The architectural win is that the demand wakes the render thread rather than borrowing GL from the caller.
## Migration Plan
### Step 1. Name Render-Thread-Only Methods
Split existing direct GL methods into public request methods and private render-thread methods without changing behavior much.
Initial target:
- [x] keep current synchronous behavior where callers need a result
- [x] move GL bodies into clearly render-thread-owned helpers for upload, output render, preview presentation, and screenshot readback
- [x] make future queue migration mechanical
### Step 2. Add Render Command Queue
Introduce a small queue/mailbox for render commands.
Start with low-risk commands:
- [x] preview present request
- [x] screenshot request
- [x] render-local reset requests
- [x] input upload request
- [x] output render request
The queue and wakeup behavior still need the dedicated render thread before the callbacks stop borrowing the GL context.
### Step 3. Start A Dedicated Render Thread
Create the render thread and make it own context binding.
- [x] create a dedicated render thread owned by `RenderEngine`
- [x] bind the existing GL context on the render thread for normal runtime work
- [x] stop the render thread before GL context destruction
- [x] keep transitional synchronous request/response for output frames
- [x] remove normal runtime dependence on the shared GL `CRITICAL_SECTION`
- [x] add timeout/failure behavior for render-thread requests
Transitional behavior still allows synchronous request/response for output frames. Render-thread requests now fail fast if they cannot begin within the request timeout, and log over-budget tasks that have already started before waiting for safe completion. The important change is that the caller waits for render-thread completion rather than taking the GL context itself.
### Step 4. Move Input Upload To The Render Thread
Change `OpenGLVideoIOBridge::UploadInputFrame(...)` so it enqueues or replaces the latest input frame.
Policy targets:
- [x] bounded memory
- [x] latest-frame wins under load
- [x] input upload skip count is observable through render command coalescing metrics
- [x] input callback never waits for GL
Current implementation: `OpenGLVideoIOBridge::UploadInputFrame(...)` calls `RenderEngine::QueueInputFrame(...)`, which copies the input bytes into the latest-value render mailbox and schedules one bounded render-thread wakeup to upload the newest pending frame.
### Step 5. Move Output Rendering To The Render Thread
Change `OpenGLVideoIOBridge::RenderScheduledFrame(...)` so it requests render-thread output production or consumes a completed render-thread output.
Transitional option:
- [x] synchronous request/response through the render thread
Better follow-up:
- render ahead into a bounded output queue and let backend callbacks consume ready frames
Current implementation: `OpenGLVideoIOBridge::RenderScheduledFrame(...)` calls `RenderEngine::RequestOutputFrame(...)` and returns whether the render-thread request produced an output frame. `VideoBackend` skips scheduling that frame when render production fails or times out.
### Step 6. Decouple Preview And Screenshot Requests
Preview should become best-effort:
- [x] request preview presentation from the render thread
- [x] skip/coalesce when render is busy or output deadline pressure is high
- [x] record preview skips through render command coalescing metrics
Screenshot should become:
- [x] queued render-thread capture request
- [x] async disk write remains outside render thread
Current implementation: `OpenGLComposite::RequestScreenshot(...)` builds the output path, queues `RenderEngine::RequestScreenshotCapture(...)`, and the render thread captures pixels before handing them to the existing async PNG writer. Preview presentation is a latest-value best-effort render command that is queued behind output render work, even when requested from the render pipeline.
### Step 7. Remove Shared GL Lock From Normal Paths
Once all GL entrypoints are render-thread-owned:
- [x] remove normal dependence on `pMutex` for render correctness
- [x] keep diagnostics that detect wrong-thread render-thread helper calls
- [x] leave only lifecycle context binding where needed
Current implementation: `OpenGLComposite` no longer owns or passes a shared `CRITICAL_SECTION`, and `RenderEngine` no longer has caller-thread GL fallback paths for preview, input upload, output render, or screenshot capture. Runtime callers must go through the render thread; pre-start direct GL fallback is limited to startup initialization while the app explicitly owns the context.
### Shutdown Order
Current shutdown order is explicit in code:
1. `OpenGLComposite::Stop()` stops runtime services so control/OSC work stops entering the runtime.
2. `VideoBackend::Stop()` stops DeckLink streams/playout so input and output callbacks stop requesting render work.
3. `RenderEngine::StopRenderThread()` destroys GL resources on the render thread, signals the render thread to stop, joins it, and unbinds the context on render-thread exit.
4. `WM_DESTROY` deletes `OpenGLComposite`, unbinds the window context, and deletes the GL context.
This order is build-tested, and `RenderCommandQueue` behavior is covered by non-GL unit tests. It still benefits from a real-window/DeckLink shutdown smoke test, but the code path is explicit enough for Phase 4's design exit.
## Testing Strategy
Phase 4 tests should avoid hardware where possible.
Recommended tests:
- render command queue preserves FIFO for non-coalesced commands
- latest-input mailbox drops older frames under load
- shutdown path stops backend callbacks before stopping and joining the render thread
- screenshot request receives one completion or failure
- output render request reports failure if render thread is stopped
- render reset commands coalesce where expected
- wrong-thread render-only diagnostics are present on private render-thread helpers
Existing useful homes:
- `RuntimeEventTypeTests` for new render/backend observations
- `RuntimeSubsystemTests` for pure request/coalescing helpers
- a future `RenderThreadTests` target if render-thread lifecycle is extracted behind a non-GL test seam
Manual verification will still be needed for:
- real DeckLink input/output
- preview interaction
- screenshot capture
- shader reload while rendering
- real window/context shutdown
## Telemetry Added During Phase 4
Phase 4 should add minimal metrics while moving ownership:
- render command queue depth
- input frames accepted, replaced, and dropped
- render-thread wake reason counts
- render-thread frame duration
- output request latency
- preview request skipped count
- screenshot request success/failure count
- wrong-thread GL call diagnostics if practical
Full operational reporting remains Phase 8, but these metrics make the threading migration debuggable.
## Risks
### Deadlock Risk
Synchronous request/response shims can deadlock if the caller is already on the render thread or holds a lock the render thread needs. Phase 4 should keep request waits narrow and add render-thread detection early.
### Latency Risk
Moving work through queues can hide latency. Queue depth and output request latency should be measured from the first migration step.
### Lifetime Risk
Moving context ownership changes startup and shutdown order. The render thread must stop before GL resources or window/context handles are destroyed.
### Callback Pressure Risk
If DeckLink callbacks wait too long for render-thread work, Phase 4 may improve GL ownership but still leave callback timing fragile. A synchronous bridge is acceptable as a transition, but the design should keep the path open for producer/consumer playout.
### Preview Coupling Risk
Preview can remain a hidden budget consumer if it stays in the output frame path. Phase 4 should keep preview explicitly best-effort, even if physical decoupling continues later.
## Phase 4 Exit Criteria
Phase 4 can be considered complete once the project can say:
- [x] one render thread owns the GL context during normal operation
- [x] input callbacks do not bind GL or wait on GL upload
- [x] output callbacks do not bind GL directly
- [x] preview and screenshot requests enter render through explicit render-thread requests
- [x] `RenderFrameInput` / `RenderFrameState` remain the frame-state contract
- [x] normal frame production no longer depends on a shared GL `CRITICAL_SECTION`
- [x] render-thread queue/mailbox behavior has non-GL tests
- [x] shutdown order is explicit and tested or manually verified
## Open Questions
- What exact producer/consumer output queue shape should replace the current synchronous output request in Phase 7?
- Should preview present on the render thread, or should render publish a preview image/texture to a separate presenter?
- Should wrong-thread GL access eventually escalate from debug diagnostics to structured telemetry or assertions?
## Short Version
Phase 4 should make GL ownership boring and deterministic.
One render thread owns the context. Other threads submit work or consume results. Input upload, frame rendering, readback, preview, and screenshot capture all move behind render-thread entrypoints. Output production remains a synchronous request/response bridge for now, but the app no longer relies on callback and UI paths borrowing the GL context under one shared lock.

View File

@@ -1,398 +0,0 @@
# Phase 5 Design: Live State Layering And Composition
This document expands Phase 5 of [ARCHITECTURE_RESILIENCE_REVIEW.md](ARCHITECTURE_RESILIENCE_REVIEW.md) into a concrete design target.
Phase 1 named the subsystems. Phase 2 added the typed event substrate. Phase 3 made render-facing live state explicit through `RuntimeLiveState`, `RenderStateComposer`, `RenderFrameInput`, `RenderFrameState`, `RenderFrameStateResolver`, and `RuntimeServiceLiveBridge`. Phase 4 made one render thread the owner of normal runtime GL work. Phase 5 should now make the live parameter model itself explicit: persisted truth, operator/session truth, and transient automation should be separate layers with one predictable composition rule.
## Status
- Phase 5 design package: proposed.
- Phase 5 implementation: Step 3 complete.
- Current alignment: Phase 3 introduced the first pure composition boundary and transient OSC overlay owner. Phase 5 now has a small `RuntimeStateLayerModel` inventory that names the current state categories, `RenderStateComposer` consumes a `LayeredRenderStateInput` whose fields make base persisted, committed live, and transient automation inputs explicit, and `RuntimeLiveState` owns transient-overlay invalidation against current layer/parameter compatibility. Committed runtime values are still physically stored through `RuntimeStore`/`LayerStackStore`.
Current live-state footholds:
- `RuntimeStore` owns persisted layer stack, parameter values, presets, config, and render snapshot read models.
- `RuntimeCoordinator` owns mutation validation, classification, accepted/rejected event publication, snapshot/reload follow-ups, and the policy switch between committed states and live snapshots.
- `RuntimeSnapshotProvider` publishes render-facing snapshots from committed runtime state.
- `RuntimeLiveState` owns transient OSC overlay bookkeeping, smoothing, generation tracking, and commit-settlement policy.
- `RenderStateComposer` consumes `LayeredRenderStateInput`, chooses committed-live layer states over base-persisted layer states when both are supplied, applies transient automation on top, and returns final per-frame layer states plus settled commit requests.
- `RuntimeServiceLiveBridge` drains OSC ingress/completion queues and applies them to render live state during frame preparation.
- `RuntimeStateLayerModel` names the Phase 5 state categories and classifies current fields as base persisted, committed live, transient automation, render-local, or health/config state.
- `RuntimeCoordinator` can request layer-scoped transient OSC invalidation, while `RuntimeLiveState` prunes overlays that no longer map to the current render-facing layer/parameter definitions.
## Why Phase 5 Exists
The resilience review identifies live OSC overlay and persisted state as separate concepts that still do not have a fully formal model. The app now has better boundaries, but several policies are still implicit:
- whether a value is durable, committed for the current session, or transient automation
- whether an OSC value should merely influence the current frame or eventually commit
- what reload, preset load, layer removal, shader change, and reset should do to transient values
- which layer wins when UI/operator changes race with OSC automation
- which state changes should publish snapshots, request persistence, or only affect render frames
Without a formal layering model, these rules can leak across `RuntimeStore`, `RuntimeCoordinator`, `RuntimeLiveState`, `RenderStateComposer`, and service bridges. Phase 5 should make those rules boring and testable.
## Goals
Phase 5 should establish:
- explicit state layers for persisted, committed/session, and transient automation values
- one named composition contract for final render values
- clear ownership for layer-specific mutation policy
- explicit reset/reload/preset behavior for transient and committed state
- a clean path for OSC automation to remain high-rate without becoming durable state by accident
- tests for layer precedence, lifecycle, invalidation, and commit policy without GL or DeckLink
- documentation that distinguishes render-local temporal/feedback state from parameter/live-state overlays
## Non-Goals
Phase 5 should not require:
- a background persistence writer implementation
- a DeckLink producer/consumer playout queue
- a full cue/timeline/preset performance system
- a new UI state-management framework
- replacing every synchronous coordinator API
- moving temporal history or shader feedback into the runtime state model
Those are later phases or separate feature work. Phase 5 is about parameter and live-value layering.
## Target State Model
Phase 5 should formalize three layers:
| Layer | Owner | Lifetime | Persistence | Render role |
| --- | --- | --- | --- | --- |
| Base persisted state | `RuntimeStore` / `LayerStackStore` | survives restart | written to disk | default layer stack, shader selections, saved parameter values |
| Committed live state | `RuntimeCoordinator` or a new live-session collaborator | current running session | may request persistence depending on mutation type | operator/UI/current truth until changed again |
| Transient automation overlay | `RuntimeLiveState` or a new automation overlay collaborator | high-rate/short-lived | not persisted directly | temporary OSC/automation target applied over committed truth |
The target composition rule is:
```text
final render state = base persisted state + committed live state + transient automation overlay
```
The actual implementation may continue using render snapshots as the base transport. The important part is that each layer has named ownership, documented lifetime, and tested precedence.
## Current Composition Shape
Today, final frame state is prepared through this path:
1. `OpenGLComposite::renderEffect()` processes runtime work.
2. `OpenGLComposite` builds `RenderFrameInput`.
3. `RuntimeServiceLiveBridge` drains OSC updates and completed commits.
4. `RenderEngine` updates `RuntimeLiveState`.
5. `RenderFrameStateResolver` chooses committed states or live snapshot states.
6. `RenderStateComposer` applies transient overlay values.
7. `RenderEngine::RenderPreparedFrame(...)` consumes `RenderFrameState`.
That is a good Phase 3/4 foundation. Phase 5 should make the hidden assumptions in steps 5 and 6 explicit enough that reset/reload/preset and future UI automation behavior are not scattered across those collaborators.
## Proposed Collaborators
### `RuntimeStateLayerModel`
Optional pure model that names the layers and composition metadata.
Responsibilities:
- represent base, committed, and transient layer state inputs
- define precedence and invalidation categories
- expose a pure composition function or input object
- keep GL, services, persistence, and device callbacks out of the model
Non-responsibilities:
- disk IO
- OSC socket handling
- render-thread scheduling
- shader compilation
This may be a small set of structs rather than a large class. The value is in naming the contract.
### `CommittedLiveState`
Optional runtime/session collaborator if committed session state needs to move out of `RuntimeStore`.
Responsibilities:
- hold operator/UI committed values that are true for the current session
- distinguish persistence-required commits from session-only commits
- expose a read model for snapshot publication
- provide reset/load behavior separate from durable storage
Non-responsibilities:
- transient OSC smoothing
- disk writes
- GL resources
Phase 5 can defer this physical split if the policy is documented and covered by tests. The key is that committed-live state becomes a distinct concept even if it still lives inside existing storage temporarily.
### `AutomationOverlayState`
Possible evolution of `RuntimeLiveState`.
Responsibilities:
- hold transient automation values keyed by route/layer/parameter identity
- track generation, commit-in-flight, and completion
- apply smoothing and settle policy
- decide whether an overlay is render-only, commit-requesting, stale, or invalidated
Non-responsibilities:
- owning committed truth
- persistent state mutation
- snapshot publication
This can start by renaming or narrowing current `RuntimeLiveState` responsibilities rather than replacing it outright.
### `LayeredStateComposer`
Possible evolution of `RenderStateComposer`.
Responsibilities:
- apply the target precedence rule
- produce final `RuntimeRenderState` values for a frame
- return commit requests or overlay observations when policy says a transient value settled
- keep value composition testable without OpenGL
Non-responsibilities:
- frame rendering
- service queue draining
- storage mutation
## Layering Rules
### Precedence
Default precedence should be:
1. base persisted/snapshot value
2. committed live/session value
3. transient automation overlay
The topmost valid layer wins for discrete values. Numeric/vector values may be smoothed by overlay policy before they win.
### Identity
Layering should use stable render-facing identity:
- layer id for persisted structural identity
- layer key/control key for OSC-facing identity
- parameter id for shader-defined identity
- parameter control key for external-control identity
Phase 5 should document which identity is authoritative when layer id and control key disagree or when a shader changes.
### Invalidations
The following should have explicit behavior:
- layer removed: clear committed and transient state for that layer
- layer shader changed: clear or remap parameter overlays according to compatible control keys
- preset loaded: replace base/committed state and clear incompatible transient overlays
- shader reload with same controls: preserve compatible transient overlays where safe
- manual reset parameters: clear committed overrides and transient overlays for that layer
- input/source changes: should not affect parameter layers
### Commit Policy
Transient automation may:
- remain render-only
- settle and request a committed mutation
- commit without persistence
- commit with persistence only when the control path explicitly requests it
The policy should be explicit per ingress path or parameter category. Phase 5 does not need a full UI for it, but the default behavior should be documented and tested.
## Event And Snapshot Contract
Phase 5 should clarify which changes publish which effects:
| Change | Snapshot publication | Persistence request | Render reset | Runtime event |
| --- | --- | --- | --- | --- |
| persisted layer stack mutation | yes | yes | maybe | accepted mutation + persistence requested |
| operator live parameter change | yes | maybe | no, unless structural | accepted mutation |
| transient OSC overlay update | no committed snapshot by default | no | no | optional overlay observation |
| overlay settled commit | yes if accepted | usually no for OSC | no | accepted mutation or overlay-settled observation |
| preset load | yes | maybe | temporal/feedback policy dependent | accepted mutation + reload/reset observations |
| shader change/reload | yes after build | maybe | temporal/feedback policy dependent | shader build/reload events |
This table should evolve with implementation, but Phase 5 should prevent transient overlay updates from masquerading as durable committed state.
## Migration Plan
### Step 1. Inventory Current State Layers
Document and/or encode where each current state category lives:
- persisted layer stack and parameter values
- committed current-session parameter values
- runtime compile/reload flags
- transient OSC overlays
- render-local temporal history and feedback state
Initial target:
- [x] identify which fields are durable, committed-live, transient automation, render-local, or health/config
- [x] update subsystem docs where the current ownership is misleading
- [x] add small tests for classification if a pure helper exists
### Step 2. Name The Layered Composition Input
Introduce a named composition input model around the previous `RenderStateCompositionInput`.
Initial target:
- [x] make base/committed/transient inputs visible in type names or field names
- [x] keep `RenderStateComposer` behavior unchanged at first
- [x] add tests that assert precedence with no GL
Possible outcomes:
- [x] add a new `LayeredRenderStateInput`
- [ ] add a thin adapter if a later migration needs compatibility with the previous input shape
### Step 3. Make Reset And Reload Policy Explicit
Move reset/reload transient-state decisions into one policy point.
Initial target:
- [x] layer removal clears matching transient overlays
- [x] shader change clears incompatible overlays
- [x] preset load clears incompatible overlays
- [x] shader reload can preserve compatible overlays when requested
- [x] temporal/feedback resets stay render-local and separate from parameter overlays
This is where Phase 5 should prevent "clear everything" and "preserve everything" from being scattered through unrelated code.
Current implementation:
- `RuntimeCoordinatorResult` carries a named `RuntimeCoordinatorTransientOscInvalidation` request rather than a raw clear-all flag.
- `RuntimeUpdateController` applies layer-scoped invalidation to both render-owned overlay state and queued OSC service state.
- `RuntimeLiveState::PruneIncompatibleOverlays(...)` is the central compatibility policy for current render-facing layer/parameter definitions.
- `RuntimeLiveState::ApplyToLayerStates(...)` prunes incompatible overlays before applying transient values, so shader changes, preset loads, and layer removals stop carrying stale overlays once the current frame state no longer maps them.
### Step 4. Clarify OSC Commit Semantics
Make the transient-to-committed path explicit.
Initial target:
- document and test whether settled OSC commits persist
- ensure stale generation completions are ignored
- ensure one settled route does not clear unrelated overlay state
- publish or preserve useful events for accepted overlay commits
Current Phase 3 behavior is a good base; Phase 5 should make the policy easier to reason about from the code.
### Step 5. Separate Committed-Live Concept From Durable Storage
Decide whether to physically split committed-live state now or introduce a read/model boundary first.
Conservative option:
- leave storage physically in `RuntimeStore`
- add a named committed-live read model
- keep persistence decisions in `RuntimeCoordinator`
Stronger option:
- introduce `CommittedLiveState`
- make `RuntimeSnapshotProvider` consume committed live state through a read model
- leave durable writes in `RuntimeStore`
Phase 5 does not need a flag-day split. It needs the concept to stop being implicit.
### Step 6. Update Docs And Exit Criteria
Before calling Phase 5 complete, update:
- architecture review checklist
- `RuntimeCoordinator`, `RuntimeStore`, `RuntimeSnapshotProvider`, `RenderEngine`, and `ControlServices` subsystem docs
- Phase 6 assumptions about persistence inputs
- Phase 7 assumptions about what render/backend state is not part of live parameter layering
## Testing Strategy
Phase 5 tests should avoid GL, DeckLink, sockets, and filesystem writes where possible.
Recommended tests:
- base value is used when no committed or transient value exists
- committed value overrides base value
- transient overlay overrides committed value
- numeric smoothing applies only to transient overlay values
- trigger/bool/discrete overlay behavior is explicit
- layer removal clears matching transient state
- shader change preserves only compatible overlays if policy allows
- preset load clears or replaces committed/transient state according to policy
- settled OSC overlay creates the expected commit request
- settled OSC commit does not request persistence unless policy says so
- stale commit completion does not clear a newer overlay
- render-local temporal/feedback resets do not mutate parameter layers
Existing useful homes:
- `RuntimeLiveStateTests` for overlay generation, smoothing, settle, and invalidation behavior
- `RuntimeSubsystemTests` for coordinator mutation, persistence request, and reset/reload policy
- `RuntimeEventTypeTests` for any new observations or accepted mutation events
- a possible new `RuntimeStateLayeringTests` target if the composition model gets a pure helper
## Risks
### Over-Abstraction Risk
It would be easy to introduce too many state containers. Phase 5 should add names where they clarify behavior, not create an elaborate framework.
### Persistence Confusion Risk
Committed live state and persisted state are related but not identical. If Phase 5 blurs them, Phase 6's background persistence writer will inherit ambiguous inputs.
### Automation Surprise Risk
OSC automation can be high-rate and transient, but users may expect settled values to become "real." The commit policy needs to be explicit enough that UI, OSC, presets, and reloads behave predictably.
### Identity/Compatibility Risk
Shader changes and preset loads can invalidate layer/parameter identities. Phase 5 should prefer conservative clearing over accidental application of an old automation value to the wrong control.
### Render Coupling Risk
Render-local resources such as temporal history, feedback buffers, readback caches, and playout queues are not parameter layers. Keeping them out of this model avoids turning Phase 5 into a render-resource refactor.
## Phase 5 Exit Criteria
Phase 5 can be considered complete once the project can say:
- [x] persisted, committed-live, and transient automation layers are named in code or clear read models
- [x] final render-value precedence is explicit and covered by tests
- [x] `RenderStateComposer` or its replacement consumes a layered input contract
- [x] reset/reload/preset behavior for transient overlays is centralized or clearly delegated
- [ ] OSC overlay settle/commit behavior is explicit, including persistence policy
- [ ] `RuntimeStore` remains durable-state focused and does not absorb transient automation policy
- [ ] render-local temporal/feedback state remains separate from live parameter layering
- [ ] subsystem docs and the architecture review reflect the final ownership model
## Open Questions
- Should committed live state remain physically in `RuntimeStore` for now, or move to a `CommittedLiveState` collaborator?
- Should transient OSC overlay updates become app-level typed events, or stay source-local through `RuntimeServiceLiveBridge`?
- Should overlay commit persistence be global, ingress-specific, or parameter-definition-driven?
- What compatibility rule should apply when shader reload preserves a control key but changes parameter shape?
- Should preset load clear all transient automation, or only automation that no longer maps to the loaded stack?
- Should UI slider drags use the committed-live layer directly, or a short-lived transient layer that commits on release?
## Short Version
Phase 5 should make live values boring and explicit.
Persisted state is durable truth. Committed live state is current-session/operator truth. Transient automation is high-rate overlay truth. Render consumes the composed result, and each layer has clear ownership, lifetime, persistence behavior, and reset/reload rules.

View File

@@ -1,301 +0,0 @@
# Phase 6 Design: Background Persistence
This document expands Phase 6 of [ARCHITECTURE_RESILIENCE_REVIEW.md](ARCHITECTURE_RESILIENCE_REVIEW.md) into a concrete design target.
Phases 1-5 separate durable state, coordination policy, render-facing snapshots, render-thread ownership, and live-state layering. Phase 6 should make disk persistence a background snapshot-writing concern instead of a synchronous side effect of mutations.
## Status
- Phase 6 design package: proposed.
- Phase 6 implementation: not started.
- Current alignment: `RuntimeStore` owns durable state and serialization, while `RuntimeCoordinator` already publishes explicit persistence-request outcomes for persisted mutations. The remaining issue is that actual disk writes are still synchronous store work rather than queued, debounced, atomic background writes.
Current persistence footholds:
- `RuntimeStore` owns persistent runtime-state serialization and stack preset serialization.
- `LayerStackStore` owns durable layer and parameter state.
- `RuntimeCoordinatorResult::persistenceRequested` exists as an explicit mutation outcome.
- `RuntimeEventType::RuntimePersistenceRequested` exists as the event-level persistence request.
- Phase 5 is expected to clarify which live-state mutations are durable, committed-live, or transient.
## Why Phase 6 Exists
Synchronous persistence is a poor fit for live software. A mutation that changes state should not also have to block on filesystem timing, antivirus scans, slow disks, or transient IO failures. The app needs persistence to be reliable and observable, but not timing-sensitive.
The resilience review calls this out because `SavePersistentState()` style behavior can create unnecessary stalls and makes recovery harder to reason about.
Phase 6 should turn persistence into:
- request
- snapshot
- background write
- completion/failure observation
## Goals
Phase 6 should establish:
- a queued persistence request path
- debounced/coalesced durable-state snapshot writes
- atomic file replacement for runtime-state saves where practical
- structured completion/failure reporting
- clear separation between state mutation and disk flush
- deterministic shutdown flushing policy
- tests for coalescing, snapshot selection, write failure, and shutdown behavior without rendering or DeckLink
## Non-Goals
Phase 6 should not require:
- changing live-state layering rules
- changing DeckLink/backend lifecycle
- replacing stack preset semantics wholesale
- adding cloud sync or external storage
- building an unlimited historical state archive
- making every write async immediately if a narrow compatibility path still needs a synchronous result
## Target Model
Phase 6 should make persistence a small pipeline:
```text
RuntimeCoordinator accepts mutation
-> publishes/returns persistence request
-> PersistenceWriter captures a durable snapshot
-> background worker debounces/coalesces writes
-> atomic write commits file
-> HealthTelemetry/runtime event records success or failure
```
The key rule is:
- `RuntimeStore` owns durable state and serialization
- `PersistenceWriter` owns when and how snapshots are written
- `RuntimeCoordinator` owns whether a mutation requests persistence
## Proposed Collaborators
### `PersistenceWriter`
Owns the worker thread, queue, debounce timer, and write execution.
Responsibilities:
- accept persistence requests
- coalesce repeated runtime-state writes
- request/build a durable snapshot from `RuntimeStore`
- write to a temporary file and atomically replace the target
- report success/failure observations
- flush on shutdown according to policy
Non-responsibilities:
- deciding mutation validity
- owning durable in-memory state
- composing render snapshots
- blocking render/backend timing paths
### `PersistenceSnapshot`
Immutable write input captured from durable state.
Responsibilities:
- contain serialized runtime-state text or structured data ready to serialize
- identify target path and snapshot generation
- preserve enough metadata for completion/failure diagnostics
Non-responsibilities:
- mutation policy
- file IO
### `PersistenceRequest`
Small request object or event payload.
Expected fields:
- reason/action name
- target kind: runtime state, preset, config if later needed
- optional debounce key
- force/flush flag for explicit save operations
- generation or sequence
## Write Policy
### Runtime State
Default policy:
- coalesce repeated requests
- debounce short bursts
- write newest snapshot
- report failures without blocking render/control paths
### Stack Presets
Preset save is more operator-explicit than routine runtime-state persistence.
Initial policy options:
- keep preset save synchronous while runtime-state persistence becomes async
- or route preset writes through the same worker with a completion result for the caller
Conservative Phase 6 default:
- background runtime-state persistence first
- leave preset save/load synchronous unless the implementation has a clean completion path
### Shutdown
Shutdown should explicitly decide:
- flush latest pending snapshot before exit
- skip flush if no pending durable change exists
- report/write failure if flush fails
- avoid indefinite hang on shutdown
## Atomicity And Failure Handling
Runtime-state writes should prefer:
1. serialize snapshot content in memory
2. write to `target.tmp`
3. flush/close file
4. replace target atomically where platform support allows
5. retain or report backup/failure context if replacement fails
Failures should not silently disappear. They should publish:
- persistence target
- reason/action
- error message
- whether a newer request is pending
- whether the app is still running with unsaved changes
## Migration Plan
### Step 1. Name Persistence Requests
Make request types and event payloads explicit enough that callers stop thinking in terms of direct disk writes.
Initial target:
- keep existing coordinator persistence decisions
- introduce a `PersistenceRequest`/`PersistenceSnapshot` shape
- document which requests are debounceable
### Step 2. Extract Snapshot Writing From `RuntimeStore`
Move file-write mechanics behind a helper while keeping serialization ownership in `RuntimeStore`.
Initial target:
- `RuntimeStore` can build serialized runtime-state snapshots
- `PersistenceWriter` writes the snapshot
- existing synchronous save path can call through the writer/helper during transition
### Step 3. Add Debounced Background Worker
Introduce a worker thread or queued task owner.
Initial target:
- repeated runtime-state requests coalesce
- worker writes only latest pending snapshot
- tests cover coalescing without filesystem where possible
### Step 4. Add Atomic Write And Failure Reporting
Make disk writes safer and observable.
Initial target:
- temp-file then replace
- failure returned/published with structured reason
- `HealthTelemetry` receives persistence warning state
### Step 5. Wire Coordinator/Event Requests To Writer
Route `RuntimePersistenceRequested` or coordinator persistence outcomes into the writer.
Initial target:
- accepted durable mutations request persistence
- transient-only mutations do not
- runtime reload/preset policies remain explicit
### Step 6. Define Shutdown Flush
Make app shutdown persistence behavior deterministic.
Initial target:
- stop accepting new requests
- flush latest pending snapshot with bounded wait
- report failure if flush fails
## Testing Strategy
Recommended tests:
- repeated persistence requests coalesce into one write
- newest snapshot wins after multiple mutations
- transient-only mutation does not request persistence
- write failure records an error and keeps unsaved state visible
- shutdown flush writes pending snapshot
- shutdown with no pending request does not write
- preset save path remains explicit
- temp-file replacement success/failure is handled
Useful homes:
- `RuntimeSubsystemTests` for coordinator persistence outcomes
- a new `PersistenceWriterTests` target for worker/coalescing/write policy
- filesystem tests using a temporary directory for atomic write behavior
## Risks
### Data Loss Risk
Debouncing introduces a window where in-memory state is newer than disk. Shutdown flush and unsaved-state telemetry are the guardrails.
### Complexity Risk
A persistence worker can become a hidden second store if it owns mutable truth. It should own snapshots and write policy only.
### Blocking Shutdown Risk
Flushing forever on shutdown is not acceptable. Use bounded waits and visible failure reporting.
### Preset Semantics Risk
Operator-triggered preset save often feels like it should complete before reporting success. Keep preset behavior explicit rather than silently changing it.
## Phase 6 Exit Criteria
Phase 6 can be considered complete once the project can say:
- [ ] durable mutations enqueue persistence instead of directly writing from mutation paths
- [ ] runtime-state writes are debounced/coalesced
- [ ] writes use temp-file/replace or equivalent atomic policy
- [ ] persistence failures are reported through structured health/events
- [ ] transient/live-only mutations do not request persistence
- [ ] shutdown flush behavior is explicit and tested
- [ ] `RuntimeStore` remains durable-state/serialization owner, not worker policy owner
- [ ] persistence behavior has focused non-render tests
## Open Questions
- Should preset save remain synchronous, or move behind a completion-based async request?
- What debounce interval is appropriate for routine runtime-state writes?
- Should failed persistence retry automatically, or wait for the next mutation/request?
- Should the app expose "unsaved changes" in the UI/health snapshot?
- Should runtime config writes share this worker, or stay separate?
## Short Version
Phase 6 should make persistence boring, safe, and off the hot path.
Mutations update in-memory durable state. Persistence requests are queued and coalesced. A background writer saves atomic snapshots and reports failures. Render, backend callbacks, and control ingress should not pay filesystem costs.

View File

@@ -1,333 +0,0 @@
# Phase 7 Design: Backend Lifecycle And Playout
This document expands Phase 7 of [ARCHITECTURE_RESILIENCE_REVIEW.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/ARCHITECTURE_RESILIENCE_REVIEW.md) into a concrete design target.
Phase 4 made the render thread the sole owner of normal runtime GL work, but output timing is still callback-coupled: DeckLink completion callbacks synchronously request render-thread output production before scheduling the next hardware frame. Phase 7 should make backend lifecycle, buffer policy, playout headroom, and recovery explicit.
## Status
- Phase 7 design package: proposed.
- Phase 7 implementation: not started.
- Current alignment: `VideoBackend`, `VideoIODevice`, `DeckLinkSession`, and `VideoPlayoutScheduler` exist. Phase 4 removed callback-thread GL ownership, but the DeckLink completion path still waits for render-thread output production.
Current backend footholds:
- `VideoBackend` wraps device discovery/configuration, start/stop, input callback handling, output completion handling, and telemetry publication.
- `DeckLinkSession` owns DeckLink device handles, frame pool creation, preroll, keyer configuration, and scheduled playback.
- `VideoPlayoutScheduler` owns basic schedule time generation and simple late/drop skip-ahead behavior.
- `OpenGLVideoIOBridge` is the current adapter between `VideoBackend` and `RenderEngine`.
- `HealthTelemetry` receives some signal, render, and pacing stats.
## Why Phase 7 Exists
The current output path works only while render/readback stays comfortably inside budget. A late render can make the callback late, which reduces device-side headroom, which makes the next callback more fragile.
The resilience review calls this the main remaining live-resilience risk after Phase 4:
- output playout is still effectively render-on-demand from the DeckLink completion callback
- buffer pool size and preroll depth are not sourced from one policy
- late/dropped recovery is a fixed skip rule
- backend lifecycle is imperative rather than represented as explicit states
Phase 7 should separate hardware timing from render production.
## Goals
Phase 7 should establish:
- explicit backend lifecycle states and allowed transitions
- one playout policy for frame pool size, preroll, headroom, and underrun behavior
- a bounded producer/consumer output queue between render and DeckLink scheduling
- lightweight DeckLink callbacks that dequeue/schedule/account rather than render
- measured recovery from late/dropped frames
- structured backend health reporting
- tests for scheduler, queue, lifecycle, and underrun policy without DeckLink hardware
## Non-Goals
Phase 7 should not require:
- a new renderer
- changing shader/state composition
- replacing DeckLink support with multiple backends
- full telemetry UI redesign
- removing every synchronous API immediately
- perfect adaptive latency policy in the first pass
## Target Timing Model
The target model is producer/consumer playout:
```text
RenderEngine/render scheduler produces completed output frames
-> bounded ready-frame queue
-> VideoBackend consumes ready frames
-> DeckLink callback schedules already-prepared frames
```
The callback should not wait for rendering. It should:
- record completion result
- recycle/release completed buffers
- dequeue a ready frame or apply underrun policy
- schedule the next frame
- publish backend timing/health observations
## Target Lifecycle Model
Suggested backend states:
1. `Uninitialized`
2. `Discovering`
3. `Discovered`
4. `Configuring`
5. `Configured`
6. `Prerolling`
7. `Running`
8. `Degraded`
9. `Stopping`
10. `Stopped`
11. `Failed`
Suggested transition rules:
- `Uninitialized -> Discovering`
- `Discovering -> Discovered | Failed`
- `Discovered -> Configuring | Stopped`
- `Configuring -> Configured | Failed`
- `Configured -> Prerolling | Stopped`
- `Prerolling -> Running | Failed | Stopping`
- `Running -> Degraded | Stopping | Failed`
- `Degraded -> Running | Stopping | Failed`
- `Stopping -> Stopped`
The exact enum can change, but the lifecycle should become observable and testable.
## Proposed Collaborators
### `VideoBackendStateMachine`
Pure or mostly pure lifecycle transition helper.
Responsibilities:
- validate state transitions
- produce transition observations
- track failure reasons
- keep start/stop/recovery behavior auditable
Non-responsibilities:
- DeckLink API calls
- rendering
- persistence
### `PlayoutPolicy`
Policy object for queue and timing behavior.
Expected fields:
- target preroll frames
- maximum ready frames
- minimum spare device buffers
- underrun behavior
- maximum catch-up frames
- adaptive headroom enabled/disabled
### `RenderOutputQueue`
Bounded queue or ring for completed output frames.
Responsibilities:
- accept completed render outputs
- expose ready frames for scheduling
- track depth, drops, stale reuse, and underruns
- keep ownership/lifetime clear between render and backend
### `OutputFramePool`
Backend-owned device buffer pool.
Responsibilities:
- own DeckLink mutable frames
- expose available buffers for render/readback or scheduling
- recycle completed frames
- report spare-buffer depth
### `PlayoutController`
Coordinates policy, ready frames, device schedule times, and completion accounting.
Responsibilities:
- preroll frames
- schedule next frame
- handle late/drop/completed/flushed results
- apply underrun policy
- publish timing state
## Output Queue Policy
The initial output queue should be small and bounded.
Candidate defaults:
- target ready frames: 2-3
- max ready frames: 3-5
- underrun: reuse last completed frame if available, otherwise black
- late/drop: increase degraded counters and optionally increase headroom within limits
The exact numbers should be measured, but the policy should live in one place instead of being split across constants.
## Underrun Policy
When no fresh rendered frame is available, options are:
1. reuse newest completed frame
2. reuse last scheduled frame
3. schedule black/degraded frame
4. skip/catch up schedule time
Phase 7 should pick one default and make it visible in telemetry. Reusing the newest completed frame is often the best first policy for live visual continuity, but key/fill behavior may require careful testing.
## Migration Plan
### Step 1. Name Lifecycle States
Introduce backend state enum and transition reporting without changing scheduling behavior much.
Initial target:
- state changes are explicit
- invalid transitions are detectable
- tests cover allowed transitions
### Step 2. Create Playout Policy Object
Unify fixed constants and scheduler assumptions.
Initial target:
- frame pool size derives from policy
- preroll count derives from policy
- late/drop recovery reads policy
### Step 3. Add Ready Output Queue
Introduce a bounded queue for completed output frames.
Initial target:
- pure queue tests
- explicit depth/underrun metrics
- no DeckLink dependency in queue tests
### Step 4. Move Callback Toward Dequeue/Schedule
Stop producing frames directly in the completion callback path.
Transitional target:
- callback wakes/schedules a backend worker
- worker consumes ready frames
Final target:
- callback only records, recycles, dequeues, schedules
### Step 5. Make Render Produce Ahead
Teach render/output code to keep the ready queue filled to target headroom.
Initial target:
- render thread produces on demand until queue has target depth
- callback does not synchronously wait for fresh render
- stale/black fallback is explicit on underrun
### Step 6. Replace Fixed Late/Drop Recovery
Replace fixed `+2` schedule-index recovery with measured lag/headroom accounting.
Initial target:
- track scheduled index, completed index, queue depth, late streak, drop streak
- recovery decisions use measured lag
### Step 7. Route Backend Health Structurally
Publish backend lifecycle, queue depth, underrun, late/drop, and degraded-state observations through `HealthTelemetry`.
## Testing Strategy
Recommended tests:
- allowed lifecycle transitions pass
- invalid lifecycle transitions fail
- playout policy derives frame pool/preroll sizes consistently
- output queue preserves ordering
- bounded output queue rejects/drops according to policy
- underrun reuses last frame or black according to policy
- late/drop accounting updates degraded state
- scheduler catch-up uses measured lag, not fixed skip
- stop drains/recycles device-frame ownership in pure fakes
Useful homes:
- `VideoPlayoutSchedulerTests` for scheduler evolution
- `VideoIODeviceFakeTests` for fake backend lifecycle
- a new `VideoBackendStateMachineTests`
- a new `RenderOutputQueueTests`
## Risks
### Latency Risk
More headroom means more latency. Phase 7 should make latency a visible policy choice.
### Buffer Lifetime Risk
Render and backend will share ownership boundaries around output buffers. Frame ownership must be explicit to avoid reuse while hardware still owns a frame.
### Underrun Policy Risk
Reusing stale frames can be visually acceptable, but wrong key/fill behavior may be worse than black. Test with real output.
### Callback Thread Risk
Even after decoupling render, callback work must stay small and bounded.
### Scope Risk
Backend lifecycle and playout queue are related, but either can grow large. Implement in small, testable slices.
## Phase 7 Exit Criteria
Phase 7 can be considered complete once the project can say:
- [ ] backend lifecycle states and transitions are explicit
- [ ] playout policy owns preroll, pool size, headroom, and underrun behavior
- [ ] output callbacks no longer synchronously wait for render production
- [ ] render produces completed output frames into a bounded queue
- [ ] underrun behavior is explicit and observable
- [ ] late/drop recovery is measured rather than fixed skip-only
- [ ] backend health reports lifecycle, queue, underrun, late, and dropped state
- [ ] queue/lifecycle/scheduler behavior has non-DeckLink tests
## Open Questions
- What should the default ready-frame depth be at 30fps and 60fps?
- Should underrun reuse last completed, last scheduled, or black?
- Should output queue depth be user-configurable?
- Should render cadence be driven by backend demand, a timer, or queue-fill pressure?
- How should external keying influence stale-frame/black fallback?
- Should input and output lifecycle states be separate endpoints under one backend shell?
## Short Version
Phase 7 should stop making DeckLink callbacks wait for render.
Render produces ahead into a bounded queue. The backend consumes ready frames according to explicit lifecycle and playout policy. Queue depth, underruns, late frames, dropped frames, and degraded states become measured and visible.

View File

@@ -1,367 +0,0 @@
# Phase 8 Design: Health, Telemetry, And Operational Reporting
This document expands Phase 8 of [ARCHITECTURE_RESILIENCE_REVIEW.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/ARCHITECTURE_RESILIENCE_REVIEW.md) into a concrete design target.
Earlier phases clarify subsystem ownership, state layering, render-thread ownership, persistence, and backend lifecycle. Phase 8 should make operational visibility match that architecture: structured health state, timing, counters, warnings, and logs should flow through one telemetry subsystem instead of scattered debug strings and ad hoc status fields.
## Status
- Phase 8 design package: proposed.
- Phase 8 implementation: not started.
- Current alignment: `HealthTelemetry` exists and already receives some render, signal, video IO, and pacing observations. Runtime events also carry some timing and backend observations. The remaining work is to make health/telemetry structured, comprehensive, bounded, and operator-facing.
Current telemetry footholds:
- `HealthTelemetry` owns basic signal, performance, frame pacing, and video IO status reporting.
- `RuntimeEventDispatcher` publishes typed observations such as timing samples and backend state changes.
- `RuntimeStatePresenter` includes some health/performance fields in runtime-state output.
- Render and backend paths already collect some timing and late/drop counts.
## Why Phase 8 Exists
The app can detect many problems, but operational visibility is still fragmented:
- some failures show modal dialogs
- some warnings go only to `OutputDebugStringA`
- some timing lives in health telemetry
- some observations are runtime events
- UI-facing state combines operational state with runtime state
- repeated warnings are not uniformly deduplicated, classified, or summarized
Live software needs to answer:
- what is healthy right now?
- what is degraded but still running?
- what recently failed?
- which subsystem is under timing pressure?
- what should an operator see versus what should an engineer debug?
## Goals
Phase 8 should establish:
- structured log entries with subsystem, severity, category, timestamp, and message
- subsystem-scoped health states
- bounded recent warning/error history
- timing samples, counters, and gauges for render/control/backend/persistence
- stable health snapshots for UI/diagnostics
- direct debug-output paths wrapped by structured telemetry
- low-overhead reporting from render and callback paths
- tests for severity, deduplication, counters, snapshots, and bounded retention
## Non-Goals
Phase 8 should not require:
- a cloud telemetry service
- external metrics database
- a full UI redesign
- automatic recovery policy owned by telemetry
- unbounded logs or time-series storage
- replacing every `MessageBoxA` on day one
Telemetry observes and reports. It does not become the control plane.
## Target Model
Suggested core model:
- `TelemetrySubsystem`
- `TelemetrySeverity`
- `TelemetryLogEntry`
- `TelemetryWarningRecord`
- `TelemetryCounter`
- `TelemetryGauge`
- `TelemetryTimingSample`
- `SubsystemHealthState`
- `HealthSnapshot`
Important distinction:
- raw observations are append/update operations
- health snapshots are derived read models
## Health Domains
At minimum:
- `ApplicationShell`
- `RuntimeStore`
- `RuntimeCoordinator`
- `RuntimeSnapshotProvider`
- `ControlServices`
- `RenderEngine`
- `VideoBackend`
- `Persistence`
Suggested states:
- `Healthy`
- `Warning`
- `Degraded`
- `Error`
- `Unavailable`
The overall app health should be derived from subsystem states.
## Proposed Interfaces
### Write Interface
Target operations:
- `AppendLog(...)`
- `RaiseWarning(...)`
- `ClearWarning(...)`
- `RecordCounterDelta(...)`
- `RecordGauge(...)`
- `RecordTimingSample(...)`
- `ReportSubsystemState(...)`
Hot-path producers should be able to record observations cheaply and return.
### Read Interface
Target operations:
- `BuildHealthSnapshot()`
- `GetSubsystemHealth(...)`
- `GetActiveWarnings()`
- `GetRecentLogs(...)`
- `GetTimingSummary(...)`
UI/control services should consume snapshots, not scrape subsystem internals.
## Producer Expectations
### `RenderEngine`
Expected observations:
- render frame duration
- input upload duration/count/drop/coalescing
- output request latency
- readback duration
- synchronous readback fallback count
- preview present cost/skips
- wrong-thread diagnostics
### `VideoBackend`
Expected observations:
- lifecycle state
- playout queue depth
- output underruns
- late/dropped/flushed/completed counts
- input signal state
- output model/mode status
- spare buffer depth
### `ControlServices`
Expected observations:
- OSC decode errors
- control request failures
- websocket broadcast failures
- ingress queue depth
- file-watch/reload events
- service start/stop state
### `RuntimeCoordinator`
Expected observations:
- rejected mutation count and reasons
- reload requests
- preset failures
- transient-state invalidations
- persistence request publication
### `RuntimeSnapshotProvider`
Expected observations:
- snapshot publish duration
- snapshot version churn
- stale snapshot/fallback behavior
- publish failures
### `PersistenceWriter`
Expected observations:
- pending write count
- coalesced write count
- write duration
- write failure
- unsaved durable changes
- shutdown flush result
## Logging Policy
Direct string logging can remain as an output sink, but not as the source of truth.
Target flow:
```text
subsystem reports structured warning/log
-> HealthTelemetry stores bounded structured entry
-> optional debug sink prints text
-> UI/diagnostics reads health snapshot
```
Repeated warnings should be deduplicated by key while preserving counts and last-seen timestamps.
## Snapshot Contract
`HealthSnapshot` should answer:
- overall health
- subsystem health states
- active warnings
- recent important logs
- key counters
- key timing summaries
- degraded-state reasons
The snapshot should avoid copying durable runtime truth. Runtime state and health state can be published together by `ControlServices`, but they should remain separate read models.
## Migration Plan
### Step 1. Expand Health Model Types
Add structured subsystem/severity/category types and snapshot models.
Initial target:
- keep existing health fields
- add structured warning/log/counter/gauge containers
- add tests for bounded retention and deduplication
### Step 2. Wrap Direct Warning Paths
Route common direct logs through telemetry first.
Initial candidates:
- backend fallback warnings
- screenshot write failures
- OSC decode/dispatch failures
- render-thread request failures
### Step 3. Add Subsystem Health States
Let subsystems report state transitions.
Initial target:
- `RenderEngine`: healthy/degraded on render-thread request failures
- `VideoBackend`: configured/running/degraded/no-input/dropping
- `ControlServices`: running/degraded/stopped
- `Persistence`: clean/pending/error
### Step 4. Split Timing Into Named Metrics
Move from broad timing fields to named samples/gauges.
Initial target:
- render duration
- readback duration/fallback count
- output request latency
- playout completion interval
- event queue depth
- persistence write duration
### Step 5. Publish Health Snapshot
Expose `HealthTelemetry` snapshot through control/runtime presentation.
Initial target:
- UI can distinguish runtime state from operational health
- active warnings are visible
- recent degraded reasons are visible
### Step 6. Add Operational Tests
Cover:
- warning raise/clear
- repeated warning coalescing
- counter/gauge updates
- health derivation
- bounded log retention
- snapshot stability
## Testing Strategy
Recommended tests:
- warning raised appears in active warnings
- warning clear removes active warning but preserves history
- repeated warning increments count and updates last-seen time
- bounded log keeps newest entries
- subsystem `Error` makes overall health `Error`
- subsystem `Degraded` makes overall health degraded if no error exists
- timing sample updates summary
- counter delta accumulates
- health snapshot is read-only/stable
Useful homes:
- `HealthTelemetryTests`
- `RuntimeEventTypeTests` for observation event payloads
- future integration tests for control-service health publication
## Risks
### Telemetry Becomes Behavior
Telemetry must not become the hidden way subsystems command each other. It reports. Subsystems own mitigation.
### Too Much Hot-Path Cost
Render and callback paths need cheap writes. Use bounded structures and avoid expensive formatting on hot paths.
### String-Only Logging
Centralizing strings is not enough. Severity, subsystem, category, and structured fields should be first-class.
### Snapshot Bloat
Health snapshots should summarize operational state, not duplicate full runtime/project state.
### Alert Noise
Without deduplication and severity discipline, operator-facing health can become noisy and ignored.
## Phase 8 Exit Criteria
Phase 8 can be considered complete once the project can say:
- [ ] major subsystems publish structured health/telemetry observations
- [ ] active warnings and recent logs are structured and bounded
- [ ] subsystem health states roll up to an overall health state
- [ ] render/backend/control/persistence timing metrics are named and visible
- [ ] direct debug-string warning paths are wrapped or retired for major cases
- [ ] UI/control diagnostics can consume a stable health snapshot
- [ ] telemetry write paths are cheap enough for render/callback use
- [ ] telemetry behavior has focused tests
## Open Questions
- Should debug output remain enabled by default as a telemetry sink?
- How many recent logs/warnings should be retained in memory?
- Should timing summaries store raw samples, rolling windows, or both?
- Should warning thresholds be declared centrally or owned by each subsystem?
- Should health snapshots be published with runtime state or on a separate endpoint/channel?
- Should logs eventually be written to disk, and if so, through Phase 6 persistence infrastructure or a separate log sink?
## Short Version
Phase 8 should make the app diagnosable.
Subsystems report structured observations. `HealthTelemetry` records bounded logs, warnings, counters, gauges, timing, and subsystem states. UI and diagnostics consume stable health snapshots. Debug strings become a sink, not the source of truth.

View File

@@ -363,6 +363,10 @@ components:
$ref: "#/components/schemas/VideoIOStatus"
performance:
$ref: "#/components/schemas/PerformanceStatus"
backendPlayout:
$ref: "#/components/schemas/BackendPlayoutStatus"
runtimeEvents:
$ref: "#/components/schemas/RuntimeEventStatus"
shaders:
type: array
items:
@@ -382,10 +386,16 @@ components:
type: number
oscPort:
type: number
oscBindAddress:
type: string
oscSmoothing:
type: number
autoReload:
type: boolean
maxTemporalHistoryFrames:
type: number
previewFps:
type: number
enableExternalKeying:
type: boolean
inputVideoFormat:
@@ -478,6 +488,175 @@ components:
type: number
flushedFrameCount:
type: number
BackendPlayoutStatus:
type: object
properties:
lifecycleState:
type: string
example: running
degraded:
type: boolean
statusMessage:
type: string
lateFrameCount:
type: number
droppedFrameCount:
type: number
flushedFrameCount:
type: number
readyQueue:
$ref: "#/components/schemas/BackendReadyQueueStatus"
outputRender:
$ref: "#/components/schemas/BackendOutputRenderStatus"
recovery:
$ref: "#/components/schemas/BackendPlayoutRecoveryStatus"
BackendReadyQueueStatus:
type: object
properties:
depth:
type: number
description: Current number of ready output frames.
capacity:
type: number
description: Maximum ready output frames currently allowed.
minDepth:
type: number
description: Minimum observed ready queue depth since backend worker start.
maxDepth:
type: number
description: Maximum observed ready queue depth since backend worker start.
zeroDepthCount:
type: number
description: Number of observed samples where the ready queue was empty.
pushedCount:
type: number
poppedCount:
type: number
droppedCount:
type: number
underrunCount:
type: number
BackendOutputRenderStatus:
type: object
properties:
renderMs:
type: number
description: Most recent output render duration in milliseconds.
smoothedRenderMs:
type: number
description: Smoothed output render duration in milliseconds.
maxRenderMs:
type: number
description: Maximum observed output render duration in milliseconds.
acquireFrameMs:
type: number
description: Time spent acquiring a writable backend output frame in milliseconds.
renderRequestMs:
type: number
description: Time spent executing the render-thread output frame request in milliseconds.
endAccessMs:
type: number
description: Time spent ending write access to the backend output frame in milliseconds.
queueWaitMs:
type: number
description: Time the output render request spent waiting for the render thread in milliseconds.
drawMs:
type: number
description: Time spent drawing, blitting, packing, and flushing the output frame in milliseconds.
fenceWaitMs:
type: number
description: Time spent waiting for the async readback fence in milliseconds.
mapMs:
type: number
description: Time spent mapping the async readback pixel buffer in milliseconds.
readbackCopyMs:
type: number
description: Time spent copying async readback bytes into the backend output frame in milliseconds.
cachedCopyMs:
type: number
description: Time spent copying the cached output frame when async readback is not ready in milliseconds.
asyncQueueMs:
type: number
description: Time spent queueing the next async readback in milliseconds.
asyncQueueBufferMs:
type: number
description: Time spent orphaning or allocating the async readback pixel buffer in milliseconds.
asyncQueueSetupMs:
type: number
description: Time spent applying readback pixel-store, framebuffer, and pixel-pack-buffer state in milliseconds.
asyncQueueReadPixelsMs:
type: number
description: Time spent issuing glReadPixels for the async readback in milliseconds.
asyncQueueFenceMs:
type: number
description: Time spent creating the async readback fence in milliseconds.
syncReadMs:
type: number
description: Time spent in bootstrap synchronous readback in milliseconds.
asyncReadbackMissCount:
type: number
description: Count of output render requests where async readback was not ready.
cachedFallbackCount:
type: number
description: Count of output render requests served from the cached output frame.
syncFallbackCount:
type: number
description: Count of output render requests that used bootstrap synchronous readback.
BackendPlayoutRecoveryStatus:
type: object
properties:
completionResult:
type: string
enum: [Completed, DisplayedLate, Dropped, Flushed, Unknown]
completedFrameIndex:
type: number
scheduledFrameIndex:
type: number
scheduledLeadFrames:
type: number
measuredLagFrames:
type: number
catchUpFrames:
type: number
lateStreak:
type: number
dropStreak:
type: number
RuntimeEventStatus:
type: object
properties:
queue:
$ref: "#/components/schemas/RuntimeEventQueueStatus"
dispatch:
$ref: "#/components/schemas/RuntimeEventDispatchStatus"
RuntimeEventQueueStatus:
type: object
properties:
name:
type: string
depth:
type: number
capacity:
type: number
droppedCount:
type: number
oldestEventAgeMs:
type: number
RuntimeEventDispatchStatus:
type: object
properties:
dispatchCallCount:
type: number
dispatchedEventCount:
type: number
handlerInvocationCount:
type: number
handlerFailureCount:
type: number
lastDispatchDurationMs:
type: number
maxDispatchDurationMs:
type: number
ShaderSummary:
type: object
properties:
@@ -497,6 +676,8 @@ components:
description: Error text for unavailable shader packages.
temporal:
$ref: "#/components/schemas/TemporalState"
feedback:
$ref: "#/components/schemas/FeedbackState"
TemporalState:
type: object
properties:
@@ -509,6 +690,13 @@ components:
type: number
effectiveHistoryLength:
type: number
FeedbackState:
type: object
properties:
enabled:
type: boolean
writePass:
type: string
LayerState:
type: object
properties:

View File

@@ -153,7 +153,7 @@ The following must stay outside `ControlServices` in the target design.
The subsystem may report that an input requested a state change, but it should not decide whether that change is persisted.
That belongs to `RuntimeCoordinator`, with `RuntimeStore` and the later persistence writer carrying out durable writes when policy requests them.
### Render Snapshot Publication
@@ -161,9 +161,9 @@ That belongs to `RuntimeCoordinator` and `RuntimeStore`.
### Render-Local Overlay Ownership
Live OSC automation overlays belong to the live-state/render preparation boundary (`RuntimeLiveState` today). Temporal state, shader feedback, output staging, and other render-only transient state belong to `RenderEngine`.
`ControlServices` may ingest and coalesce automation targets, but it should not own how those targets are composed, committed, persisted, or applied inside the render domain.
### Hardware Timing or Playout Recovery
@@ -508,8 +508,8 @@ The goal is for transports to emit actions, even if temporary adapters still cal
`OpenGLComposite` currently owns `RuntimeServices` startup and consumption:
- [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLComposite.cpp:312)
- [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLComposite.cpp:723)
That should move toward a composition root or subsystem host arrangement where render is no longer the owner of control ingress.

View File

@@ -362,6 +362,9 @@ Expected observations:
Expected observations:
- current playout queue depth
- system-memory playout frame counts by state: free, ready, and scheduled
- system-memory playout underrun, repeat, and drop counters
- system-memory frame age at schedule and completion time
- input signal state
- late frames
- dropped frames

View File

@@ -1,70 +1,44 @@
# Phase 1 Subsystem Design Index # Subsystem Notes Index
This directory contains the subsystem-specific design notes for Phase 1 of the architecture roadmap. The current, phase-free architecture summary is:
Start here if you want the Phase 1 package to read as one coherent deliverable rather than as separate subsystem writeups. - [Current System Architecture](../CURRENT_SYSTEM_ARCHITECTURE.md)
Parent documents: Start there when you want to understand how the application works now.
- [Architecture Resilience Review](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/ARCHITECTURE_RESILIENCE_REVIEW.md) This directory contains deeper notes for individual subsystem boundaries. These notes were originally written during the phased architecture work, so some files may still mention migration steps or target-state language. Treat them as companion notes, not as the source of truth when they disagree with the current architecture summary.
- [Phase 1: Subsystem Boundaries and Target Architecture](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_1_SUBSYSTEM_BOUNDARIES_DESIGN.md)
## How This Set Fits Together
- [PHASE_1_SUBSYSTEM_BOUNDARIES_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_1_SUBSYSTEM_BOUNDARIES_DESIGN.md) defines the top-level subsystem split, dependency rules, state categories, and migration guardrails.
- The notes in this directory expand each subsystem boundary without changing the parent Phase 1 design.
- The subsystem notes are meant to be read as design companions, not as independent alternate architectures.
Status note:
- The Phase 1 design package is complete.
- The runtime implementation foothold is complete: the named runtime subsystems exist in code, `RuntimeHost` is retired from the compiled runtime path, and subsystem tests cover the new seams.
- The whole app is not fully extracted yet, so these notes still describe the architecture later phases should continue toward.
## Recommended Reading Order ## Recommended Reading Order
1. [PHASE_1_SUBSYSTEM_BOUNDARIES_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_1_SUBSYSTEM_BOUNDARIES_DESIGN.md) 1. [Current System Architecture](../CURRENT_SYSTEM_ARCHITECTURE.md)
2. [RuntimeStore.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeStore.md) 2. [RuntimeStore](RuntimeStore.md)
3. [RuntimeCoordinator.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeCoordinator.md) 3. [RuntimeCoordinator](RuntimeCoordinator.md)
4. [RuntimeSnapshotProvider.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeSnapshotProvider.md) 4. [RuntimeSnapshotProvider](RuntimeSnapshotProvider.md)
5. [ControlServices.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/ControlServices.md) 5. [ControlServices](ControlServices.md)
6. [RenderEngine.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RenderEngine.md) 6. [RenderEngine](RenderEngine.md)
7. [VideoBackend.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/VideoBackend.md) 7. [VideoBackend](VideoBackend.md)
8. [HealthTelemetry.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/HealthTelemetry.md) 8. [HealthTelemetry](HealthTelemetry.md)
That order mirrors the intended dependency story: That order follows the current ownership story:
- durable state first - durable state first
- mutation and publication next - mutation and publication next
- ingress and render boundaries after that - control ingress after that
- device timing and operational visibility last - render ownership and video timing next
- operational visibility last
## Subsystem Notes ## Subsystem Notes
- [RuntimeStore.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeStore.md) - [RuntimeStore](RuntimeStore.md): durable runtime-state facade over layer-stack, config, package-catalog, presentation, and persistence boundaries.
Durable runtime-state facade over layer-stack, config, package-catalog, presentation, and persistence boundaries. - [RuntimeCoordinator](RuntimeCoordinator.md): mutation validation, state classification, reset/reload policy, and publication/persistence requests.
- [RuntimeCoordinator.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeCoordinator.md) - [RuntimeSnapshotProvider](RuntimeSnapshotProvider.md): render-facing snapshot publication boundary backed by explicit render snapshot building/versioning.
Mutation validation, state classification, reset/reload policy, and publication/persistence requests. - [ControlServices](ControlServices.md): OSC, HTTP/WebSocket, and file-watch ingress plus normalization and service-local buffering.
- [RuntimeSnapshotProvider.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RuntimeSnapshotProvider.md) - [RenderEngine](RenderEngine.md): GL ownership boundary, render-local transient state, preview, and playout-ready frame production.
Render-facing snapshot publication boundary backed by explicit render snapshot building/versioning. - [VideoBackend](VideoBackend.md): device lifecycle, input/output pacing, buffer policy, and producer/consumer playout behavior.
- [ControlServices.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/ControlServices.md) - [HealthTelemetry](HealthTelemetry.md): logs, warnings, counters, timing traces, and subsystem health snapshots.
OSC, HTTP/WebSocket, and file-watch ingress plus normalization and service-local buffering.
- [RenderEngine.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/RenderEngine.md)
Sole-owner render/GL boundary, render-local transient state, preview, and playout-ready frame production.
- [VideoBackend.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/VideoBackend.md)
Device lifecycle, input/output pacing, buffer policy, and producer/consumer playout direction.
- [HealthTelemetry.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/subsystems/HealthTelemetry.md)
Logs, warnings, counters, timing traces, and subsystem health snapshots.
## What Phase 1 Should Settle ## Historical Documents
Phase 1 should leave the project with: The `docs/PHASE_*` files and experiment logs record how the architecture evolved. They are useful when you need rationale, investigation history, or rejected paths, but they are no longer arranged as the main feature split for the app.
- one agreed subsystem vocabulary For current implementation work, use [Current System Architecture](../CURRENT_SYSTEM_ARCHITECTURE.md) as the entry point and only dip into the phase documents when you need context for why a subsystem ended up this way.
- one agreed dependency direction map
- one agreed state-category model
- one agreed current-to-target migration story
Phase 1 does not need to settle every later implementation detail. The subsystem notes intentionally leave some questions open where later phases need room to choose concrete mechanics.
As of the current codebase, those design questions are settled well enough for later work to build against them. Remaining implementation work should be tracked under later phases, especially eventing, render-thread ownership, persistence, backend lifecycle, live-state layering, and telemetry.

View File

@@ -36,17 +36,16 @@ In the Phase 1 terminology, `RenderEngine` consumes snapshots plus render-local
The current rendering path is split across several classes: The current rendering path is split across several classes:
- [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLComposite.cpp:86) constructs the renderer, render pipeline, shader programs, runtime services, and video bridge in one owner. - [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLComposite.cpp:86) constructs the renderer, render pipeline, shader programs, runtime services, and video bridge in one owner.
- [OpenGLRenderPipeline.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLRenderPipeline.cpp:31) performs pass execution, pack/readback, preview paint, and performance stat publication. - [OpenGLRenderPipeline.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLRenderPipeline.cpp:31) performs pass execution, pack/readback, preview paint, and performance stat publication.
- [OpenGLVideoIOBridge.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLVideoIOBridge.cpp:58) accepts capture frames and still performs render work from the playout completion callback path. - [OpenGLVideoIOBridge.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/pipeline/OpenGLVideoIOBridge.cpp:58) accepts capture frames and still performs render work from the playout completion callback path.
- `RenderFrameStateResolver` and `RenderStateComposer` now keep frame-state selection and live value composition outside GL drawing, while `RenderEngine` still owns the current GL resource and draw path. - `RenderFrameStateResolver` and `RenderStateComposer` now keep frame-state selection and live value composition outside GL drawing, while `RenderEngine` still owns the current GL resource and draw path.
That split is workable today, but it creates architectural pressure: That split is workable today, but it creates architectural pressure:
- GL ownership is thread-shared instead of sole-owned.
- render and playout timing are still callback-coupled. - render and playout timing are still callback-coupled.
- preview and playout are produced in the same immediate path. - preview and playout are produced in the same immediate path.
- render-local transient state now has clearer Phase 3 boundaries, but GL ownership is still shared through callback and UI entrypoints. - render-local transient state now has clearer Phase 3/5 boundaries, but output production is still synchronously requested by the backend completion path.
- it is difficult to test render behavior separately from app bootstrap and hardware integration. - it is difficult to test render behavior separately from app bootstrap and hardware integration.
`RenderEngine` exists to absorb that responsibility into one subsystem with one direction of ownership. Phase 4 has completed the GL ownership part of this target: normal runtime GL work now enters through the `RenderEngine` render thread. `RenderEngine` exists to absorb that responsibility into one subsystem with one direction of ownership. Phase 4 has completed the GL ownership part of this target: normal runtime GL work now enters through the `RenderEngine` render thread.
@@ -113,6 +112,8 @@ Phase 5's `RuntimeStateLayerModel` explicitly keeps temporal history, feedback s
`RuntimeLiveState` now owns transient automation invalidation for render-facing compatibility. It can clear overlays for a target layer/control key and prunes overlays that no longer resolve to the current layer and parameter definitions before applying them to a frame. This keeps shader reload, preset load, and layer removal behavior local to the live-state/composition boundary instead of scattering it through GL drawing code. `RuntimeLiveState` now owns transient automation invalidation for render-facing compatibility. It can clear overlays for a target layer/control key and prunes overlays that no longer resolve to the current layer and parameter definitions before applying them to a frame. This keeps shader reload, preset load, and layer removal behavior local to the live-state/composition boundary instead of scattering it through GL drawing code.
Render snapshots now flow through a named `CommittedLiveStateReadModel`, so render-facing committed state is distinct from durable storage and physically owned by `CommittedLiveState`.
### 5. Shader Build Application ### 5. Shader Build Application
Compilation itself may eventually move into a separate build service, but once shader build outputs exist, `RenderEngine` owns: Compilation itself may eventually move into a separate build service, but once shader build outputs exist, `RenderEngine` owns:

View File

@@ -12,7 +12,7 @@ Before the Phase 1 runtime split, the app's mutation path was split across sever
- `RuntimeHost.h` - `RuntimeHost.h`
- `RuntimeHost.cpp` - `RuntimeHost.cpp`
- `OpenGLComposite` currently acts like an orchestration shell and a mutation coordinator at the same time: - `OpenGLComposite` currently acts like an orchestration shell and a mutation coordinator at the same time:
- [OpenGLCompositeRuntimeControls.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLCompositeRuntimeControls.cpp:1) - [OpenGLCompositeRuntimeControls.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLCompositeRuntimeControls.cpp:1)
- `RuntimeServices` still owns some deferred control flow around OSC commit and polling: - `RuntimeServices` still owns some deferred control flow around OSC commit and polling:
- [RuntimeServices.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/control/RuntimeServices.h:46) - [RuntimeServices.h](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/control/RuntimeServices.h:46)
@@ -80,7 +80,7 @@ The coordinator decides which state category a mutation affects:
The design rule is that classification belongs here, not in the ingress layer and not in render code. The design rule is that classification belongs here, not in the ingress layer and not in render code.
Phase 5 has started codifying the shared vocabulary for this classification in `RuntimeStateLayerModel`. The current model records committed session parameter values, layer bypass state, and runtime compile/reload flags as committed-live/session coordination state, even though some of those values are still physically backed by `RuntimeStore` during migration. Phase 5 codifies the shared vocabulary for this classification in `RuntimeStateLayerModel`. Current committed session parameter values and layer bypass state are committed-live/session state owned by `CommittedLiveState`; runtime compile/reload flags are coordination state rather than durable store truth.
### 4. Snapshot publication requests ### 4. Snapshot publication requests
@@ -248,13 +248,15 @@ Typical interaction:
This is the coordinator's primary logical domain. This is the coordinator's primary logical domain.
Even while committed live state is physically stored inside `RuntimeStore`, the coordinator should be considered the policy owner of: The coordinator is the policy owner of:
- current layer stack composition - current layer stack composition
- current selected shaders - current selected shaders
- current bypass flags - current bypass flags
- current operator-authored parameter values - current operator-authored parameter values
`CommittedLiveState` is the physical owner for this current-session layer state. `RuntimeStore` persists or skips disk writes according to coordinator policy and remains the compatibility facade for existing mutation call shapes.
### Transient live overlay state ### Transient live overlay state
The coordinator defines the rules for transient state, but should not become the long-term storage owner for render-local transient data. The coordinator defines the rules for transient state, but should not become the long-term storage owner for render-local transient data.
@@ -272,6 +274,10 @@ For OSC specifically, the coordinator should eventually decide:
- whether it should later commit into committed live state - whether it should later commit into committed live state
- what reset/reload actions invalidate it - what reset/reload actions invalidate it
Phase 5 sets the default settled OSC policy to session-only. `CommitOscParameterByControlKey(...)` updates committed session state through the store with persistence disabled, publishes ordinary mutation/state-change observations, and does not request a persistence write unless a future explicit policy opts into durable OSC commits.
The committed-live concept now has a physical owner, `CommittedLiveState`, plus a named read model, `CommittedLiveStateReadModel`. The coordinator remains the owner of whether a mutation should be durable or session-only, while `RuntimeStore` persists or skips disk writes according to that policy.
### Health and timing state ### Health and timing state
The coordinator may emit events like: The coordinator may emit events like:
@@ -363,7 +369,7 @@ currently do this pattern:
2. decide whether to call `ReloadShader(...)` 2. decide whether to call `ReloadShader(...)`
3. call `broadcastRuntimeState()` 3. call `broadcastRuntimeState()`
See [OpenGLCompositeRuntimeControls.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLCompositeRuntimeControls.cpp:1). See [OpenGLCompositeRuntimeControls.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLCompositeRuntimeControls.cpp:1).
That "call host, then decide reload/broadcast policy" logic is a direct candidate for migration into `RuntimeCoordinator`. That "call host, then decide reload/broadcast policy" logic is a direct candidate for migration into `RuntimeCoordinator`.
@@ -530,7 +536,6 @@ Mitigation:
## Open Questions ## Open Questions
- Should committed live state remain physically stored in `RuntimeStore`, or should the coordinator gain a live-session companion object before Phase 3?
- Should preset load/save stay synchronous through early migration, or should the coordinator always treat them as policy requests whose persistence effects may complete later? - Should preset load/save stay synchronous through early migration, or should the coordinator always treat them as policy requests whose persistence effects may complete later?
- Should reload requests be modeled as a dedicated mutation class distinct from ordinary control mutations from the start? - Should reload requests be modeled as a dedicated mutation class distinct from ordinary control mutations from the start?
- How much normalization of parameter values should remain in store-side helpers versus moving into coordinator policy helpers? - How much normalization of parameter values should remain in store-side helpers versus moving into coordinator policy helpers?

View File

@@ -21,9 +21,9 @@ Before the Phase 1 runtime split, the closest behavior lived in:
- `RuntimeHost::TryRefreshCachedLayerStates(...)` - `RuntimeHost::TryRefreshCachedLayerStates(...)`
- `RuntimeHost::RefreshDynamicRenderStateFields(...)` - `RuntimeHost::RefreshDynamicRenderStateFields(...)`
- `RuntimeHost::BuildLayerRenderStatesLocked(...)` - `RuntimeHost::BuildLayerRenderStatesLocked(...)`
- the render-side cache usage in [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLComposite.cpp:589) - the render-side cache usage in [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLComposite.cpp:589)
`RuntimeSnapshotProvider` should absorb that responsibility, but in a cleaner and more publish-oriented way. `RuntimeSnapshotProvider` has absorbed that responsibility in a cleaner and more publish-oriented way.
## Responsibilities ## Responsibilities
@@ -35,7 +35,7 @@ Before the Phase 1 runtime split, the closest behavior lived in:
`RenderSnapshotBuilder` is responsible for: `RenderSnapshotBuilder` is responsible for:
- building render-facing snapshots from durable store state plus whatever committed-live state view the Phase 3 split ultimately exposes - building render-facing snapshots from the committed-live read model and package/runtime metadata supplied by `RuntimeStore`
- separating structural snapshot changes from dynamic frame fields - separating structural snapshot changes from dynamic frame fields
- translating runtime layer state into render-ready layer descriptors - translating runtime layer state into render-ready layer descriptors
- attaching immutable or near-immutable shader/package-derived data needed by render - attaching immutable or near-immutable shader/package-derived data needed by render
@@ -81,13 +81,11 @@ The shape of render-facing layer state should remain consistent across phases ev
`RenderSnapshotBuilder` should build from a read-oriented runtime view, not from direct mutation calls. `RuntimeSnapshotProvider` should consume the builder's output and own publication/cache behavior. `RenderSnapshotBuilder` should build from a read-oriented runtime view, not from direct mutation calls. `RuntimeSnapshotProvider` should consume the builder's output and own publication/cache behavior.
That view will likely include: That view now includes:
- durable configuration and layer-stack data from `RuntimeStore` - committed live layer state from `CommittedLiveStateReadModel`
- committed live values from either: - package and manifest metadata supplied through `RuntimeStore`
- `RuntimeStore`, while committed live state is still co-located there, or - durable runtime configuration needed to describe render-facing dimensions and defaults
- a coordinator-owned live-state companion once Phase 3 finishes the split
- package and manifest metadata required to describe render-facing layer structure
The important Phase 1 rule is not "the provider always reads one specific object." It is: The important Phase 1 rule is not "the provider always reads one specific object." It is:
@@ -222,9 +220,9 @@ The target read contract for `RenderEngine` should be:
Important rule: Important rule:
- `RenderEngine` should never partially mutate the providers published snapshot in place - `RenderEngine` should never partially mutate the provider's published snapshot in place.
That means todays `TryRefreshCachedLayerStates(...)` behavior is a migration waypoint, not a target pattern. Once the provider exists, the render side should treat the snapshot as immutable input and keep any overlays or last-frame adjusted values inside `RenderEngine`. The old `TryRefreshCachedLayerStates(...)` host path is gone. The remaining dynamic refresh is explicit: `RuntimeSnapshotProvider::RefreshDynamicRenderStateFields(...)` updates frame-local fields on render-owned copies, while published snapshot structure and committed parameter data stay behind the provider boundary.
## Render-Facing Data Shape Rules ## Render-Facing Data Shape Rules
@@ -297,9 +295,9 @@ Notes:
### `RuntimeStore` ### `RuntimeStore`
`RenderSnapshotBuilder` depends on store-owned durable data and package metadata through a read-oriented interface or view. `RuntimeSnapshotProvider` depends on the builder rather than reaching into store internals directly. `RenderSnapshotBuilder` depends on store-owned durable metadata and the committed-live read model exposed through store-facing read APIs. `RuntimeSnapshotProvider` depends on the builder rather than reaching into store internals directly.
If committed live state remains physically co-located with the store during early migration, the builder may read it through the same view. If committed live state moves behind a coordinator-owned live-session model later, the builder should consume that through a similarly read-oriented view. Committed session layer state now lives in `CommittedLiveState`; `RuntimeStore` remains the facade that combines that read model with package metadata and persistence-owned data for snapshot publication.
Neither the builder nor provider should mutate the store directly. Neither the builder nor provider should mutate the store directly.
@@ -340,42 +338,7 @@ This is especially important while migrating away from the current lock/fallback
## Current Code Mapping ## Current Code Mapping
The current code follows this migration map. The current runtime path is:
### Move into `RenderSnapshotBuilder`
From `RuntimeHost`:
- layer render-state construction from `BuildLayerRenderStatesLocked(...)`
- render-facing translation of layer persistent state plus package metadata
- explicit version composition for render-visible state
- dynamic frame-context construction currently done in `RefreshDynamicRenderStateFields(...)`
### Move into `RuntimeSnapshotProvider`
- published snapshot cache ownership
- version matching for already-published snapshots
- render-facing compatibility API while render callers migrate
### Stop exposing directly from the host/store boundary
Current methods that should become compatibility shims and later disappear:
- `GetLayerRenderStates(...)`
- `TryGetLayerRenderStates(...)`
- `TryRefreshCachedLayerStates(...)`
- `RefreshDynamicRenderStateFields(...)`
### Render-side compatibility during migration
The previous `OpenGLComposite` cache path:
- read versions from `RuntimeHost`/store-owned counters
- conditionally calls `TryRefreshCachedLayerStates(...)`
- conditionally rebuilds full layer state
- then reapplies render-local OSC overlay state
The migrated runtime path is:
1. get latest published snapshot from provider 1. get latest published snapshot from provider
2. compare snapshot versions produced by `RenderSnapshotBuilder` 2. compare snapshot versions produced by `RenderSnapshotBuilder`
@@ -383,7 +346,20 @@ The migrated runtime path is:
4. apply render-local overlay state 4. apply render-local overlay state
5. attach frame context 5. attach frame context
That is a much cleaner split than the current mixed lock/cache/fallback flow in [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/OpenGLComposite.cpp:589). That replaced the old mixed lock/cache/fallback flow that lived around [OpenGLComposite.cpp](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/apps/LoopThroughWithOpenGLCompositing/gl/composite/OpenGLComposite.cpp:589).
`RenderSnapshotBuilder` now owns:
- layer render-state construction
- render-facing translation of committed live state plus package metadata
- explicit version composition for render-visible state
- dynamic frame-field refresh for render-owned copies
`RuntimeSnapshotProvider` now owns:
- published snapshot cache ownership
- version matching for already-published snapshots
- publication events and snapshot publish observations
## Migration Plan ## Migration Plan
@@ -405,8 +381,8 @@ That is a much cleaner split than the current mixed lock/cache/fallback flow in
### Step 4: Remove mutable snapshot refresh paths ### Step 4: Remove mutable snapshot refresh paths
- retire `TryRefreshCachedLayerStates(...)` - completed: retire the old `TryRefreshCachedLayerStates(...)` host path
- publish new snapshots for committed parameter changes instead of mutating render-cached host-derived vectors in place - publish new snapshots for committed parameter changes instead of mutating published snapshot structure in place
### Step 5: Move publication triggering fully behind `RuntimeCoordinator` ### Step 5: Move publication triggering fully behind `RuntimeCoordinator`

View File

@@ -95,7 +95,9 @@ Those are coordinator concerns, not store concerns.
`RuntimeStore` should own the following state categories. `RuntimeStore` should own the following state categories.
Phase 5 names this boundary in code through `RuntimeStateLayerModel`: persisted layer stack data, saved parameter values, and stack presets are classified as base persisted state. Operator/session values may still be backed by the store during migration, but their mutation policy is committed-live policy owned by the coordinator, not durable-store policy by default. Phase 5 names this boundary in code through `RuntimeStateLayerModel`: persisted layer stack data, saved parameter values, and stack presets are classified as base persisted state. Operator/session values are owned by `CommittedLiveState`; their mutation policy is committed-live policy owned by the coordinator, not durable-store policy by default.
Phase 5 also adds `CommittedLiveState` as the physical owner of current session/operator layer state and `CommittedLiveStateReadModel` as the named read boundary for render snapshot publication. `RuntimeStore` still owns file IO, config, package metadata, preset persistence, and persistence requests, but it delegates current-session layer mutations to `CommittedLiveState`.
### Runtime Configuration ### Runtime Configuration
@@ -185,7 +187,8 @@ Expected responsibilities:
- `LoadConfig()` - `LoadConfig()`
- `LoadPersistentState()` - `LoadPersistentState()`
- `SavePersistentStateSnapshot(...)` - `BuildPersistentStateSnapshot(...)`
- `RequestPersistence(...)`
- `LoadStackPreset(...)` - `LoadStackPreset(...)`
- `SaveStackPreset(...)` - `SaveStackPreset(...)`
- `GetStackPresetNames()` - `GetStackPresetNames()`
@@ -194,7 +197,7 @@ Design notes:
- `Load*` operations should parse and normalize external file content into durable in-memory models. - `Load*` operations should parse and normalize external file content into durable in-memory models.
- `Save*` operations should serialize durable models without needing render or control subsystem context. - `Save*` operations should serialize durable models without needing render or control subsystem context.
- later debounce/background writing should wrap these operations, not redefine their ownership - debounce/background writing wraps these operations rather than redefining store ownership
### Read Interface ### Read Interface
@@ -364,10 +367,14 @@ Those belong under other target subsystems.
- runtime host config load and resolved paths - runtime host config load and resolved paths
The current codebase has completed this part of the split: `RuntimeConfigStore` owns config parsing, path resolution, configured ports/formats, runtime roots, and shader compiler paths, while `RuntimeStore` exposes compatibility-shaped delegates for existing callers. The current codebase has completed this part of the split: `RuntimeConfigStore` owns config parsing, path resolution, configured ports/formats, runtime roots, and shader compiler paths, while `RuntimeStore` exposes compatibility-shaped delegates for existing callers.
- `CommittedLiveState`
- current committed/session layer stack and parameter values
- layer CRUD/reorder and shader selection for the running session
- committed-live read model for snapshot publication
- `LayerStackStore` - `LayerStackStore`
- durable layer stack and parameter values - backing layer stack mechanics used by committed-live state
- layer CRUD/reorder and shader selection - layer CRUD/reorder and shader selection helpers
- stack preset value serialization/load - stack preset value serialization/load helpers
- `RuntimeStatePresenter` / `RuntimeStateJson` - `RuntimeStatePresenter` / `RuntimeStateJson`
- runtime-state JSON assembly - runtime-state JSON assembly
- layer-stack presentation serialization - layer-stack presentation serialization
@@ -380,7 +387,7 @@ The current codebase has completed this part of the split: `RuntimeConfigStore`
- `PersistenceWriter` helper - `PersistenceWriter` helper
- synchronous at first, async/debounced later - synchronous at first, async/debounced later
The current codebase has completed the layer split: `LayerStackStore` owns durable layer state, layer CRUD/reorder, parameter persistence, and stack preset value serialization/load. `RuntimeStore` keeps file IO and facade methods for existing callers. The current codebase has completed the committed-live split: `CommittedLiveState` owns current committed/session layer state using `LayerStackStore` backing mechanics. `RuntimeStore` keeps file IO, package metadata, persistence serialization, persistence requests, preset file access, and facade methods for existing callers.
The current codebase has completed the render snapshot split: `RenderSnapshotBuilder` owns render-state assembly, cached parameter refresh, dynamic frame-field refresh, and render snapshot versions. `RuntimeSnapshotProvider` depends on this builder rather than on `RuntimeStore` friendship. The current codebase has completed the render snapshot split: `RenderSnapshotBuilder` owns render-state assembly, cached parameter refresh, dynamic frame-field refresh, and render snapshot versions. `RuntimeSnapshotProvider` depends on this builder rather than on `RuntimeStore` friendship.
@@ -400,14 +407,14 @@ Target behavior:
- serialization snapshots are built from those models - serialization snapshots are built from those models
- save requests persist a coherent snapshot - save requests persist a coherent snapshot
This matters because the current code still calls `SavePersistentState()` directly from many mutation paths. That is one of the architectural pressure points already called out in [ARCHITECTURE_RESILIENCE_REVIEW.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/ARCHITECTURE_RESILIENCE_REVIEW.md). This matters because earlier code called persistent-state saves directly from mutation paths. Phase 6 removed that pressure point: accepted durable mutations now publish persistence requests, and `RuntimeStore::RequestPersistence(...)` builds a coherent snapshot for the background writer.
The Phase 1 design for `RuntimeStore` should therefore assume: The Phase 1 design for `RuntimeStore` should therefore assume:
- store ownership of serialization remains - store ownership of serialization remains
- immediate save-after-mutate is a migration detail, not the final behavioral contract - persistence requests, not mutation methods, are the durable write boundary
By Phase 6, a background snapshot writer may sit underneath or beside this subsystem, but the durable model still belongs here. Phase 6 added that background snapshot writer underneath this subsystem, while keeping the durable model here.
## Migration Plan From Current Code ## Migration Plan From Current Code
@@ -534,16 +541,7 @@ Current recommendation:
- keep only durable reference/package metadata here - keep only durable reference/package metadata here
### 2. Should Committed Live State Be Co-Located With Persisted State? ### 2. Should Preset Application Be A Store Operation Or A Coordinator Operation?
The Phase 1 parent doc leaves open whether committed live state stays in the store or is split with a live companion model owned by the coordinator.
For `RuntimeStore`, the important rule is:
- if a piece of state is part of the durable truth model, the store should own it
- if it is transient or session-only, it should not be forced into the store just for convenience
### 3. Should Preset Application Be A Store Operation Or A Coordinator Operation?
The file load and preset parse clearly belong here. The file load and preset parse clearly belong here.
@@ -554,7 +552,7 @@ Current recommendation:
- `RuntimeStore` loads preset content - `RuntimeStore` loads preset content
- `RuntimeCoordinator` decides how to apply it - `RuntimeCoordinator` decides how to apply it
### 4. How Early Should Async Persistence Land? ### 3. How Early Should Async Persistence Land?
Phase 1 does not require it, but the store design should not block it. Phase 1 does not require it, but the store design should not block it.

View File

@@ -235,10 +235,15 @@ Examples:
Examples: Examples:
- output queue depth - output queue depth
- free system-memory playout frame count
- ready system-memory playout frame count
- scheduled system-memory playout frame count
- scheduled frame index - scheduled frame index
- completed frame index - completed frame index
- late frame count - late frame count
- dropped frame count - dropped frame count
- underrun/repeat/drop counters for system-memory playout policy
- frame age at schedule time and completion callback time
- spare buffer count - spare buffer count
- current headroom target - current headroom target

View File

@@ -53,6 +53,204 @@ void TestRuntimeEventTryRecord()
Expect(metrics.queue.oldestEventAgeMilliseconds == 0.0, "queue age is clamped to non-negative values"); Expect(metrics.queue.oldestEventAgeMilliseconds == 0.0, "queue age is clamped to non-negative values");
Expect(metrics.dispatch.lastDispatchDurationMilliseconds == 0.0, "dispatch duration is clamped to non-negative values"); Expect(metrics.dispatch.lastDispatchDurationMilliseconds == 0.0, "dispatch duration is clamped to non-negative values");
} }
void TestPersistenceWriteHealth()
{
HealthTelemetry telemetry;
telemetry.RecordPersistenceWriteResult(false, "runtime-state", "runtime/runtime_state.json", "UpdateLayerParameter",
"disk full", true);
HealthTelemetry::PersistenceSnapshot persistence = telemetry.GetPersistenceSnapshot();
Expect(persistence.writeFailureCount == 1, "persistence health counts write failures");
Expect(!persistence.lastWriteSucceeded, "persistence health records failed write state");
Expect(persistence.unsavedChanges, "persistence health reports unsaved changes after failure");
Expect(persistence.newerRequestPending, "persistence health records pending newer request");
Expect(persistence.lastTargetKind == "runtime-state", "persistence health records target kind");
Expect(persistence.lastReason == "UpdateLayerParameter", "persistence health records reason");
Expect(persistence.lastErrorMessage == "disk full", "persistence health records error message");
Expect(telemetry.TryRecordPersistenceWriteResult(true, "runtime-state", "runtime/runtime_state.json", "flush", "", false),
"try persistence health succeeds when uncontended");
persistence = telemetry.GetPersistenceSnapshot();
Expect(persistence.writeSuccessCount == 1, "persistence health counts write successes");
Expect(persistence.lastWriteSucceeded, "persistence health records successful write state");
Expect(!persistence.unsavedChanges, "persistence health clears unsaved changes after latest successful write with no pending request");
}
// Exercises RecordBackendPlayoutHealth / TryRecordBackendPlayoutHealth and
// checks each positional argument lands in the matching snapshot field.
// Argument labels below are inferred from the Expect checks on the snapshot;
// the 5th (12) and 9th (10) integers are never read back here — TODO confirm
// against the RecordBackendPlayoutHealth signature.
void TestBackendPlayoutHealth()
{
    HealthTelemetry telemetry;
    telemetry.RecordBackendPlayoutHealth(
        "Degraded",         // lifecycleState
        "Dropped",          // completionResult
        1,                  // readyQueueDepth
        4,                  // readyQueueCapacity
        12,                 // not asserted below — TODO confirm meaning
        0,                  // minReadyQueueDepth
        3,                  // maxReadyQueueDepth
        4,                  // readyQueueZeroDepthCount
        10,                 // not asserted below — TODO confirm meaning
        2,                  // readyQueueDroppedCount
        1,                  // readyQueueUnderrunCount
        8.5,                // outputRenderMilliseconds
        7.25,               // smoothedOutputRenderMilliseconds
        12.0,               // maxOutputRenderMilliseconds
        1.0,                // outputFrameAcquireMilliseconds
        6.5,                // outputFrameRenderRequestMilliseconds
        0.5,                // outputFrameEndAccessMilliseconds
        8,                  // completedFrameIndex
        11,                 // scheduledFrameIndex
        3,                  // scheduledLeadFrames
        2,                  // measuredLagFrames
        2,                  // catchUpFrames
        1,                  // lateStreak
        2,                  // dropStreak
        5,                  // lateFrameCount
        3,                  // droppedFrameCount
        1,                  // flushedFrameCount
        true,               // degraded
        "Output underrun"); // statusMessage
    const HealthTelemetry::BackendPlayoutSnapshot playout = telemetry.GetBackendPlayoutSnapshot();
    Expect(playout.lifecycleState == "Degraded", "backend playout health stores lifecycle state");
    Expect(playout.completionResult == "Dropped", "backend playout health stores completion result");
    Expect(playout.readyQueueDepth == 1, "backend playout health stores ready queue depth");
    Expect(playout.readyQueueCapacity == 4, "backend playout health stores ready queue capacity");
    Expect(playout.minReadyQueueDepth == 0, "backend playout health stores min ready queue depth");
    Expect(playout.maxReadyQueueDepth == 3, "backend playout health stores max ready queue depth");
    Expect(playout.readyQueueZeroDepthCount == 4, "backend playout health stores zero-depth queue samples");
    Expect(playout.readyQueueDroppedCount == 2, "backend playout health stores queue dropped count");
    Expect(playout.readyQueueUnderrunCount == 1, "backend playout health stores queue underrun count");
    Expect(playout.outputRenderMilliseconds == 8.5, "backend playout health stores output render duration");
    Expect(playout.smoothedOutputRenderMilliseconds == 7.25, "backend playout health stores smoothed output render duration");
    Expect(playout.maxOutputRenderMilliseconds == 12.0, "backend playout health stores max output render duration");
    Expect(playout.outputFrameAcquireMilliseconds == 1.0, "backend playout health stores output frame acquire duration");
    Expect(playout.outputFrameRenderRequestMilliseconds == 6.5, "backend playout health stores output render request duration");
    Expect(playout.outputFrameEndAccessMilliseconds == 0.5, "backend playout health stores output frame end access duration");
    Expect(playout.completedFrameIndex == 8, "backend playout health stores completed index");
    Expect(playout.scheduledFrameIndex == 11, "backend playout health stores scheduled index");
    Expect(playout.scheduledLeadFrames == 3, "backend playout health stores synthetic scheduled lead");
    Expect(playout.measuredLagFrames == 2, "backend playout health stores measured lag");
    Expect(playout.catchUpFrames == 2, "backend playout health stores catch-up frames");
    Expect(playout.lateStreak == 1, "backend playout health stores late streak");
    Expect(playout.dropStreak == 2, "backend playout health stores drop streak");
    Expect(playout.lateFrameCount == 5, "backend playout health stores late frame count");
    Expect(playout.droppedFrameCount == 3, "backend playout health stores dropped frame count");
    Expect(playout.flushedFrameCount == 1, "backend playout health stores flushed frame count");
    Expect(playout.degraded, "backend playout health stores degraded state");
    Expect(playout.statusMessage == "Output underrun", "backend playout health stores status message");
    // Second sample via the non-blocking Try variant; same positional layout
    // as the call above, with negative durations this time.
    Expect(telemetry.TryRecordBackendPlayoutHealth(
        "Running",
        "Completed",
        2,
        4,
        13,
        1,
        3,
        4,
        11,
        2,
        1,
        -5.0,
        -4.0,
        -3.0,
        -2.0,
        -1.0,
        -0.5,
        9,
        12,
        3,
        0,
        0,
        0,
        0,
        5,
        3,
        1,
        false,
        ""),
        "try backend playout health succeeds when uncontended");
    // The full snapshot must embed the latest backend playout sample.
    const HealthTelemetry::Snapshot snapshot = telemetry.GetSnapshot();
    Expect(snapshot.backendPlayout.lifecycleState == "Running", "full health snapshot includes backend playout state");
    Expect(!snapshot.backendPlayout.degraded, "full health snapshot includes backend degraded state");
}
// Exercises output-render pipeline timing records. Sequence:
//  1. record a queue wait of 2.5 ms (retained in the snapshot),
//  2. record one timing sample with positive durations and flags (true, true, false),
//  3. try-record a second sample with all-negative durations and flags (false, false, true).
// The snapshot is read after step 3, so every per-stage duration reflects the
// latest (negative) sample clamped to zero, while the three boolean flags
// accumulate into miss/fallback counters (1 each across the two samples).
// The positional meaning of the eleven duration arguments is inferred from
// the Expect messages below — TODO confirm against the method signature.
void TestOutputRenderPipelineTiming()
{
    HealthTelemetry telemetry;
    telemetry.RecordOutputRenderQueueWait(2.5);
    telemetry.RecordOutputRenderPipelineTiming(1.0, 0.5, 0.25, 0.75, 0.125, 0.375, 0.1, 0.2, 0.3, 0.4, 3.5, true, true, false);
    Expect(telemetry.TryRecordOutputRenderPipelineTiming(-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, false, false, true),
        "try output render timing succeeds when uncontended");
    const HealthTelemetry::BackendPlayoutSnapshot playout = telemetry.GetBackendPlayoutSnapshot();
    Expect(playout.outputRenderQueueWaitMilliseconds == 2.5, "output render timing stores queue wait");
    Expect(playout.outputRenderDrawMilliseconds == 0.0, "output render timing clamps draw duration");
    Expect(playout.outputReadbackFenceWaitMilliseconds == 0.0, "output render timing clamps fence wait duration");
    Expect(playout.outputReadbackMapMilliseconds == 0.0, "output render timing clamps map duration");
    Expect(playout.outputReadbackCopyMilliseconds == 0.0, "output render timing clamps readback copy duration");
    Expect(playout.outputCachedCopyMilliseconds == 0.0, "output render timing clamps cached copy duration");
    Expect(playout.outputAsyncQueueMilliseconds == 0.0, "output render timing clamps async queue duration");
    Expect(playout.outputAsyncQueueBufferMilliseconds == 0.0, "output render timing clamps async queue buffer duration");
    Expect(playout.outputAsyncQueueSetupMilliseconds == 0.0, "output render timing clamps async queue setup duration");
    Expect(playout.outputAsyncQueueReadPixelsMilliseconds == 0.0, "output render timing clamps async queue read pixels duration");
    Expect(playout.outputAsyncQueueFenceMilliseconds == 0.0, "output render timing clamps async queue fence duration");
    Expect(playout.outputSyncReadMilliseconds == 0.0, "output render timing clamps sync read duration");
    Expect(playout.outputAsyncReadbackMissCount == 1, "output render timing counts async readback misses");
    Expect(playout.outputCachedFallbackCount == 1, "output render timing counts cached fallbacks");
    Expect(playout.outputSyncFallbackCount == 1, "output render timing counts sync fallbacks");
}
void TestSystemMemoryPlayoutStats()
{
HealthTelemetry telemetry;
telemetry.RecordSystemMemoryPlayoutStats(2, 3, 1, 4, 5, 6, 12.5, 24.0);
HealthTelemetry::BackendPlayoutSnapshot playout = telemetry.GetBackendPlayoutSnapshot();
Expect(playout.systemFramePoolFree == 2, "system-memory playout stores free frame count");
Expect(playout.systemFramePoolReady == 3, "system-memory playout stores ready frame count");
Expect(playout.systemFramePoolScheduled == 1, "system-memory playout stores scheduled frame count");
Expect(playout.systemFrameUnderrunCount == 4, "system-memory playout stores underrun count");
Expect(playout.systemFrameRepeatCount == 5, "system-memory playout stores repeat count");
Expect(playout.systemFrameDropCount == 6, "system-memory playout stores drop count");
Expect(playout.systemFrameAgeAtScheduleMilliseconds == 12.5, "system-memory playout stores schedule age");
Expect(playout.systemFrameAgeAtCompletionMilliseconds == 24.0, "system-memory playout stores completion age");
Expect(telemetry.TryRecordSystemMemoryPlayoutStats(1, 0, 2, 7, 8, 9, -1.0, -2.0),
"try system-memory playout stats succeeds when uncontended");
playout = telemetry.GetBackendPlayoutSnapshot();
Expect(playout.systemFramePoolFree == 1, "try system-memory playout stores free frame count");
Expect(playout.systemFramePoolReady == 0, "try system-memory playout stores ready frame count");
Expect(playout.systemFramePoolScheduled == 2, "try system-memory playout stores scheduled frame count");
Expect(playout.systemFrameUnderrunCount == 7, "try system-memory playout stores underrun count");
Expect(playout.systemFrameRepeatCount == 8, "try system-memory playout stores repeat count");
Expect(playout.systemFrameDropCount == 9, "try system-memory playout stores drop count");
Expect(playout.systemFrameAgeAtScheduleMilliseconds == 0.0, "system-memory playout clamps negative schedule age");
Expect(playout.systemFrameAgeAtCompletionMilliseconds == 0.0, "system-memory playout clamps negative completion age");
}
void TestDeckLinkBufferTelemetry()
{
HealthTelemetry telemetry;
telemetry.RecordDeckLinkBufferTelemetry(true, 4, 5, 0.25, 2);
HealthTelemetry::BackendPlayoutSnapshot playout = telemetry.GetBackendPlayoutSnapshot();
Expect(playout.actualDeckLinkBufferedFramesAvailable, "DeckLink buffer telemetry records availability");
Expect(playout.actualDeckLinkBufferedFrames == 4, "DeckLink buffer telemetry stores actual device depth");
Expect(playout.targetDeckLinkBufferedFrames == 5, "DeckLink buffer telemetry stores target device depth");
Expect(playout.deckLinkScheduleCallMilliseconds == 0.25, "DeckLink buffer telemetry stores schedule call duration");
Expect(playout.deckLinkScheduleFailureCount == 2, "DeckLink buffer telemetry stores schedule failures");
Expect(telemetry.TryRecordDeckLinkBufferTelemetry(false, 9, 3, -1.0, 7),
"try DeckLink buffer telemetry succeeds when uncontended");
playout = telemetry.GetBackendPlayoutSnapshot();
Expect(!playout.actualDeckLinkBufferedFramesAvailable, "DeckLink buffer telemetry records unavailable device depth");
Expect(playout.actualDeckLinkBufferedFrames == 0, "unavailable DeckLink device depth clears actual count");
Expect(playout.targetDeckLinkBufferedFrames == 3, "try DeckLink buffer telemetry stores target device depth");
Expect(playout.deckLinkScheduleCallMilliseconds == 0.0, "DeckLink buffer telemetry clamps negative schedule call duration");
Expect(playout.deckLinkScheduleFailureCount == 7, "try DeckLink buffer telemetry stores schedule failures");
}
} }
int main() int main()
@@ -60,6 +258,11 @@ int main()
TestRuntimeEventQueueMetrics(); TestRuntimeEventQueueMetrics();
TestRuntimeEventDispatchStats(); TestRuntimeEventDispatchStats();
TestRuntimeEventTryRecord(); TestRuntimeEventTryRecord();
TestPersistenceWriteHealth();
TestBackendPlayoutHealth();
TestOutputRenderPipelineTiming();
TestSystemMemoryPlayoutStats();
TestDeckLinkBufferTelemetry();
if (gFailures != 0) if (gFailures != 0)
{ {

View File

@@ -0,0 +1,142 @@
#include "OutputProductionController.h"
#include <iostream>
#include <string>
namespace
{
int gFailures = 0;
// Logs a FAIL line and bumps the failure counter whenever `condition` is false.
void Expect(bool condition, const char* message)
{
    if (!condition)
    {
        std::cerr << "FAIL: " << message << "\n";
        ++gFailures;
    }
}
void TestLowQueueRequestsProductionToTarget()
{
VideoPlayoutPolicy policy;
policy.targetReadyFrames = 3;
policy.maxReadyFrames = 5;
OutputProductionController controller(policy);
OutputProductionPressure pressure;
pressure.readyQueueDepth = 1;
pressure.readyQueueCapacity = 5;
const OutputProductionDecision decision = controller.Decide(pressure);
Expect(decision.action == OutputProductionAction::Produce, "low ready queue requests production");
Expect(decision.requestedFrames == 2, "low ready queue requests enough frames to reach target");
Expect(decision.targetReadyFrames == 3, "decision reports effective target");
Expect(decision.maxReadyFrames == 5, "decision reports effective max");
Expect(decision.reason == "ready-queue-below-target", "low queue decision names reason");
}
void TestFullQueueThrottles()
{
VideoPlayoutPolicy policy;
policy.targetReadyFrames = 2;
policy.maxReadyFrames = 4;
OutputProductionController controller(policy);
OutputProductionPressure pressure;
pressure.readyQueueDepth = 4;
pressure.readyQueueCapacity = 4;
const OutputProductionDecision decision = controller.Decide(pressure);
Expect(decision.action == OutputProductionAction::Throttle, "full ready queue throttles production");
Expect(decision.requestedFrames == 0, "full ready queue requests no frames");
Expect(decision.reason == "ready-queue-full", "full queue decision names reason");
}
void TestAtTargetWaitsWithoutPressure()
{
VideoPlayoutPolicy policy;
policy.targetReadyFrames = 2;
policy.maxReadyFrames = 4;
OutputProductionController controller(policy);
OutputProductionPressure pressure;
pressure.readyQueueDepth = 2;
pressure.readyQueueCapacity = 4;
const OutputProductionDecision decision = controller.Decide(pressure);
Expect(decision.action == OutputProductionAction::Wait, "ready queue at target waits without pressure");
Expect(decision.requestedFrames == 0, "wait decision requests no frames");
Expect(decision.reason == "ready-queue-at-target", "wait decision names reason");
}
void TestLateDropPressureRequestsHeadroom()
{
VideoPlayoutPolicy policy;
policy.targetReadyFrames = 2;
policy.maxReadyFrames = 4;
OutputProductionController controller(policy);
OutputProductionPressure pressure;
pressure.readyQueueDepth = 2;
pressure.readyQueueCapacity = 4;
pressure.lateStreak = 1;
OutputProductionDecision decision = controller.Decide(pressure);
Expect(decision.action == OutputProductionAction::Produce, "late pressure requests extra headroom");
Expect(decision.requestedFrames == 1, "late pressure requests one frame");
Expect(decision.reason == "playout-pressure", "late pressure decision names reason");
pressure.lateStreak = 0;
pressure.dropStreak = 2;
decision = controller.Decide(pressure);
Expect(decision.action == OutputProductionAction::Produce, "drop pressure requests extra headroom");
pressure.dropStreak = 0;
pressure.readyQueueUnderrunCount = 1;
decision = controller.Decide(pressure);
Expect(decision.action == OutputProductionAction::Produce, "underrun pressure requests extra headroom");
}
void TestPolicyNormalizesAndClampsToCapacity()
{
VideoPlayoutPolicy policy;
policy.targetReadyFrames = 0;
policy.maxReadyFrames = 8;
OutputProductionController controller(policy);
OutputProductionPressure pressure;
pressure.readyQueueDepth = 1;
pressure.readyQueueCapacity = 3;
const OutputProductionDecision decision = controller.Decide(pressure);
Expect(decision.action == OutputProductionAction::Wait, "normalized target at current depth waits");
Expect(decision.targetReadyFrames == 1, "target normalizes to at least one frame");
Expect(decision.maxReadyFrames == 3, "max ready frames clamps to queue capacity");
}
// Every production action enumerator must map to its display name.
void TestActionNames()
{
    const std::string produce = OutputProductionActionName(OutputProductionAction::Produce);
    const std::string wait = OutputProductionActionName(OutputProductionAction::Wait);
    const std::string throttle = OutputProductionActionName(OutputProductionAction::Throttle);
    Expect(produce == "Produce", "produce action has name");
    Expect(wait == "Wait", "wait action has name");
    Expect(throttle == "Throttle", "throttle action has name");
}
}
// Runs every OutputProductionController test; exit code 1 on any failure.
int main()
{
    TestLowQueueRequestsProductionToTarget();
    TestFullQueueThrottles();
    TestAtTargetWaitsWithoutPressure();
    TestLateDropPressureRequestsHeadroom();
    TestPolicyNormalizesAndClampsToCapacity();
    TestActionNames();
    if (gFailures == 0)
    {
        std::cout << "OutputProductionController tests passed.\n";
        return 0;
    }
    std::cerr << gFailures << " OutputProductionController test failure(s).\n";
    return 1;
}

View File

@@ -0,0 +1,210 @@
#include "PersistenceWriter.h"
#include <condition_variable>
#include <chrono>
#include <filesystem>
#include <iostream>
#include <mutex>
#include <string>
#include <vector>
namespace
{
int gFailures = 0;
// Logs a FAIL line and bumps the failure counter whenever `condition` is false.
void Expect(bool condition, const char* message)
{
    if (!condition)
    {
        std::cerr << "FAIL: " << message << "\n";
        ++gFailures;
    }
}
// Builds a debounce-enabled runtime-state snapshot that targets a shared
// temp-directory test file and carries the given contents.
PersistenceSnapshot MakeRuntimeSnapshot(const std::string& contents)
{
    PersistenceSnapshot built;
    built.targetKind = PersistenceTargetKind::RuntimeState;
    built.targetPath = std::filesystem::temp_directory_path() / "video-shader-persistence-writer-test.json";
    built.contents = contents;
    built.reason = "test";
    built.debounceKey = "runtime-state";
    built.debounceAllowed = true;
    return built;
}
void TestDebouncedRequestsCoalesceToNewestSnapshot()
{
std::mutex mutex;
std::vector<PersistenceSnapshot> writtenSnapshots;
PersistenceWriter writer(
std::chrono::milliseconds(1000),
[&](const PersistenceSnapshot& snapshot, std::string&) {
std::lock_guard<std::mutex> lock(mutex);
writtenSnapshots.push_back(snapshot);
return true;
});
std::string error;
Expect(writer.EnqueueSnapshot(MakeRuntimeSnapshot("first"), error), "first debounced snapshot enqueues");
Expect(writer.EnqueueSnapshot(MakeRuntimeSnapshot("second"), error), "second debounced snapshot enqueues");
PersistenceWriterMetrics metrics = writer.GetMetrics();
Expect(metrics.pendingCount == 1, "debounced snapshots share one pending slot");
Expect(metrics.enqueuedCount == 1, "first debounced snapshot counts as enqueue");
Expect(metrics.coalescedCount == 1, "second debounced snapshot counts as coalesced");
Expect(writer.StopAndFlush(std::chrono::seconds(1), error), "flush drains debounced snapshots");
{
std::lock_guard<std::mutex> lock(mutex);
Expect(writtenSnapshots.size() == 1, "flush writes one coalesced snapshot");
Expect(!writtenSnapshots.empty() && writtenSnapshots[0].contents == "second", "coalesced writer keeps newest snapshot");
}
metrics = writer.GetMetrics();
Expect(metrics.pendingCount == 0, "flush drains pending debounced snapshot");
Expect(metrics.writtenCount == 1, "flush records one successful write");
}
void TestImmediateRequestsAreNotCoalesced()
{
std::mutex mutex;
std::vector<PersistenceSnapshot> writtenSnapshots;
PersistenceWriter writer(
std::chrono::milliseconds(1000),
[&](const PersistenceSnapshot& snapshot, std::string&) {
std::lock_guard<std::mutex> lock(mutex);
writtenSnapshots.push_back(snapshot);
return true;
});
PersistenceSnapshot first = MakeRuntimeSnapshot("first");
first.debounceAllowed = false;
PersistenceSnapshot second = MakeRuntimeSnapshot("second");
second.debounceAllowed = false;
std::string error;
Expect(writer.EnqueueSnapshot(first, error), "first immediate snapshot enqueues");
Expect(writer.EnqueueSnapshot(second, error), "second immediate snapshot enqueues");
Expect(writer.StopAndFlush(std::chrono::seconds(1), error), "flush drains immediate snapshots");
{
std::lock_guard<std::mutex> lock(mutex);
Expect(writtenSnapshots.size() == 2, "immediate snapshots are written independently");
Expect(writtenSnapshots.size() == 2 && writtenSnapshots[0].contents == "first" && writtenSnapshots[1].contents == "second",
"immediate snapshots preserve order");
}
}
void TestWriteFailureReportsStructuredResult()
{
std::vector<PersistenceWriteResult> results;
PersistenceWriter writer(
std::chrono::milliseconds(1),
[](const PersistenceSnapshot&, std::string& error) {
error = "simulated failure";
return false;
});
writer.SetResultCallback([&results](const PersistenceWriteResult& result) {
results.push_back(result);
});
PersistenceSnapshot snapshot = MakeRuntimeSnapshot("payload");
snapshot.debounceAllowed = false;
snapshot.reason = "failure-test";
std::string error;
Expect(writer.EnqueueSnapshot(snapshot, error), "failing snapshot still enqueues");
Expect(writer.StopAndFlush(std::chrono::seconds(1), error), "flush reports failing snapshot result");
Expect(results.size() == 1, "writer reports one failure result");
Expect(!results.empty() && !results[0].succeeded, "writer result records failure");
Expect(!results.empty() && results[0].reason == "failure-test", "writer result preserves reason");
Expect(!results.empty() && results[0].errorMessage == "simulated failure", "writer result preserves error message");
Expect(!results.empty() && !results[0].newerRequestPending, "writer result reports no newer pending request");
Expect(writer.GetMetrics().failedCount == 1, "writer metrics count failed writes");
}
void TestShutdownFlushDrainsPendingSnapshotAndRejectsNewRequests()
{
std::mutex mutex;
std::vector<PersistenceSnapshot> writtenSnapshots;
PersistenceWriter writer(
std::chrono::milliseconds(1000),
[&](const PersistenceSnapshot& snapshot, std::string&) {
std::lock_guard<std::mutex> lock(mutex);
writtenSnapshots.push_back(snapshot);
return true;
});
std::string error;
Expect(writer.EnqueueSnapshot(MakeRuntimeSnapshot("pending"), error), "pending snapshot enqueues before shutdown");
Expect(writer.StopAndFlush(std::chrono::seconds(1), error), "bounded shutdown flush completes");
{
std::lock_guard<std::mutex> lock(mutex);
Expect(writtenSnapshots.size() == 1, "shutdown flush writes pending debounced snapshot");
Expect(!writtenSnapshots.empty() && writtenSnapshots[0].contents == "pending", "shutdown flush preserves pending snapshot contents");
}
Expect(!writer.EnqueueSnapshot(MakeRuntimeSnapshot("late"), error), "writer rejects requests after shutdown flush");
}
// A sink that blocks past the flush deadline should make StopAndFlush time
// out with a useful error, and a later retry (after the sink is released)
// should complete the shutdown flush.
void TestShutdownFlushTimeoutCanBeRetried()
{
    std::mutex mutex;
    std::condition_variable condition;
    // Handshake state, guarded by `mutex`: the sink sets sinkStarted, then
    // parks until the test sets releaseSink.
    bool sinkStarted = false;
    bool releaseSink = false;
    PersistenceWriter writer(
        std::chrono::milliseconds(1),
        [&](const PersistenceSnapshot&, std::string&) {
            std::unique_lock<std::mutex> lock(mutex);
            sinkStarted = true;
            condition.notify_all();
            // Block the writer thread until released, forcing the first
            // StopAndFlush below to hit its 10 ms deadline.
            condition.wait(lock, [&]() { return releaseSink; });
            return true;
        });
    PersistenceSnapshot snapshot = MakeRuntimeSnapshot("slow");
    snapshot.debounceAllowed = false; // immediate write so the sink runs promptly
    std::string error;
    Expect(writer.EnqueueSnapshot(snapshot, error), "slow snapshot enqueues");
    {
        // Wait (bounded) until the sink has actually started before timing the flush.
        std::unique_lock<std::mutex> lock(mutex);
        Expect(condition.wait_for(lock, std::chrono::seconds(1), [&]() { return sinkStarted; }),
            "slow sink starts before timeout test");
    }
    Expect(!writer.StopAndFlush(std::chrono::milliseconds(10), error), "bounded shutdown flush reports timeout");
    Expect(error.find("Timed out") != std::string::npos, "shutdown timeout returns a useful error");
    {
        std::lock_guard<std::mutex> lock(mutex);
        releaseSink = true;
    }
    condition.notify_all();
    error.clear();
    // With the sink unblocked, a second bounded flush should now succeed.
    Expect(writer.StopAndFlush(std::chrono::seconds(1), error), "shutdown flush can complete after earlier timeout");
}
}
// Runs every persistence writer test; exit code 1 on any failure.
int main()
{
    TestDebouncedRequestsCoalesceToNewestSnapshot();
    TestImmediateRequestsAreNotCoalesced();
    TestWriteFailureReportsStructuredResult();
    TestShutdownFlushDrainsPendingSnapshotAndRejectsNewRequests();
    TestShutdownFlushTimeoutCanBeRetried();
    if (gFailures == 0)
    {
        std::cout << "Persistence writer tests passed.\n";
        return 0;
    }
    std::cerr << gFailures << " persistence writer test(s) failed.\n";
    return 1;
}

View File

@@ -0,0 +1,172 @@
#include "RenderCadenceController.h"
#include <chrono>
#include <iostream>
#include <string>
namespace
{
int gFailures = 0;
using Clock = RenderCadenceController::Clock;
using Duration = RenderCadenceController::Duration;
using TimePoint = RenderCadenceController::TimePoint;
// Logs a FAIL line and bumps the failure counter whenever `condition` is false.
void Expect(bool condition, const char* message)
{
    if (!condition)
    {
        std::cerr << "FAIL: " << message << "\n";
        ++gFailures;
    }
}
// Converts a whole-millisecond count into the controller's native duration type.
Duration Ms(int64_t value)
{
    const std::chrono::milliseconds raw(value);
    return std::chrono::duration_cast<Duration>(raw);
}
void TestExactCadenceAdvancesFrameIndexAndNextTick()
{
RenderCadenceController controller;
const TimePoint start = Clock::time_point(Ms(1000));
controller.Configure(Ms(20), start);
RenderCadenceDecision first = controller.Tick(start);
Expect(first.action == RenderCadenceAction::Render, "first exact tick renders");
Expect(first.frameIndex == 0, "first exact tick renders frame zero");
Expect(first.renderTargetTime == start, "first exact target is configured start");
Expect(first.nextRenderTime == start + Ms(20), "first exact tick advances next render time");
Expect(first.skippedTicks == 0, "first exact tick skips no ticks");
Expect(first.lateness == Duration::zero(), "first exact tick records no lateness");
RenderCadenceDecision second = controller.Tick(start + Ms(20));
Expect(second.action == RenderCadenceAction::Render, "second exact tick renders");
Expect(second.frameIndex == 1, "second exact tick renders frame one");
Expect(controller.NextFrameIndex() == 2, "controller tracks next frame index after exact ticks");
Expect(controller.Metrics().renderedFrameCount == 2, "metrics count exact rendered frames");
}
void TestEarlyTickWaitsWithoutAdvancing()
{
RenderCadenceController controller;
const TimePoint start = Clock::time_point(Ms(0));
controller.Configure(Ms(20), start);
(void)controller.Tick(start);
RenderCadenceDecision decision = controller.Tick(start + Ms(10));
Expect(decision.action == RenderCadenceAction::Wait, "early tick waits");
Expect(decision.waitDuration == Ms(10), "early tick reports wait duration");
Expect(decision.frameIndex == 1, "early tick reports next pending frame");
Expect(controller.NextFrameIndex() == 1, "early tick does not advance frame index");
Expect(controller.NextRenderTime() == start + Ms(20), "early tick does not advance next render time");
}
void TestSlightLatenessRendersAndRecordsMetrics()
{
RenderCadencePolicy policy;
policy.skipThresholdFrames = 3.0;
RenderCadenceController controller;
const TimePoint start = Clock::time_point(Ms(0));
controller.Configure(Ms(20), start, policy);
RenderCadenceDecision decision = controller.Tick(start + Ms(5));
Expect(decision.action == RenderCadenceAction::Render, "slightly late tick renders");
Expect(decision.frameIndex == 0, "slightly late tick keeps pending frame");
Expect(decision.skippedTicks == 0, "slightly late tick skips no ticks");
Expect(decision.lateness == Ms(5), "slightly late tick reports lateness");
Expect(controller.Metrics().lateFrameCount == 1, "metrics count late rendered frame");
Expect(controller.Metrics().lastLateness == Ms(5), "metrics keep last lateness");
Expect(controller.Metrics().maxLateness == Ms(5), "metrics keep max lateness");
}
void TestLargeLatenessSkipsTicksAccordingToPolicy()
{
RenderCadencePolicy policy;
policy.skipLateTicks = true;
policy.skipThresholdFrames = 2.0;
policy.maxSkippedTicksPerDecision = 8;
RenderCadenceController controller;
const TimePoint start = Clock::time_point(Ms(0));
controller.Configure(Ms(20), start, policy);
RenderCadenceDecision decision = controller.Tick(start + Ms(70));
Expect(decision.action == RenderCadenceAction::Render, "large late tick renders newest allowed frame");
Expect(decision.skippedTicks == 3, "large late tick skips elapsed render ticks");
Expect(decision.frameIndex == 3, "large late tick renders skipped-to frame");
Expect(decision.renderTargetTime == start + Ms(60), "large late tick targets newest elapsed tick");
Expect(decision.lateness == Ms(10), "large late tick measures residual lateness");
Expect(controller.NextFrameIndex() == 4, "large late tick advances past rendered frame");
Expect(controller.NextRenderTime() == start + Ms(80), "large late tick advances to following cadence");
Expect(controller.Metrics().skippedTickCount == 3, "metrics count skipped ticks");
}
void TestSkipPolicyCanDisableOrCapSkippedTicks()
{
const TimePoint start = Clock::time_point(Ms(0));
RenderCadencePolicy disabledPolicy;
disabledPolicy.skipLateTicks = false;
RenderCadenceController disabledController;
disabledController.Configure(Ms(20), start, disabledPolicy);
RenderCadenceDecision disabled = disabledController.Tick(start + Ms(90));
Expect(disabled.skippedTicks == 0, "disabled skip policy renders pending frame");
Expect(disabled.frameIndex == 0, "disabled skip policy preserves pending frame index");
RenderCadencePolicy cappedPolicy;
cappedPolicy.skipThresholdFrames = 1.0;
cappedPolicy.maxSkippedTicksPerDecision = 2;
RenderCadenceController cappedController;
cappedController.Configure(Ms(20), start, cappedPolicy);
RenderCadenceDecision capped = cappedController.Tick(start + Ms(90));
Expect(capped.skippedTicks == 2, "skip policy caps skipped ticks");
Expect(capped.frameIndex == 2, "capped skip renders capped frame index");
}
// Verifies Reset() restarts the cadence clock, frame numbering, and metrics,
// and that the first tick after a reset behaves like a brand-new cadence.
void TestResetRestartsCadenceAndMetrics()
{
RenderCadenceController controller;
const TimePoint start = Clock::time_point(Ms(0));
controller.Configure(Ms(20), start);
// Advance once so there is rendered state for Reset() to clear.
(void)controller.Tick(start + Ms(50));
const TimePoint restarted = start + Ms(200);
controller.Reset(restarted);
Expect(controller.NextFrameIndex() == 0, "reset restarts frame index");
Expect(controller.NextRenderTime() == restarted, "reset restarts next render time");
Expect(controller.Metrics().renderedFrameCount == 0, "reset clears rendered metrics");
RenderCadenceDecision decision = controller.Tick(restarted);
Expect(decision.action == RenderCadenceAction::Render, "reset cadence renders at new start");
Expect(decision.frameIndex == 0, "reset cadence renders frame zero");
}
// Spot-checks that the cadence action display names are stable.
void TestActionNames()
{
    const std::string renderName("Render");
    const std::string waitName("Wait");
    Expect(renderName == RenderCadenceActionName(RenderCadenceAction::Render), "render action has name");
    Expect(waitName == RenderCadenceActionName(RenderCadenceAction::Wait), "wait action has name");
}
}
// Entry point: runs every RenderCadenceController scenario in order and
// reports the aggregate result through the process exit code.
int main()
{
    TestExactCadenceAdvancesFrameIndexAndNextTick();
    TestEarlyTickWaitsWithoutAdvancing();
    TestSlightLatenessRendersAndRecordsMetrics();
    TestLargeLatenessSkipsTicksAccordingToPolicy();
    TestSkipPolicyCanDisableOrCapSkippedTicks();
    TestResetRestartsCadenceAndMetrics();
    TestActionNames();
    if (gFailures == 0)
    {
        std::cout << "RenderCadenceController tests passed.\n";
        return 0;
    }
    std::cerr << gFailures << " RenderCadenceController test failure(s).\n";
    return 1;
}

View File

@@ -0,0 +1,209 @@
#include "RenderOutputQueue.h"
#include <iostream>
namespace
{
int gFailures = 0;       // failed expectations across the whole test binary
int gReleasedFrames = 0; // frames released through the CountReleasedFrame hook
// Logs and counts a failure whenever condition is false; silent on success.
void Expect(bool condition, const char* message)
{
    if (!condition)
    {
        std::cerr << "FAIL: " << message << "\n";
        ++gFailures;
    }
}
// Builds a ready frame whose nativeFrame pointer encodes (index + 1) so each
// test frame is distinguishable; no release hook is attached.
RenderOutputFrame MakeFrame(uint64_t index)
{
    const uintptr_t token = static_cast<uintptr_t>(index) + 1;
    RenderOutputFrame frame;
    frame.frameIndex = index;
    frame.frame.nativeFrame = reinterpret_cast<void*>(token);
    return frame;
}
// Release hook installed on owned frames: counts each release and nulls the
// native pointer so a double release would be visible as a missing count.
void CountReleasedFrame(VideoIOOutputFrame& frame)
{
    if (frame.nativeFrame == nullptr)
        return;
    ++gReleasedFrames;
    frame.nativeFrame = nullptr;
}
// Like MakeFrame, but wires in the counting release hook so ownership
// transfers and drops can be asserted via gReleasedFrames.
RenderOutputFrame MakeOwnedFrame(uint64_t index)
{
    RenderOutputFrame owned = MakeFrame(index);
    owned.releaseFrame = CountReleasedFrame;
    return owned;
}
// Verifies FIFO ordering: frames pop in the order they were pushed.
void TestQueuePreservesOrdering()
{
VideoPlayoutPolicy policy;
policy.maxReadyFrames = 3;
RenderOutputQueue queue(policy);
Expect(queue.Push(MakeFrame(1)), "first ready frame pushes");
Expect(queue.Push(MakeFrame(2)), "second ready frame pushes");
RenderOutputFrame frame;
Expect(queue.TryPop(frame), "first ready frame pops");
Expect(frame.frameIndex == 1, "queue pops first frame first");
Expect(queue.TryPop(frame), "second ready frame pops");
Expect(frame.frameIndex == 2, "queue pops second frame second");
}
// Verifies that overflowing a bounded queue evicts the oldest frame and
// records the eviction in the drop metrics.
void TestBoundedQueueDropsOldestFrame()
{
VideoPlayoutPolicy policy;
policy.maxReadyFrames = 2;
RenderOutputQueue queue(policy);
queue.Push(MakeFrame(1));
queue.Push(MakeFrame(2));
// Third push exceeds capacity: frame 1 should be evicted.
queue.Push(MakeFrame(3));
RenderOutputQueueMetrics metrics = queue.GetMetrics();
Expect(metrics.depth == 2, "bounded queue depth stays at capacity");
Expect(metrics.droppedCount == 1, "bounded queue counts dropped oldest frame");
RenderOutputFrame frame;
Expect(queue.TryPop(frame), "bounded queue pops after drop");
Expect(frame.frameIndex == 2, "oldest frame was dropped when queue overflowed");
}
// Verifies that an overflow-dropped frame is released through its hook, while
// a popped frame transfers ownership without triggering a release.
void TestOverflowReleasesDroppedFrame()
{
gReleasedFrames = 0;
VideoPlayoutPolicy policy;
policy.targetReadyFrames = 1;
policy.maxReadyFrames = 1;
RenderOutputQueue queue(policy);
queue.Push(MakeOwnedFrame(1));
// Capacity is 1: pushing frame 2 must release frame 1.
queue.Push(MakeOwnedFrame(2));
Expect(gReleasedFrames == 1, "overflow releases dropped ready frame");
RenderOutputFrame frame;
Expect(queue.TryPop(frame), "newest owned frame remains queued");
Expect(frame.frameIndex == 2, "overflow keeps newest owned frame");
Expect(gReleasedFrames == 1, "pop transfers ownership without releasing");
}
// Verifies explicit DropOldestFrame(): it releases the dropped frame, updates
// depth/dropped metrics, keeps the newest frame, and fails on an empty queue.
void TestDropOldestFrameReleasesFrame()
{
gReleasedFrames = 0;
VideoPlayoutPolicy policy;
policy.maxReadyFrames = 2;
RenderOutputQueue queue(policy);
queue.Push(MakeOwnedFrame(1));
queue.Push(MakeOwnedFrame(2));
Expect(queue.DropOldestFrame(), "oldest ready frame can be explicitly dropped");
Expect(gReleasedFrames == 1, "explicit drop releases oldest frame");
RenderOutputQueueMetrics metrics = queue.GetMetrics();
Expect(metrics.depth == 1, "explicit drop reduces queue depth");
Expect(metrics.droppedCount == 1, "explicit drop increments dropped count");
RenderOutputFrame frame;
Expect(queue.TryPop(frame), "newest frame remains after explicit drop");
Expect(frame.frameIndex == 2, "explicit drop keeps newest frame");
// Queue is empty now, so a further drop must be rejected.
Expect(!queue.DropOldestFrame(), "empty queue cannot drop a frame");
}
void TestUnderrunIsCounted()
{
RenderOutputQueue queue;
RenderOutputFrame frame;
Expect(!queue.TryPop(frame), "empty queue reports underrun");
RenderOutputQueueMetrics metrics = queue.GetMetrics();
Expect(metrics.underrunCount == 1, "empty pop increments underrun count");
}
// Verifies that reconfiguring to a smaller capacity trims the oldest frames,
// counts the trimmed frames as drops, and keeps the newest frame.
void TestConfigureShrinksDepthToNewCapacity()
{
VideoPlayoutPolicy policy;
policy.maxReadyFrames = 4;
RenderOutputQueue queue(policy);
queue.Push(MakeFrame(1));
queue.Push(MakeFrame(2));
queue.Push(MakeFrame(3));
// Shrink capacity from 4 to 1 with three frames queued.
VideoPlayoutPolicy smallerPolicy;
smallerPolicy.targetReadyFrames = 1;
smallerPolicy.maxReadyFrames = 1;
queue.Configure(smallerPolicy);
RenderOutputQueueMetrics metrics = queue.GetMetrics();
Expect(metrics.depth == 1, "configure trims queue to new capacity");
Expect(metrics.droppedCount == 2, "configure counts trimmed frames as drops");
RenderOutputFrame frame;
Expect(queue.TryPop(frame), "trimmed queue still has newest frame");
Expect(frame.frameIndex == 3, "configure keeps newest ready frame");
}
// Verifies that frames trimmed by Configure() are released through their
// hooks, not leaked, and that the surviving newest frame is still poppable.
void TestConfigureReleasesTrimmedFrames()
{
gReleasedFrames = 0;
VideoPlayoutPolicy policy;
policy.maxReadyFrames = 3;
RenderOutputQueue queue(policy);
queue.Push(MakeOwnedFrame(1));
queue.Push(MakeOwnedFrame(2));
queue.Push(MakeOwnedFrame(3));
// Shrinking to capacity 1 should release frames 1 and 2.
VideoPlayoutPolicy smallerPolicy;
smallerPolicy.targetReadyFrames = 1;
smallerPolicy.maxReadyFrames = 1;
queue.Configure(smallerPolicy);
Expect(gReleasedFrames == 2, "configure releases trimmed ready frames");
RenderOutputFrame frame;
Expect(queue.TryPop(frame), "trimmed owned queue still has newest frame");
Expect(frame.frameIndex == 3, "configure keeps newest owned frame after release");
}
// Verifies Clear() empties the queue and releases every queued owned frame.
void TestClearReleasesQueuedFrames()
{
gReleasedFrames = 0;
RenderOutputQueue queue;
queue.Push(MakeOwnedFrame(1));
queue.Push(MakeOwnedFrame(2));
queue.Clear();
RenderOutputQueueMetrics metrics = queue.GetMetrics();
Expect(metrics.depth == 0, "clear empties ready queue");
Expect(gReleasedFrames == 2, "clear releases queued ready frames");
}
}
// Entry point: runs every RenderOutputQueue scenario in order and reports
// the aggregate result through the process exit code.
int main()
{
    TestQueuePreservesOrdering();
    TestBoundedQueueDropsOldestFrame();
    TestOverflowReleasesDroppedFrame();
    TestDropOldestFrameReleasesFrame();
    TestUnderrunIsCounted();
    TestConfigureShrinksDepthToNewCapacity();
    TestConfigureReleasesTrimmedFrames();
    TestClearReleasesQueuedFrames();
    if (gFailures == 0)
    {
        std::cout << "RenderOutputQueue tests passed.\n";
        return 0;
    }
    std::cerr << gFailures << " render output queue test failure(s).\n";
    return 1;
}

View File

@@ -62,10 +62,19 @@ void TestRuntimeEventPayloadTypes()
Expect(rejectedMutation.errorMessage == "Unknown layer.", "mutation payload carries rejection error"); Expect(rejectedMutation.errorMessage == "Unknown layer.", "mutation payload carries rejection error");
RuntimePersistenceRequestedEvent persistence; RuntimePersistenceRequestedEvent persistence;
persistence.reason = "UpdateLayerParameter"; persistence.request = PersistenceRequest::RuntimeStateRequest("UpdateLayerParameter");
persistence.debounceAllowed = true;
Expect(RuntimeEventPayloadType(persistence) == RuntimeEventType::RuntimePersistenceRequested, "runtime persistence payload maps to persistence event type"); Expect(RuntimeEventPayloadType(persistence) == RuntimeEventType::RuntimePersistenceRequested, "runtime persistence payload maps to persistence event type");
Expect(persistence.debounceAllowed, "runtime persistence payload carries debounce policy"); Expect(persistence.request.targetKind == PersistenceTargetKind::RuntimeState, "runtime persistence payload carries target kind");
Expect(persistence.request.reason == "UpdateLayerParameter", "runtime persistence payload carries request reason");
Expect(persistence.request.debounceAllowed, "runtime persistence payload carries debounce policy");
Expect(persistence.request.debounceKey == "runtime-state", "runtime persistence payload carries debounce key");
PersistenceSnapshot persistenceSnapshot;
persistenceSnapshot.targetKind = PersistenceTargetKind::RuntimeState;
persistenceSnapshot.reason = persistence.request.reason;
persistenceSnapshot.contents = "{}";
Expect(persistenceSnapshot.reason == "UpdateLayerParameter", "persistence snapshot carries capture reason");
Expect(persistenceSnapshot.contents == "{}", "persistence snapshot carries serialized content");
FileChangeDetectedEvent fileChange; FileChangeDetectedEvent fileChange;
fileChange.path = "PollRuntimeStoreChanges"; fileChange.path = "PollRuntimeStoreChanges";
@@ -459,8 +468,7 @@ void TestAcceptedMutationFollowUps()
stateChanged.persistenceRequested = true; stateChanged.persistenceRequested = true;
RuntimePersistenceRequestedEvent persistence; RuntimePersistenceRequestedEvent persistence;
persistence.reason = mutation.action; persistence.request = PersistenceRequest::RuntimeStateRequest(mutation.action);
persistence.debounceAllowed = true;
RuntimeReloadRequestedEvent reload; RuntimeReloadRequestedEvent reload;
reload.reason = mutation.action; reload.reason = mutation.action;
@@ -487,7 +495,8 @@ void TestAcceptedMutationFollowUps()
const RuntimeEvent* persistenceEvent = harness.LastSeen(RuntimeEventType::RuntimePersistenceRequested); const RuntimeEvent* persistenceEvent = harness.LastSeen(RuntimeEventType::RuntimePersistenceRequested);
const auto* persistencePayload = persistenceEvent ? std::get_if<RuntimePersistenceRequestedEvent>(&persistenceEvent->payload) : nullptr; const auto* persistencePayload = persistenceEvent ? std::get_if<RuntimePersistenceRequestedEvent>(&persistenceEvent->payload) : nullptr;
Expect(persistencePayload && persistencePayload->reason == "SetLayerShader", "persistence follow-up preserves mutation action reason"); Expect(persistencePayload && persistencePayload->request.reason == "SetLayerShader", "persistence follow-up preserves mutation action reason");
Expect(persistencePayload && persistencePayload->request.debounceKey == "runtime-state", "persistence follow-up preserves debounce key");
} }
void TestAppLevelBroadcastAndBuildCoalescing() void TestAppLevelBroadcastAndBuildCoalescing()

View File

@@ -9,6 +9,7 @@
#include <filesystem> #include <filesystem>
#include <fstream> #include <fstream>
#include <iostream> #include <iostream>
#include <map>
#include <string> #include <string>
#include <variant> #include <variant>
#include <windows.h> #include <windows.h>
@@ -235,6 +236,19 @@ void TestRuntimeCoordinatorPersistenceEvents()
Expect(store.InitializeStore(error), "runtime store initializes in isolated fixture"); Expect(store.InitializeStore(error), "runtime store initializes in isolated fixture");
Expect(error.empty(), "runtime store initialization has no error"); Expect(error.empty(), "runtime store initialization has no error");
const PersistenceRequest snapshotRequest = PersistenceRequest::RuntimeStateRequest("unit-test");
const PersistenceSnapshot snapshot = store.BuildRuntimeStatePersistenceSnapshot(snapshotRequest);
Expect(snapshot.targetKind == PersistenceTargetKind::RuntimeState, "runtime store builds a runtime-state persistence snapshot");
Expect(snapshot.reason == "unit-test", "runtime-state persistence snapshot preserves request reason");
Expect(snapshot.targetPath.filename().string() == "runtime_state.json", "runtime-state persistence snapshot targets the runtime state file");
Expect(snapshot.contents.find("\"layers\"") != std::string::npos, "runtime-state persistence snapshot contains serialized layer state");
Expect(store.RequestPersistence(PersistenceRequest::RuntimeStateRequest("unit-test-request"), error),
"runtime store accepts runtime-state persistence requests");
PersistenceRequest unsupportedRequest;
unsupportedRequest.targetKind = PersistenceTargetKind::StackPreset;
unsupportedRequest.reason = "unsupported-unit-test";
Expect(!store.RequestPersistence(unsupportedRequest, error), "runtime store rejects unsupported persistence request targets");
RuntimeEventDispatcher dispatcher(64); RuntimeEventDispatcher dispatcher(64);
std::vector<RuntimeEvent> seenEvents; std::vector<RuntimeEvent> seenEvents;
dispatcher.SubscribeAll([&seenEvents](const RuntimeEvent& event) { dispatcher.SubscribeAll([&seenEvents](const RuntimeEvent& event) {
@@ -258,7 +272,7 @@ void TestRuntimeCoordinatorPersistenceEvents()
if (event.type != RuntimeEventType::RuntimePersistenceRequested) if (event.type != RuntimeEventType::RuntimePersistenceRequested)
continue; continue;
const auto* payload = std::get_if<RuntimePersistenceRequestedEvent>(&event.payload); const auto* payload = std::get_if<RuntimePersistenceRequestedEvent>(&event.payload);
return payload ? payload->reason : std::string(); return payload ? payload->request.reason : std::string();
} }
return std::string(); return std::string();
}; };
@@ -311,8 +325,34 @@ void TestRuntimeCoordinatorPersistenceEvents()
Expect(countEvents(overlayEvents, RuntimeEventType::OscOverlayApplied) == 1, "transient OSC overlay is observable"); Expect(countEvents(overlayEvents, RuntimeEventType::OscOverlayApplied) == 1, "transient OSC overlay is observable");
Expect(countEvents(overlayEvents, RuntimeEventType::RuntimePersistenceRequested) == 0, "transient OSC overlay does not request persistence"); Expect(countEvents(overlayEvents, RuntimeEventType::RuntimePersistenceRequested) == 0, "transient OSC overlay does not request persistence");
expectAcceptedPersistence(coordinator.CommitOscParameterByControlKey("alpha", "gain", JsonValue(0.2)), "CommitOscParameterByControlKey", RuntimeCoordinatorResult oscCommitResult = coordinator.CommitOscParameterByControlKey("alpha", "gain", JsonValue(0.2));
"accepted OSC commit is persistent"); std::vector<RuntimeEvent> oscCommitEvents = dispatchAndClear();
Expect(oscCommitResult.accepted, "accepted OSC commit updates committed session state");
Expect(!oscCommitResult.persistenceRequested, "settled OSC commit does not request persistence by default");
Expect(countEvents(oscCommitEvents, RuntimeEventType::RuntimeMutationAccepted) == 1, "settled OSC commit publishes accepted fact");
Expect(countEvents(oscCommitEvents, RuntimeEventType::RuntimeStateChanged) == 1, "settled OSC commit publishes state change");
Expect(countEvents(oscCommitEvents, RuntimeEventType::RuntimePersistenceRequested) == 0, "settled OSC commit publishes no persistence request");
RuntimeStore::StoredParameterSnapshot oscCommitSnapshot;
Expect(store.TryGetStoredParameterByControlKey("alpha", "gain", oscCommitSnapshot, error), "settled OSC commit can be read back");
Expect(!oscCommitSnapshot.currentValue.numberValues.empty() &&
oscCommitSnapshot.currentValue.numberValues[0] == 0.2,
"settled OSC commit updates the committed session value");
CommittedLiveStateReadModel committedLiveState = store.BuildCommittedLiveStateReadModel();
Expect(!committedLiveState.layers.empty(), "committed live read model exposes current session layers");
const auto committedLayerIt = std::find_if(committedLiveState.layers.begin(), committedLiveState.layers.end(),
[&oscCommitSnapshot](const RuntimeStore::LayerPersistentState& layer) { return layer.id == oscCommitSnapshot.layerId; });
Expect(committedLayerIt != committedLiveState.layers.end(), "committed live read model preserves layer identity");
if (committedLayerIt != committedLiveState.layers.end())
{
const auto committedValueIt = committedLayerIt->parameterValues.find("gain");
Expect(committedValueIt != committedLayerIt->parameterValues.end() &&
!committedValueIt->second.numberValues.empty() &&
committedValueIt->second.numberValues[0] == 0.2,
"committed live read model includes session-only OSC commit value");
}
Expect(committedLiveState.packagesById.find("alpha") != committedLiveState.packagesById.end(),
"committed live read model carries package definitions for snapshot publication");
} }
std::filesystem::remove_all(root); std::filesystem::remove_all(root);

View File

@@ -0,0 +1,231 @@
#include "SystemOutputFramePool.h"
#include <cstdint>
#include <iostream>
namespace
{
int gFailures = 0; // failed expectations across the whole test binary
// Logs and counts a failure whenever condition is false; silent on success.
void Expect(bool condition, const char* message)
{
    if (!condition)
    {
        std::cerr << "FAIL: " << message << "\n";
        ++gFailures;
    }
}
// Builds a small 4x3 BGRA8 pool configuration; capacity defaults to 2 slots.
SystemOutputFramePoolConfig MakeConfig(std::size_t capacity = 2)
{
    SystemOutputFramePoolConfig poolConfig;
    poolConfig.capacity = capacity;
    poolConfig.pixelFormat = VideoIOPixelFormat::Bgra8;
    poolConfig.width = 4;
    poolConfig.height = 3;
    return poolConfig;
}
// Verifies a fixed-capacity pool hands out distinct, correctly shaped BGRA8
// slots and counts both successful acquisitions and capacity misses.
void TestAcquireHonorsCapacityAndFrameShape()
{
SystemOutputFramePool pool(MakeConfig(2));
OutputFrameSlot first;
OutputFrameSlot second;
OutputFrameSlot third;
Expect(pool.AcquireFreeSlot(first), "first slot can be acquired");
Expect(pool.AcquireFreeSlot(second), "second slot can be acquired");
Expect(!pool.AcquireFreeSlot(third), "fixed capacity rejects third acquire");
Expect(first.frame.bytes != nullptr, "acquired slot has system memory");
Expect(first.frame.nativeBuffer == first.frame.bytes, "native buffer points at system memory");
Expect(first.frame.nativeFrame == nullptr, "system frame has no native frame");
Expect(first.frame.width == 4, "frame width is configured");
Expect(first.frame.height == 3, "frame height is configured");
// 4 pixels * 4 bytes per BGRA8 pixel = 16 row bytes.
Expect(first.frame.rowBytes == 16, "BGRA8 row bytes are inferred");
Expect(first.frame.pixelFormat == VideoIOPixelFormat::Bgra8, "BGRA8 is the default output format");
Expect(first.frame.bytes != second.frame.bytes, "each slot owns distinct memory");
SystemOutputFramePoolMetrics metrics = pool.GetMetrics();
Expect(metrics.freeCount == 0, "all slots are in use");
Expect(metrics.renderingCount == 2, "rendering slots are counted");
Expect(metrics.acquiredCount == 2, "acquired slots are counted");
Expect(metrics.acquireMissCount == 1, "capacity miss is counted");
}
// Walks a single slot through the full Phase 7.7 state machine
// (free -> rendering -> completed -> scheduled -> free) and checks that the
// pool metrics track each transition.
void TestPhase77StateContract()
{
SystemOutputFramePool pool(MakeConfig(1));
SystemOutputFramePoolMetrics metrics = pool.GetMetrics();
Expect(metrics.freeCount == 1, "new pool starts with one free slot");
Expect(metrics.renderingCount == 0, "new pool starts with no rendering slots");
Expect(metrics.completedCount == 0, "new pool starts with no completed slots");
Expect(metrics.scheduledCount == 0, "new pool starts with no scheduled slots");
OutputFrameSlot slot;
Expect(pool.AcquireRenderingSlot(slot), "free slot moves to rendering");
metrics = pool.GetMetrics();
Expect(metrics.freeCount == 0, "rendering slot leaves free pool");
Expect(metrics.renderingCount == 1, "rendering slot is counted");
Expect(pool.PublishCompletedSlot(slot), "rendering slot moves to completed");
metrics = pool.GetMetrics();
Expect(metrics.renderingCount == 0, "completed slot leaves rendering");
Expect(metrics.completedCount == 1, "completed slot is counted");
Expect(metrics.readyCount == 1, "completed slot is available to scheduler");
OutputFrameSlot completed;
Expect(pool.ConsumeCompletedSlot(completed), "completed slot can be dequeued for scheduling");
metrics = pool.GetMetrics();
// Consuming only removes the slot from the ready queue; it stays in the
// completed state until MarkScheduled is called.
Expect(metrics.completedCount == 1, "dequeued completed slot remains completed until scheduled");
Expect(metrics.readyCount == 0, "dequeued completed slot leaves ready queue");
Expect(pool.MarkScheduled(completed), "completed slot moves to scheduled");
metrics = pool.GetMetrics();
Expect(metrics.completedCount == 0, "scheduled slot leaves completed state");
Expect(metrics.scheduledCount == 1, "scheduled slot is counted");
Expect(pool.ReleaseScheduledSlot(completed), "scheduled slot returns to free");
metrics = pool.GetMetrics();
Expect(metrics.freeCount == 1, "released scheduled slot returns to free");
Expect(metrics.scheduledCount == 0, "released scheduled slot leaves scheduled state");
}
// Verifies the ready queue is FIFO and that consumed slots can be returned
// to the free pool either via the scheduled path or directly.
void TestReadySlotsAreConsumedFifo()
{
SystemOutputFramePool pool(MakeConfig(2));
OutputFrameSlot first;
OutputFrameSlot second;
Expect(pool.AcquireFreeSlot(first), "first FIFO slot can be acquired");
Expect(pool.AcquireFreeSlot(second), "second FIFO slot can be acquired");
Expect(pool.PublishReadySlot(first), "first FIFO slot can be published");
Expect(pool.PublishReadySlot(second), "second FIFO slot can be published");
OutputFrameSlot consumed;
Expect(pool.ConsumeReadySlot(consumed), "first ready slot can be consumed");
Expect(consumed.index == first.index, "first published slot is consumed first");
Expect(pool.MarkScheduled(consumed), "consumed slot can be marked scheduled");
Expect(pool.ReleaseScheduledSlot(consumed), "scheduled slot can be released");
Expect(pool.ConsumeReadySlot(consumed), "second ready slot can be consumed");
Expect(consumed.index == second.index, "second published slot is consumed second");
// Direct release path: no MarkScheduled step for this slot.
Expect(pool.ReleaseSlot(consumed), "consumed slot can be released without scheduling");
SystemOutputFramePoolMetrics metrics = pool.GetMetrics();
Expect(metrics.freeCount == 2, "released slots return to free pool");
Expect(metrics.readyCount == 0, "ready queue is empty after consumption");
}
// Verifies a slot stays unavailable for rendering through the completed,
// dequeued, and scheduled states, and only returns after an explicit release.
void TestCompletedSlotCannotBeAcquiredUntilReleased()
{
SystemOutputFramePool pool(MakeConfig(1));
OutputFrameSlot slot;
OutputFrameSlot extra;
Expect(pool.AcquireRenderingSlot(slot), "single slot can be acquired for rendering");
Expect(pool.PublishCompletedSlot(slot), "single slot can be published completed");
Expect(!pool.AcquireRenderingSlot(extra), "completed slot is not available for rendering");
OutputFrameSlot completed;
Expect(pool.ConsumeCompletedSlot(completed), "completed slot can be dequeued");
Expect(!pool.AcquireRenderingSlot(extra), "dequeued completed slot is still not free");
Expect(pool.MarkScheduled(completed), "dequeued completed slot can be scheduled");
Expect(!pool.AcquireRenderingSlot(extra), "scheduled slot is still not free");
Expect(pool.ReleaseScheduledSlot(completed), "scheduled slot can be released");
Expect(pool.AcquireRenderingSlot(extra), "released slot can be acquired again");
}
// Verifies the buffer-keyed API: a ready slot can be scheduled and released
// using only its system-memory pointer (no slot handle required).
void TestReadySlotCanBeScheduledByBuffer()
{
SystemOutputFramePool pool(MakeConfig(1));
OutputFrameSlot slot;
Expect(pool.AcquireFreeSlot(slot), "buffer schedule slot can be acquired");
// Remember the buffer pointer before the handle goes out of play.
void* bytes = slot.frame.bytes;
Expect(pool.PublishReadySlot(slot), "buffer schedule slot can be published");
Expect(pool.MarkScheduledByBuffer(bytes), "ready slot can be marked scheduled by buffer");
SystemOutputFramePoolMetrics metrics = pool.GetMetrics();
Expect(metrics.readyCount == 0, "scheduled-by-buffer removes slot from ready queue");
Expect(metrics.scheduledCount == 1, "scheduled-by-buffer counts scheduled slot");
Expect(pool.ReleaseSlotByBuffer(bytes), "scheduled slot can be released by buffer");
metrics = pool.GetMetrics();
Expect(metrics.freeCount == 1, "released-by-buffer slot returns to free pool");
}
// Verifies illegal state transitions are rejected and that reacquiring a
// released slot bumps its generation so stale handles become inert.
void TestInvalidTransitionsAreRejected()
{
SystemOutputFramePool pool(MakeConfig(1));
OutputFrameSlot slot;
Expect(pool.AcquireFreeSlot(slot), "transition slot can be acquired");
Expect(!pool.MarkScheduled(slot), "acquired slot cannot be marked scheduled");
Expect(pool.PublishReadySlot(slot), "acquired slot can be published");
Expect(!pool.PublishReadySlot(slot), "ready slot cannot be published twice");
Expect(pool.ReleaseSlot(slot), "ready slot can be released to free");
Expect(!pool.ReleaseSlot(slot), "free slot cannot be released again");
OutputFrameSlot next;
Expect(pool.AcquireFreeSlot(next), "slot can be reacquired after release");
// Same storage index, new generation: the old handle must be rejected.
Expect(next.index == slot.index, "same storage slot can be reused");
Expect(next.generation != slot.generation, "stale handles are invalidated on reacquire");
Expect(!pool.PublishReadySlot(slot), "stale handle cannot publish reacquired slot");
}
// Verifies row-byte sizing: v210 row bytes are derived from the width via
// MinimumV210RowBytes, while an explicitly configured rowBytes wins.
void TestPixelFormatAwareSizing()
{
SystemOutputFramePoolConfig config;
config.width = 7;
config.height = 2;
config.pixelFormat = VideoIOPixelFormat::V210;
config.capacity = 1;
SystemOutputFramePool pool(config);
OutputFrameSlot slot;
Expect(pool.AcquireFreeSlot(slot), "v210 slot can be acquired");
Expect(slot.frame.pixelFormat == VideoIOPixelFormat::V210, "slot keeps configured pixel format");
Expect(slot.frame.rowBytes == static_cast<long>(MinimumV210RowBytes(config.width)), "v210 row bytes are inferred");
// Reconfigure with an explicit rowBytes override; it must be preserved.
SystemOutputFramePoolConfig explicitConfig = config;
explicitConfig.pixelFormat = VideoIOPixelFormat::Uyvy8;
explicitConfig.rowBytes = 64;
pool.Configure(explicitConfig);
Expect(pool.AcquireFreeSlot(slot), "explicit row-byte slot can be acquired");
Expect(slot.frame.pixelFormat == VideoIOPixelFormat::Uyvy8, "slot keeps reconfigured pixel format");
Expect(slot.frame.rowBytes == 64, "explicit row bytes are preserved");
}
void TestEmptyReadyQueueUnderrunIsCounted()
{
SystemOutputFramePool pool(MakeConfig(1));
OutputFrameSlot slot;
Expect(!pool.ConsumeReadySlot(slot), "empty ready queue cannot be consumed");
SystemOutputFramePoolMetrics metrics = pool.GetMetrics();
Expect(metrics.readyUnderrunCount == 1, "ready underrun is counted");
}
}
// Entry point: runs every SystemOutputFramePool scenario in order and
// reports the aggregate result through the process exit code.
int main()
{
    TestAcquireHonorsCapacityAndFrameShape();
    TestPhase77StateContract();
    TestReadySlotsAreConsumedFifo();
    TestCompletedSlotCannotBeAcquiredUntilReleased();
    TestReadySlotCanBeScheduledByBuffer();
    TestInvalidTransitionsAreRejected();
    TestPixelFormatAwareSizing();
    TestEmptyReadyQueueUnderrunIsCounted();
    if (gFailures == 0)
    {
        std::cout << "SystemOutputFramePool tests passed.\n";
        return 0;
    }
    std::cerr << gFailures << " system output frame pool test failure(s).\n";
    return 1;
}

View File

@@ -0,0 +1,96 @@
#include "VideoBackendLifecycle.h"
#include <iostream>
#include <string>
namespace
{
int gFailures = 0; // failed expectations across the whole test binary
// Logs and counts a failure whenever condition is false; silent on success.
void Expect(bool condition, const char* message)
{
    if (!condition)
    {
        std::cerr << "FAIL: " << message << "\n";
        ++gFailures;
    }
}
// Walks the full happy-path lifecycle (uninitialized -> discovering ->
// discovered -> configuring -> configured -> prerolling -> running) including
// a degraded/recovery round trip and a clean stop.
void TestAllowedLifecycleTransitions()
{
VideoBackendLifecycle lifecycle;
Expect(lifecycle.State() == VideoBackendLifecycleState::Uninitialized, "lifecycle starts uninitialized");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Discovering, "discover").accepted,
"uninitialized can transition to discovering");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Discovered, "discovered").accepted,
"discovering can transition to discovered");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Configuring, "configuring").accepted,
"discovered can transition to configuring");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Configured, "configured").accepted,
"configuring can transition to configured");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Prerolling, "preroll").accepted,
"configured can transition to prerolling");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Running, "running").accepted,
"prerolling can transition to running");
// Degraded is a recoverable state: running <-> degraded in both directions.
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Degraded, "degraded").accepted,
"running can transition to degraded");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Running, "recovered").accepted,
"degraded can transition back to running");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Stopping, "stopping").accepted,
"running can transition to stopping");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Stopped, "stopped").accepted,
"stopping can transition to stopped");
}
// Verifies an illegal jump (uninitialized -> running) is rejected, leaves the
// state untouched, and reports a descriptive error message.
void TestInvalidLifecycleTransitionIsRejected()
{
VideoBackendLifecycle lifecycle;
const VideoBackendLifecycleTransition transition =
lifecycle.TransitionTo(VideoBackendLifecycleState::Running, "skip setup");
Expect(!transition.accepted, "uninitialized cannot transition directly to running");
Expect(lifecycle.State() == VideoBackendLifecycleState::Uninitialized, "invalid transition leaves state unchanged");
Expect(transition.errorMessage.find("Invalid video backend lifecycle transition") != std::string::npos,
"invalid transition reports an error");
}
// Verifies Fail() enters the failed state with a retained reason, that a
// retry transition out of failed is allowed, and that it clears the reason.
void TestFailureStateRecordsReasonAndCanRecover()
{
VideoBackendLifecycle lifecycle;
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Discovering, "discover").accepted,
"lifecycle can start discovery");
Expect(lifecycle.Fail("no device").accepted, "discovering can transition to failed");
Expect(lifecycle.State() == VideoBackendLifecycleState::Failed, "failure transition sets failed state");
Expect(lifecycle.FailureReason() == "no device", "failure reason is retained");
Expect(lifecycle.TransitionTo(VideoBackendLifecycleState::Discovering, "retry").accepted,
"failed lifecycle can retry discovery");
Expect(lifecycle.FailureReason().empty(), "successful non-failed transition clears failure reason");
}
void TestStateNamesAreStable()
{
Expect(std::string(VideoBackendLifecycle::StateName(VideoBackendLifecycleState::Uninitialized)) == "uninitialized",
"uninitialized state name is stable");
Expect(std::string(VideoBackendLifecycle::StateName(VideoBackendLifecycleState::Running)) == "running",
"running state name is stable");
Expect(std::string(VideoBackendLifecycle::StateName(VideoBackendLifecycleState::Failed)) == "failed",
"failed state name is stable");
}
}
// Entry point: runs every VideoBackendLifecycle scenario in order and
// reports the aggregate result through the process exit code.
int main()
{
    TestAllowedLifecycleTransitions();
    TestInvalidLifecycleTransitionIsRejected();
    TestFailureStateRecordsReasonAndCanRecover();
    TestStateNamesAreStable();
    if (gFailures == 0)
    {
        std::cout << "VideoBackendLifecycle tests passed.\n";
        return 0;
    }
    std::cerr << gFailures << " video backend lifecycle test failure(s).\n";
    return 1;
}

View File

@@ -54,8 +54,15 @@ public:
return true; return true;
} }
bool Start() override bool PrepareOutputSchedule() override
{ {
mPreparedOutputSchedule = true;
return true;
}
bool StartInputStreams() override
{
mInputStreamsStarted = true;
mState.hasInputSource = true; mState.hasInputSource = true;
VideoIOFrame input; VideoIOFrame input;
input.bytes = mInputBytes.data(); input.bytes = mInputBytes.data();
@@ -65,11 +72,22 @@ public:
input.pixelFormat = mState.inputPixelFormat; input.pixelFormat = mState.inputPixelFormat;
if (mInputCallback) if (mInputCallback)
mInputCallback(input); mInputCallback(input);
return true;
}
bool StartScheduledPlayback() override
{
mScheduledPlaybackStarted = true;
if (mOutputCallback) if (mOutputCallback)
mOutputCallback(VideoIOCompletion{ VideoIOCompletionResult::Completed }); mOutputCallback(VideoIOCompletion{ VideoIOCompletionResult::Completed });
return true; return true;
} }
bool Start() override
{
return PrepareOutputSchedule() && StartInputStreams() && StartScheduledPlayback();
}
bool Stop() override { return true; } bool Stop() override { return true; }
const VideoIOState& State() const override { return mState; } const VideoIOState& State() const override { return mState; }
VideoIOState& MutableState() override { return mState; } VideoIOState& MutableState() override { return mState; }
@@ -92,13 +110,22 @@ public:
return true; return true;
} }
void AccountForCompletionResult(VideoIOCompletionResult result) override VideoPlayoutRecoveryDecision AccountForCompletionResult(VideoIOCompletionResult result, uint64_t readyQueueDepth) override
{ {
mLastCompletion = result; mLastCompletion = result;
mLastReadyQueueDepth = readyQueueDepth;
VideoPlayoutRecoveryDecision decision;
decision.result = result;
decision.readyQueueDepth = readyQueueDepth;
return decision;
} }
unsigned ScheduledFrames() const { return mScheduledFrames; } unsigned ScheduledFrames() const { return mScheduledFrames; }
bool PreparedOutputSchedule() const { return mPreparedOutputSchedule; }
bool InputStreamsStarted() const { return mInputStreamsStarted; }
bool ScheduledPlaybackStarted() const { return mScheduledPlaybackStarted; }
VideoIOCompletionResult LastCompletion() const { return mLastCompletion; } VideoIOCompletionResult LastCompletion() const { return mLastCompletion; }
uint64_t LastReadyQueueDepth() const { return mLastReadyQueueDepth; }
private: private:
VideoIOState mState; VideoIOState mState;
@@ -107,7 +134,11 @@ private:
std::array<unsigned char, 3840> mInputBytes = {}; std::array<unsigned char, 3840> mInputBytes = {};
std::array<unsigned char, 7680> mOutputBytes = {}; std::array<unsigned char, 7680> mOutputBytes = {};
unsigned mScheduledFrames = 0; unsigned mScheduledFrames = 0;
bool mPreparedOutputSchedule = false;
bool mInputStreamsStarted = false;
bool mScheduledPlaybackStarted = false;
VideoIOCompletionResult mLastCompletion = VideoIOCompletionResult::Unknown; VideoIOCompletionResult mLastCompletion = VideoIOCompletionResult::Unknown;
uint64_t mLastReadyQueueDepth = 0;
}; };
} }
@@ -132,13 +163,17 @@ int main()
VideoIOOutputFrame outputFrame; VideoIOOutputFrame outputFrame;
Expect(device.BeginOutputFrame(outputFrame), "fake output frame can be acquired"); Expect(device.BeginOutputFrame(outputFrame), "fake output frame can be acquired");
device.EndOutputFrame(outputFrame); device.EndOutputFrame(outputFrame);
device.AccountForCompletionResult(VideoIOCompletionResult::Completed); device.AccountForCompletionResult(VideoIOCompletionResult::Completed, 2);
Expect(device.ScheduleOutputFrame(outputFrame), "fake output frame can be scheduled"); Expect(device.ScheduleOutputFrame(outputFrame), "fake output frame can be scheduled");
Expect(inputSeen, "fake input callback emits generic frame"); Expect(inputSeen, "fake input callback emits generic frame");
Expect(outputSeen, "fake output callback emits generic completion"); Expect(outputSeen, "fake output callback emits generic completion");
Expect(device.PreparedOutputSchedule(), "fake output schedule was prepared");
Expect(device.InputStreamsStarted(), "fake input streams started");
Expect(device.ScheduledPlaybackStarted(), "fake scheduled playback started");
Expect(device.ScheduledFrames() == 1, "fake backend schedules one frame"); Expect(device.ScheduledFrames() == 1, "fake backend schedules one frame");
Expect(device.LastCompletion() == VideoIOCompletionResult::Completed, "fake backend records generic completion"); Expect(device.LastCompletion() == VideoIOCompletionResult::Completed, "fake backend records generic completion");
Expect(device.LastReadyQueueDepth() == 2, "fake backend records ready queue depth");
if (gFailures != 0) if (gFailures != 0)
{ {

View File

@@ -37,17 +37,81 @@ void TestScheduleAdvancesFromZero()
Expect(third.streamTime == 2002, "third frame advances by two durations"); Expect(third.streamTime == 2002, "third frame advances by two durations");
} }
void TestLateAndDroppedSkipAhead() void TestLateAndDroppedRecoveryUsesMeasuredPressure()
{
VideoPlayoutPolicy policy;
policy.lateOrDropCatchUpFrames = 2;
VideoPlayoutScheduler scheduler;
scheduler.Configure(1000, 50000, policy);
(void)scheduler.NextScheduleTime();
VideoPlayoutRecoveryDecision lateDecision = scheduler.AccountForCompletionResult(VideoIOCompletionResult::DisplayedLate, 2);
Expect(lateDecision.catchUpFrames == 1, "single late completion catches up by measured one-frame lag");
Expect(lateDecision.lateStreak == 1, "late completion increments late streak");
Expect(scheduler.NextScheduleTime().streamTime == 2000, "single late recovery advances by measured lag");
VideoPlayoutRecoveryDecision dropDecision = scheduler.AccountForCompletionResult(VideoIOCompletionResult::Dropped, 2);
Expect(dropDecision.catchUpFrames == 2, "dropped completion catches up by measured drop pressure");
Expect(dropDecision.lateStreak == 0, "dropped completion resets late streak");
Expect(dropDecision.dropStreak == 1, "dropped completion increments drop streak");
Expect(scheduler.NextScheduleTime().streamTime == 5000, "drop recovery advances by measured lag");
}
void TestDefaultPolicyReportsLagWithoutSkippingScheduleTime()
{ {
VideoPlayoutScheduler scheduler; VideoPlayoutScheduler scheduler;
scheduler.Configure(1000, 50000); scheduler.Configure(1000, 50000);
(void)scheduler.NextScheduleTime(); (void)scheduler.NextScheduleTime();
scheduler.AccountForCompletionResult(VideoIOCompletionResult::DisplayedLate); VideoPlayoutRecoveryDecision decision = scheduler.AccountForCompletionResult(VideoIOCompletionResult::Dropped, 0);
Expect(scheduler.NextScheduleTime().streamTime == 3000, "late completion preserves the existing two-frame skip policy"); Expect(decision.measuredLagFrames > 0, "default policy still measures dropped-frame lag");
Expect(decision.catchUpFrames == 0, "default policy does not skip schedule time");
Expect(scheduler.NextScheduleTime().streamTime == 1000, "default recovery keeps stream time continuous");
}
scheduler.AccountForCompletionResult(VideoIOCompletionResult::Dropped); void TestMeasuredRecoveryIsCappedByPolicy()
Expect(scheduler.NextScheduleTime().streamTime == 6000, "dropped completion preserves the existing two-frame skip policy"); {
VideoPlayoutPolicy policy;
policy.lateOrDropCatchUpFrames = 1;
VideoPlayoutScheduler scheduler;
scheduler.Configure(1000, 50000, policy);
(void)scheduler.NextScheduleTime();
VideoPlayoutRecoveryDecision decision = scheduler.AccountForCompletionResult(VideoIOCompletionResult::Dropped, 0);
Expect(decision.measuredLagFrames > decision.catchUpFrames, "policy caps measured recovery");
Expect(decision.catchUpFrames == 1, "drop recovery obeys policy cap");
Expect(scheduler.NextScheduleTime().streamTime == 2000, "capped recovery advances by one frame");
}
void TestCleanCompletionTracksCompletedIndexAndClearsStreaks()
{
VideoPlayoutScheduler scheduler;
scheduler.Configure(1000, 50000);
(void)scheduler.NextScheduleTime();
(void)scheduler.AccountForCompletionResult(VideoIOCompletionResult::DisplayedLate, 2);
VideoPlayoutRecoveryDecision decision = scheduler.AccountForCompletionResult(VideoIOCompletionResult::Completed, 2);
Expect(decision.completedFrameIndex == 2, "completion accounting tracks completed index");
Expect(decision.catchUpFrames == 0, "clean completion does not catch up");
Expect(decision.lateStreak == 0, "clean completion clears late streak");
Expect(decision.dropStreak == 0, "clean completion keeps drop streak clear");
}
void TestPolicyNormalization()
{
VideoPlayoutPolicy policy;
policy.outputFramePoolSize = 0;
policy.targetPrerollFrames = 0;
policy.targetReadyFrames = 5;
policy.maxReadyFrames = 2;
VideoPlayoutPolicy normalized = NormalizeVideoPlayoutPolicy(policy);
Expect(normalized.targetPrerollFrames == 1, "policy normalization keeps at least one preroll frame");
Expect(normalized.maxReadyFrames == normalized.targetReadyFrames, "policy normalization keeps max ready frames above target");
Expect(normalized.outputFramePoolSize >= normalized.targetPrerollFrames + normalized.maxReadyFrames + normalized.minimumSpareDeviceFrames,
"policy normalization keeps enough output frames for preroll and ready queue ownership");
} }
void TestFrameBudgets() void TestFrameBudgets()
@@ -67,7 +131,11 @@ void TestFrameBudgets()
int main() int main()
{ {
TestScheduleAdvancesFromZero(); TestScheduleAdvancesFromZero();
TestLateAndDroppedSkipAhead(); TestLateAndDroppedRecoveryUsesMeasuredPressure();
TestDefaultPolicyReportsLagWithoutSkippingScheduleTime();
TestMeasuredRecoveryIsCappedByPolicy();
TestCleanCompletionTracksCompletedIndexAndClearsStreaks();
TestPolicyNormalization();
TestFrameBudgets(); TestFrameBudgets();
if (gFailures != 0) if (gFailures != 0)