Phase 7.5 timing logs
Some checks failed
CI / React UI Build (push) Successful in 11s
CI / Native Windows Build And Tests (push) Successful in 2m45s
CI / Windows Release Package (push) Has been cancelled

This commit is contained in:
Aiden
2026-05-11 21:32:40 +10:00
parent 0a7954e879
commit f8adbbe0fe
8 changed files with 457 additions and 4 deletions

View File

@@ -83,11 +83,19 @@ JsonValue RuntimeStatePresenter::BuildRuntimeStateValue(const RuntimeStore& runt
JsonValue readyQueue = JsonValue::MakeObject();
readyQueue.set("depth", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueDepth)));
readyQueue.set("capacity", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueCapacity)));
readyQueue.set("minDepth", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.minReadyQueueDepth)));
readyQueue.set("maxDepth", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.maxReadyQueueDepth)));
readyQueue.set("zeroDepthCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueZeroDepthCount)));
readyQueue.set("pushedCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueuePushedCount)));
readyQueue.set("poppedCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueuePoppedCount)));
readyQueue.set("droppedCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueDroppedCount)));
readyQueue.set("underrunCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.readyQueueUnderrunCount)));
JsonValue outputRender = JsonValue::MakeObject();
outputRender.set("renderMs", JsonValue(telemetrySnapshot.backendPlayout.outputRenderMilliseconds));
outputRender.set("smoothedRenderMs", JsonValue(telemetrySnapshot.backendPlayout.smoothedOutputRenderMilliseconds));
outputRender.set("maxRenderMs", JsonValue(telemetrySnapshot.backendPlayout.maxOutputRenderMilliseconds));
JsonValue recovery = JsonValue::MakeObject();
recovery.set("completionResult", JsonValue(telemetrySnapshot.backendPlayout.completionResult));
recovery.set("completedFrameIndex", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.completedFrameIndex)));
@@ -106,6 +114,7 @@ JsonValue RuntimeStatePresenter::BuildRuntimeStateValue(const RuntimeStore& runt
backendPlayout.set("droppedFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.droppedFrameCount)));
backendPlayout.set("flushedFrameCount", JsonValue(static_cast<double>(telemetrySnapshot.backendPlayout.flushedFrameCount)));
backendPlayout.set("readyQueue", readyQueue);
backendPlayout.set("outputRender", outputRender);
backendPlayout.set("recovery", recovery);
root.set("backendPlayout", backendPlayout);

View File

@@ -209,7 +209,9 @@ bool HealthTelemetry::TryRecordPersistenceWriteResult(bool succeeded, const std:
void HealthTelemetry::RecordBackendPlayoutHealth(const std::string& lifecycleState, const std::string& completionResult,
std::size_t readyQueueDepth, std::size_t readyQueueCapacity, uint64_t readyQueuePushedCount,
std::size_t minReadyQueueDepth, std::size_t maxReadyQueueDepth, uint64_t readyQueueZeroDepthCount,
uint64_t readyQueuePoppedCount, uint64_t readyQueueDroppedCount, uint64_t readyQueueUnderrunCount,
double outputRenderMilliseconds, double smoothedOutputRenderMilliseconds, double maxOutputRenderMilliseconds,
uint64_t completedFrameIndex, uint64_t scheduledFrameIndex, uint64_t scheduledLeadFrames,
uint64_t measuredLagFrames, uint64_t catchUpFrames, uint64_t lateStreak, uint64_t dropStreak,
uint64_t lateFrameCount, uint64_t droppedFrameCount, uint64_t flushedFrameCount,
@@ -220,10 +222,16 @@ void HealthTelemetry::RecordBackendPlayoutHealth(const std::string& lifecycleSta
mBackendPlayout.completionResult = completionResult;
mBackendPlayout.readyQueueDepth = readyQueueDepth;
mBackendPlayout.readyQueueCapacity = readyQueueCapacity;
mBackendPlayout.minReadyQueueDepth = minReadyQueueDepth;
mBackendPlayout.maxReadyQueueDepth = maxReadyQueueDepth;
mBackendPlayout.readyQueueZeroDepthCount = readyQueueZeroDepthCount;
mBackendPlayout.readyQueuePushedCount = readyQueuePushedCount;
mBackendPlayout.readyQueuePoppedCount = readyQueuePoppedCount;
mBackendPlayout.readyQueueDroppedCount = readyQueueDroppedCount;
mBackendPlayout.readyQueueUnderrunCount = readyQueueUnderrunCount;
mBackendPlayout.outputRenderMilliseconds = std::max(outputRenderMilliseconds, 0.0);
mBackendPlayout.smoothedOutputRenderMilliseconds = std::max(smoothedOutputRenderMilliseconds, 0.0);
mBackendPlayout.maxOutputRenderMilliseconds = std::max(maxOutputRenderMilliseconds, 0.0);
mBackendPlayout.completedFrameIndex = completedFrameIndex;
mBackendPlayout.scheduledFrameIndex = scheduledFrameIndex;
mBackendPlayout.scheduledLeadFrames = scheduledLeadFrames;
@@ -240,7 +248,9 @@ void HealthTelemetry::RecordBackendPlayoutHealth(const std::string& lifecycleSta
bool HealthTelemetry::TryRecordBackendPlayoutHealth(const std::string& lifecycleState, const std::string& completionResult,
std::size_t readyQueueDepth, std::size_t readyQueueCapacity, uint64_t readyQueuePushedCount,
std::size_t minReadyQueueDepth, std::size_t maxReadyQueueDepth, uint64_t readyQueueZeroDepthCount,
uint64_t readyQueuePoppedCount, uint64_t readyQueueDroppedCount, uint64_t readyQueueUnderrunCount,
double outputRenderMilliseconds, double smoothedOutputRenderMilliseconds, double maxOutputRenderMilliseconds,
uint64_t completedFrameIndex, uint64_t scheduledFrameIndex, uint64_t scheduledLeadFrames,
uint64_t measuredLagFrames, uint64_t catchUpFrames, uint64_t lateStreak, uint64_t dropStreak,
uint64_t lateFrameCount, uint64_t droppedFrameCount, uint64_t flushedFrameCount,
@@ -254,10 +264,16 @@ bool HealthTelemetry::TryRecordBackendPlayoutHealth(const std::string& lifecycle
mBackendPlayout.completionResult = completionResult;
mBackendPlayout.readyQueueDepth = readyQueueDepth;
mBackendPlayout.readyQueueCapacity = readyQueueCapacity;
mBackendPlayout.minReadyQueueDepth = minReadyQueueDepth;
mBackendPlayout.maxReadyQueueDepth = maxReadyQueueDepth;
mBackendPlayout.readyQueueZeroDepthCount = readyQueueZeroDepthCount;
mBackendPlayout.readyQueuePushedCount = readyQueuePushedCount;
mBackendPlayout.readyQueuePoppedCount = readyQueuePoppedCount;
mBackendPlayout.readyQueueDroppedCount = readyQueueDroppedCount;
mBackendPlayout.readyQueueUnderrunCount = readyQueueUnderrunCount;
mBackendPlayout.outputRenderMilliseconds = std::max(outputRenderMilliseconds, 0.0);
mBackendPlayout.smoothedOutputRenderMilliseconds = std::max(smoothedOutputRenderMilliseconds, 0.0);
mBackendPlayout.maxOutputRenderMilliseconds = std::max(maxOutputRenderMilliseconds, 0.0);
mBackendPlayout.completedFrameIndex = completedFrameIndex;
mBackendPlayout.scheduledFrameIndex = scheduledFrameIndex;
mBackendPlayout.scheduledLeadFrames = scheduledLeadFrames;

View File

@@ -87,10 +87,16 @@ public:
std::string completionResult = "Unknown";
std::size_t readyQueueDepth = 0;
std::size_t readyQueueCapacity = 0;
std::size_t minReadyQueueDepth = 0;
std::size_t maxReadyQueueDepth = 0;
uint64_t readyQueueZeroDepthCount = 0;
uint64_t readyQueuePushedCount = 0;
uint64_t readyQueuePoppedCount = 0;
uint64_t readyQueueDroppedCount = 0;
uint64_t readyQueueUnderrunCount = 0;
double outputRenderMilliseconds = 0.0;
double smoothedOutputRenderMilliseconds = 0.0;
double maxOutputRenderMilliseconds = 0.0;
uint64_t completedFrameIndex = 0;
uint64_t scheduledFrameIndex = 0;
uint64_t scheduledLeadFrames = 0;
@@ -152,14 +158,18 @@ public:
void RecordBackendPlayoutHealth(const std::string& lifecycleState, const std::string& completionResult,
std::size_t readyQueueDepth, std::size_t readyQueueCapacity, uint64_t readyQueuePushedCount,
std::size_t minReadyQueueDepth, std::size_t maxReadyQueueDepth, uint64_t readyQueueZeroDepthCount,
uint64_t readyQueuePoppedCount, uint64_t readyQueueDroppedCount, uint64_t readyQueueUnderrunCount,
double outputRenderMilliseconds, double smoothedOutputRenderMilliseconds, double maxOutputRenderMilliseconds,
uint64_t completedFrameIndex, uint64_t scheduledFrameIndex, uint64_t scheduledLeadFrames,
uint64_t measuredLagFrames, uint64_t catchUpFrames, uint64_t lateStreak, uint64_t dropStreak,
uint64_t lateFrameCount, uint64_t droppedFrameCount, uint64_t flushedFrameCount,
bool degraded, const std::string& statusMessage);
bool TryRecordBackendPlayoutHealth(const std::string& lifecycleState, const std::string& completionResult,
std::size_t readyQueueDepth, std::size_t readyQueueCapacity, uint64_t readyQueuePushedCount,
std::size_t minReadyQueueDepth, std::size_t maxReadyQueueDepth, uint64_t readyQueueZeroDepthCount,
uint64_t readyQueuePoppedCount, uint64_t readyQueueDroppedCount, uint64_t readyQueueUnderrunCount,
double outputRenderMilliseconds, double smoothedOutputRenderMilliseconds, double maxOutputRenderMilliseconds,
uint64_t completedFrameIndex, uint64_t scheduledFrameIndex, uint64_t scheduledLeadFrames,
uint64_t measuredLagFrames, uint64_t catchUpFrames, uint64_t lateStreak, uint64_t dropStreak,
uint64_t lateFrameCount, uint64_t droppedFrameCount, uint64_t flushedFrameCount,

View File

@@ -6,6 +6,7 @@
#include "RenderEngine.h"
#include "RuntimeEventDispatcher.h"
#include <algorithm>
#include <chrono>
#include <cstring>
#include <windows.h>
@@ -294,6 +295,13 @@ void VideoBackend::StartOutputCompletionWorker()
mPendingOutputCompletions.clear();
mReadyOutputQueue.Clear();
mNextReadyOutputFrameIndex = 0;
mHasReadyQueueDepthBaseline = false;
mMinReadyQueueDepth = 0;
mMaxReadyQueueDepth = 0;
mReadyQueueZeroDepthCount = 0;
mOutputRenderMilliseconds = 0.0;
mSmoothedOutputRenderMilliseconds = 0.0;
mMaxOutputRenderMilliseconds = 0.0;
mOutputCompletionWorkerStopping = false;
mOutputCompletionWorkerRunning = true;
mOutputCompletionWorker = std::thread(&VideoBackend::OutputCompletionWorkerMain, this);
@@ -347,7 +355,9 @@ void VideoBackend::ProcessOutputFrameCompletion(const VideoIOCompletion& complet
{
RecordFramePacing(completion.result);
PublishOutputFrameCompleted(completion);
const VideoPlayoutRecoveryDecision recoveryDecision = AccountForCompletionResult(completion.result, mReadyOutputQueue.GetMetrics().depth);
const RenderOutputQueueMetrics initialQueueMetrics = mReadyOutputQueue.GetMetrics();
RecordReadyQueueDepthSample(initialQueueMetrics);
const VideoPlayoutRecoveryDecision recoveryDecision = AccountForCompletionResult(completion.result, initialQueueMetrics.depth);
FillReadyOutputQueue(completion);
if (!ScheduleReadyOutputFrame())
@@ -364,9 +374,15 @@ void VideoBackend::RecordBackendPlayoutHealth(VideoIOCompletionResult result, co
queueMetrics.depth,
queueMetrics.capacity,
queueMetrics.pushedCount,
mMinReadyQueueDepth,
mMaxReadyQueueDepth,
mReadyQueueZeroDepthCount,
queueMetrics.poppedCount,
queueMetrics.droppedCount,
queueMetrics.underrunCount,
mOutputRenderMilliseconds,
mSmoothedOutputRenderMilliseconds,
mMaxOutputRenderMilliseconds,
recoveryDecision.completedFrameIndex,
recoveryDecision.scheduledFrameIndex,
recoveryDecision.scheduledLeadFrames,
@@ -391,12 +407,14 @@ bool VideoBackend::FillReadyOutputQueue(const VideoIOCompletion& completion)
return filledAny;
filledAny = true;
metrics = mReadyOutputQueue.GetMetrics();
RecordReadyQueueDepthSample(metrics);
}
return true;
}
bool VideoBackend::RenderReadyOutputFrame(const VideoIOState& state, const VideoIOCompletion& completion)
{
const auto renderStart = std::chrono::steady_clock::now();
VideoIOOutputFrame outputFrame;
if (!BeginOutputFrame(outputFrame))
return false;
@@ -409,9 +427,16 @@ bool VideoBackend::RenderReadyOutputFrame(const VideoIOState& state, const Video
if (!rendered)
{
ApplyLifecycleTransition(VideoBackendLifecycleState::Degraded, "Output frame render request failed; skipping schedule for this frame.");
const double renderMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
std::chrono::steady_clock::now() - renderStart).count();
RecordOutputRenderDuration(renderMilliseconds);
return false;
}
const double renderMilliseconds = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
std::chrono::steady_clock::now() - renderStart).count();
RecordOutputRenderDuration(renderMilliseconds);
RenderOutputFrame readyFrame;
readyFrame.frame = outputFrame;
readyFrame.frameIndex = ++mNextReadyOutputFrameIndex;
@@ -423,6 +448,7 @@ bool VideoBackend::ScheduleReadyOutputFrame()
RenderOutputFrame readyFrame;
if (!mReadyOutputQueue.TryPop(readyFrame))
return false;
RecordReadyQueueDepthSample(mReadyOutputQueue.GetMetrics());
if (!ScheduleOutputFrame(readyFrame.frame))
return false;
@@ -488,6 +514,37 @@ void VideoBackend::RecordFramePacing(VideoIOCompletionResult completionResult)
PublishTimingSample("VideoBackend", "smoothedCompletionInterval", mSmoothedCompletionIntervalMilliseconds, "ms");
}
// Folds one ready-queue depth observation into the min/max/zero-depth
// baseline counters used by backend playout health reporting.
// The first sample seeds both extremes; later samples only widen them.
void VideoBackend::RecordReadyQueueDepthSample(const RenderOutputQueueMetrics& metrics)
{
	const std::size_t sampledDepth = metrics.depth;
	if (mHasReadyQueueDepthBaseline)
	{
		// Widen the observed range. Parenthesized (std::min)/(std::max)
		// guard against the Windows min/max macros.
		mMinReadyQueueDepth = (std::min)(mMinReadyQueueDepth, sampledDepth);
		mMaxReadyQueueDepth = (std::max)(mMaxReadyQueueDepth, sampledDepth);
	}
	else
	{
		// First observation establishes the baseline for both extremes.
		mHasReadyQueueDepthBaseline = true;
		mMinReadyQueueDepth = sampledDepth;
		mMaxReadyQueueDepth = sampledDepth;
	}
	// An empty queue at sample time indicates potential starvation;
	// count every occurrence so telemetry can surface it.
	if (sampledDepth == 0)
		++mReadyQueueZeroDepthCount;
}
// Records one output-frame render duration: stores the clamped last value,
// updates an exponentially smoothed average and running maximum, then
// publishes both the raw and smoothed samples as timing telemetry.
void VideoBackend::RecordOutputRenderDuration(double renderMilliseconds)
{
	// Negative durations can only come from clock anomalies; clamp to zero.
	const double clampedMilliseconds = (std::max)(renderMilliseconds, 0.0);
	mOutputRenderMilliseconds = clampedMilliseconds;
	if (mSmoothedOutputRenderMilliseconds > 0.0)
	{
		// EMA with 0.9 retention: smooth enough to damp one-off spikes
		// while still tracking sustained render-cost changes.
		mSmoothedOutputRenderMilliseconds =
			mSmoothedOutputRenderMilliseconds * 0.9 + clampedMilliseconds * 0.1;
	}
	else
	{
		// Seed the average from the first non-trivial sample so the EMA
		// does not slowly ramp up from zero.
		mSmoothedOutputRenderMilliseconds = clampedMilliseconds;
	}
	mMaxOutputRenderMilliseconds = (std::max)(mMaxOutputRenderMilliseconds, clampedMilliseconds);
	PublishTimingSample("VideoBackend", "outputRender", mOutputRenderMilliseconds, "ms");
	PublishTimingSample("VideoBackend", "smoothedOutputRender", mSmoothedOutputRenderMilliseconds, "ms");
}
bool VideoBackend::ApplyLifecycleTransition(VideoBackendLifecycleState state, const std::string& message)
{
const VideoBackendLifecycleTransition transition = mLifecycle.TransitionTo(state, message);

View File

@@ -75,6 +75,8 @@ private:
bool ScheduleReadyOutputFrame();
bool ScheduleBlackUnderrunFrame();
void RecordFramePacing(VideoIOCompletionResult completionResult);
void RecordReadyQueueDepthSample(const RenderOutputQueueMetrics& metrics);
void RecordOutputRenderDuration(double renderMilliseconds);
bool ApplyLifecycleTransition(VideoBackendLifecycleState state, const std::string& message);
bool ApplyLifecycleFailure(const std::string& message);
void PublishBackendStateChanged(const std::string& state, const std::string& message);
@@ -112,6 +114,13 @@ private:
double mCompletionIntervalMilliseconds = 0.0;
double mSmoothedCompletionIntervalMilliseconds = 0.0;
double mMaxCompletionIntervalMilliseconds = 0.0;
bool mHasReadyQueueDepthBaseline = false;
std::size_t mMinReadyQueueDepth = 0;
std::size_t mMaxReadyQueueDepth = 0;
uint64_t mReadyQueueZeroDepthCount = 0;
double mOutputRenderMilliseconds = 0.0;
double mSmoothedOutputRenderMilliseconds = 0.0;
double mMaxOutputRenderMilliseconds = 0.0;
uint64_t mLateFrameCount = 0;
uint64_t mDroppedFrameCount = 0;
uint64_t mFlushedFrameCount = 0;

View File

@@ -11,7 +11,8 @@ Phase checklist:
- [x] Make the render thread the sole GL owner
- [x] Refactor live state layering into an explicit composition model
- [x] Move persistence onto a background snapshot writer
- [ ] Make DeckLink/backend lifecycle explicit with a state machine
- [x] Make DeckLink/backend lifecycle explicit with a state machine
- [ ] Make playout timing proactive and deadline-aware
- [ ] Add structured health, telemetry, and operational reporting
Checklist note:
@@ -632,6 +633,21 @@ Expected benefits:
- easier handling of missing input, dropped frames, or reconfiguration
- a clearer place to own playout headroom policy, output queue sizing, and late-frame recovery behavior
### Phase 7.5. Make playout timing proactive and deadline-aware
Phase 7 made backend lifecycle, ready-frame queueing, measured recovery, and backend playout health visible. The remaining timing-specific work is to make output production proactive instead of demand-filled by completion pressure.
Dedicated design note:
- [PHASE_7_5_PROACTIVE_PLAYOUT_TIMING_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_7_5_PROACTIVE_PLAYOUT_TIMING_DESIGN.md)
Expected benefits:
- output frames are produced ahead based on queue pressure or cadence
- DeckLink completion handling normally consumes already-ready frames
- preview and synchronous readback fallback become explicitly subordinate to playout deadlines
- queue depth, readback misses, preview skips, and render timing explain why headroom drains
### Phase 8. Add structured health, telemetry, and operational reporting
This phase should happen after the main ownership changes so the telemetry can reflect the final architecture instead of a transient one.
@@ -680,7 +696,8 @@ If this is approached as a serious architecture program rather than opportunisti
5. Formalize live state layering and composition.
6. Move persistence to a background snapshot writer.
7. Refactor DeckLink/backend lifecycle into an explicit state machine.
8. Add structured telemetry, health reporting, and operational diagnostics.
8. Make playout timing proactive and deadline-aware.
9. Add structured telemetry, health reporting, and operational diagnostics.
## Why This Order Makes Sense
@@ -703,6 +720,7 @@ The app is in a much better place than it was before the OSC timing work. The sh
5. formalize layered live state
6. complete background persistence
7. explicit backend lifecycle
8. health and telemetry
8. proactive playout timing
9. health and telemetry
That sequence gives each later phase a cleaner foundation than the current app has today.

View File

@@ -0,0 +1,316 @@
# Phase 7.5 Design: Proactive Playout Timing
This document summarizes the timing-specific findings from [ARCHITECTURE_RESILIENCE_REVIEW.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/ARCHITECTURE_RESILIENCE_REVIEW.md) and turns them into a focused bridge phase after [PHASE_7_BACKEND_LIFECYCLE_PLAYOUT_DESIGN.md](/c:/Users/Aiden/Documents/GitHub/video-shader-toys/docs/PHASE_7_BACKEND_LIFECYCLE_PLAYOUT_DESIGN.md).
Phase 7 made backend lifecycle, playout policy, ready-frame queueing, late/drop recovery, and backend playout health explicit. Phase 7.5 should use those foundations to move output production from demand-filled scheduling toward proactive, deadline-aware playout.
## Status
- Phase 7.5 design package: proposed.
- Phase 7.5 implementation: Step 1 in progress.
- Current alignment: Phase 7 is complete. `RenderOutputQueue`, `VideoPlayoutPolicy`, `VideoPlayoutScheduler`, `VideoBackendLifecycle`, and backend playout telemetry exist. The backend worker fills the ready queue on completion demand, but render production is not yet proactively driven by queue pressure or video cadence.
Current footholds:
- `RenderEngine` owns normal GL work on the render thread.
- `VideoBackend` owns backend lifecycle, completion processing, ready-frame queue use, and backend playout health reporting.
- `RenderOutputQueue` reports depth, capacity, pushed, popped, dropped, and underrun counts.
- `VideoPlayoutPolicy` names ready-frame headroom and catch-up policy.
- `HealthTelemetry::BackendPlayoutSnapshot` exposes queue depth, underruns, late/drop streaks, and recovery decisions.
- Step 1 adds baseline timing fields for ready-queue min/max/zero-depth samples and output render duration.
## Timing Review Findings
The resilience review highlights several timing risks that remain after basic render-thread and backend ownership cleanup:
- playout is still effectively filled on demand instead of continuously produced ahead
- output buffering is named, but queue depth is not yet tuned against measured render/readback cost
- GPU readback has an asynchronous path, but the miss path can still fall back to synchronous readback
- preview presentation is best-effort, but it still shares render-thread budget with playout
- telemetry is improving, but render timing is still too coarse to distinguish draw, pack, fence wait, readback copy, and preview cost
The practical concern is not average frame time. It is what happens during a short spike. A single slow render, readback wait, preview present, or callback scheduling delay can drain playout headroom and cause late or dropped output frames.
## Why Phase 7.5 Exists
Phase 7 made the backend safer and observable, but Step 5 intentionally stopped at demand-filled queue behavior:
- a completion arrives
- the backend worker fills the ready queue to target depth
- the backend schedules one ready frame
That is better than callback-thread rendering, but it still couples frame production to output completion pressure. Phase 7.5 should make render production proactive:
- keep the ready queue near target depth before the device asks for the next frame
- let DeckLink consume already-prepared frames
- treat queue depth as the pressure signal between render and backend
- make preview and readback fallback subordinate to output deadlines
## Goals
Phase 7.5 should establish:
- a proactive output producer that fills `RenderOutputQueue` based on queue pressure
- a clear trigger model for output production: queue-low, cadence tick, or both
- a bounded sleep/yield strategy when the ready queue is full
- explicit priority rules between playout, preview, screenshots, shader work, and background render requests
- readback miss behavior that does not blindly return to the most timing-sensitive synchronous path
- telemetry that can explain why the queue drains: render cost, readback wait, preview cost, or scheduling pressure
- pure tests for producer pressure policy where possible
## Non-Goals
Phase 7.5 should not require:
- replacing the renderer
- replacing DeckLink support
- a full telemetry subsystem rewrite
- perfect adaptive latency
- a new UI
- changing live-state layering or persistence semantics
This phase is about output timing behavior, not broad subsystem redesign.
## Target Timing Model
The target model is:
```text
Video cadence / queue pressure
-> proactive output producer request
-> RenderEngine renders and reads back output frame
-> RenderOutputQueue stores ready frame
-> VideoBackend consumes ready frame for DeckLink scheduling
```
The important difference from Phase 7 is that output production should not wait until a completion has already created demand. The queue should usually have headroom before the completion worker needs to schedule.
Suggested pressure rules:
- if ready depth is below `targetReadyFrames`, request output production immediately
- if ready depth is at or above `maxReadyFrames`, producer sleeps or yields
- if late/drop streak grows, temporarily bias toward output production over preview
- if readback is late, prefer stale/black underrun policy over blocking the deadline path
- if preview is due but output queue is below target, skip or delay preview
## Proposed Collaborators
### `OutputProductionController`
Small policy owner that decides when to request another output frame.
Responsibilities:
- evaluate ready queue depth and capacity
- evaluate late/drop/underrun pressure
- decide whether to produce, sleep, or yield
- keep policy testable without DeckLink or GL
Non-responsibilities:
- GL rendering
- DeckLink scheduling
- live-state composition
### `OutputProducerWorker`
Worker or render-thread-adjacent loop that keeps output frames ready.
Responsibilities:
- wake on queue-low pressure
- request render-thread output production
- push completed frames into `RenderOutputQueue`
- stop cleanly before render/backend teardown
Non-responsibilities:
- device callback handling
- hardware scheduling
- persistent state mutation
### `RenderTimingBreakdown`
Lightweight render timing sample for the output path.
Initial fields:
- total output render time
- draw/composite time
- output pack time
- readback fence wait time
- readback copy time
- synchronous readback fallback count
- preview present cost
- preview skipped count
This can be reported into existing telemetry first, then Phase 8 can fold it into the broader health model.
## Migration Plan
### Step 1. Snapshot Current Timing Behavior
Use existing Phase 7 telemetry to capture baseline behavior before changing production cadence.
Initial target:
- [x] record ready queue depth over time while running
- [x] record underrun count, late/drop streaks, and catch-up frames
- [x] record output render duration and completion interval
- [x] identify whether queue depth regularly falls to zero
Exit criteria:
- [x] there is a clear before/after baseline for proactive production
- [x] runtime-state output exposes enough values to diagnose whether queue starvation is happening
Implementation notes:
- `HealthTelemetry::BackendPlayoutSnapshot` exposes current, min, max, and zero-depth ready-queue samples.
- `VideoBackend` samples ready-queue depth before demand-fill, after queue fill, and after scheduling from the queue.
- `VideoBackend` records last, smoothed, and max output render duration for demand-produced output frames.
- Runtime-state JSON exposes the baseline under `backendPlayout.readyQueue` and `backendPlayout.outputRender`.
### Step 2. Extract Output Production Policy
Introduce a pure policy helper for queue-pressure decisions.
Initial target:
- input: ready depth, capacity, target depth, late/drop streaks, underrun count
- output: produce, wait, or throttle
- tests cover low queue, full queue, late/drop pressure, and normalized policy values
Exit criteria:
- production cadence policy can evolve without touching DeckLink or GL code
### Step 3. Add A Proactive Producer Loop
Move from demand-filled output production to queue-pressure production.
Initial target:
- producer wakes when queue depth is below target
- producer requests render-thread output production until target depth is reached
- producer stops when backend stops or render thread shuts down
- completion worker mostly schedules from already-ready frames
Exit criteria:
- normal playback does not depend on completion processing to fill the queue from empty
- callback/completion pressure and render production pressure are separate
### Step 4. Prioritize Playout Over Preview
Make preview explicitly subordinate to output playout deadlines.
Initial target:
- skip or delay preview when ready queue depth is below target
- count skipped previews
- record preview present cost separately from output render cost
Exit criteria:
- preview cannot drain output headroom invisibly
- runtime telemetry shows preview skips and preview present cost
### Step 5. Make Readback Miss Policy Deadline-Aware
Avoid turning a late async readback fence into synchronous deadline pressure by default.
Initial target:
- count async readback misses
- count synchronous fallback uses
- allow policy to prefer stale/black output over synchronous fallback when queue pressure is high
- keep current fallback available while behavior is measured
Exit criteria:
- readback fallback is an explicit policy decision
- late GPU fences do not automatically block the most timing-sensitive path
### Step 6. Tune Headroom Policy
Use measured behavior to choose default queue depth and latency tradeoffs.
Initial target:
- compare 30fps and 60fps behavior
- tune `targetReadyFrames` and `maxReadyFrames`
- document expected latency cost of each default
- keep the setting centralized in `VideoPlayoutPolicy`
Exit criteria:
- default headroom values are based on observed timing, not guesswork
- latency versus resilience tradeoff is documented
## Testing Strategy
Recommended tests:
- production policy requests work when queue is below target
- production policy throttles when queue is full
- late/drop pressure biases toward production
- preview policy skips when output queue is below target
- readback miss policy selects stale/black versus synchronous fallback according to pressure
- producer shutdown drains or cancels work without touching destroyed render/backend state
Useful homes:
- a new `OutputProductionControllerTests`
- `RenderOutputQueueTests` for pressure-adjacent queue behavior
- `VideoPlayoutSchedulerTests` for recovery/pressure interactions
- non-GL fakes for producer loop wake/stop behavior
## Risks
### Latency Risk
More ready frames means more latency. Phase 7.5 should make that latency a visible, measured policy choice.
### Producer Runaway Risk
A proactive producer must not spin when the queue is full or when output is stopped.
### Buffer Ownership Risk
Ready frames must not be reused while DeckLink or the render path still owns their buffers.
### Readback Policy Risk
Stale or black output may be preferable to a missed deadline, but it can be visually obvious. External keying may make stale/black fallback more sensitive.
### Preview Regression Risk
Treating preview as subordinate may make desktop preview less smooth. That is acceptable only if playout quality improves and preview skips are visible.
## Phase 7.5 Exit Criteria
Phase 7.5 can be considered complete once the project can say:
- [ ] output production is driven by queue pressure or cadence, not only by completion demand
- [ ] completion handling normally schedules already-ready frames
- [ ] preview work is explicitly lower priority than playout
- [ ] readback miss behavior is explicit and deadline-aware
- [ ] queue depth, underruns, render timing, readback misses, and preview skips are visible
- [ ] default ready-frame headroom is documented for target frame rates
- [ ] production policy has non-DeckLink tests
## Open Questions
- Should proactive production be driven by a timer, queue-low notifications, or both?
- Should the producer live inside `VideoBackend`, `RenderEngine`, or a small playout controller between them?
- Should underrun default to black, last scheduled, or newest completed output once proactive production exists?
- How much latency is acceptable at 30fps and 60fps?
- Should preview have a hard minimum frame rate, or be fully opportunistic under playout pressure?
- Should synchronous readback fallback be disabled automatically after repeated late/drop pressure?
## Short Version
Phase 7 made playout observable and safer. Phase 7.5 should make it proactive.
The render side should keep the output queue warm before DeckLink needs the next frame. DeckLink should consume ready frames. Preview and synchronous readback fallback should never quietly steal the budget needed to hit output deadlines.

View File

@@ -86,9 +86,15 @@ void TestBackendPlayoutHealth()
1,
4,
12,
0,
3,
4,
10,
2,
1,
8.5,
7.25,
12.0,
8,
11,
3,
@@ -107,8 +113,14 @@ void TestBackendPlayoutHealth()
Expect(playout.completionResult == "Dropped", "backend playout health stores completion result");
Expect(playout.readyQueueDepth == 1, "backend playout health stores ready queue depth");
Expect(playout.readyQueueCapacity == 4, "backend playout health stores ready queue capacity");
Expect(playout.minReadyQueueDepth == 0, "backend playout health stores min ready queue depth");
Expect(playout.maxReadyQueueDepth == 3, "backend playout health stores max ready queue depth");
Expect(playout.readyQueueZeroDepthCount == 4, "backend playout health stores zero-depth queue samples");
Expect(playout.readyQueueDroppedCount == 2, "backend playout health stores queue dropped count");
Expect(playout.readyQueueUnderrunCount == 1, "backend playout health stores queue underrun count");
Expect(playout.outputRenderMilliseconds == 8.5, "backend playout health stores output render duration");
Expect(playout.smoothedOutputRenderMilliseconds == 7.25, "backend playout health stores smoothed output render duration");
Expect(playout.maxOutputRenderMilliseconds == 12.0, "backend playout health stores max output render duration");
Expect(playout.completedFrameIndex == 8, "backend playout health stores completed index");
Expect(playout.scheduledFrameIndex == 11, "backend playout health stores scheduled index");
Expect(playout.measuredLagFrames == 2, "backend playout health stores measured lag");
@@ -127,9 +139,15 @@ void TestBackendPlayoutHealth()
2,
4,
13,
1,
3,
4,
11,
2,
1,
-5.0,
-4.0,
-3.0,
9,
12,
3,