Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1,693 changes: 875 additions & 818 deletions Cargo.lock

Large diffs are not rendered by default.

15 changes: 15 additions & 0 deletions Justfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
set shell := ["powershell", "-NoProfile", "-Command"]

# Usage:
# just test video-streamer
# just test # defaults to video-streamer
#
# Notes:
# - Requires `DGATEWAY_LIB_XMF_PATH` to point to `xmf.dll` for video streaming tests.
# - Writes logs to `.llm/test-<streamer>.log`.
# Runs the test suite for the selected streamer, teeing all cargo output to
# `.llm/test-<streamer>.log`, and propagates cargo's exit code on failure.
# Unknown streamer names fail fast with an explicit error.
test streamer="video-streamer":
@$ErrorActionPreference = 'Continue'; $llm = Join-Path (Get-Location) '.llm'; New-Item -ItemType Directory -Force -Path $llm | Out-Null; $log = Join-Path $llm ('test-' + '{{streamer}}' + '.log'); $env:RUST_LOG = 'video_streamer=info,webm_stream_correctness=info'; $env:RUST_BACKTRACE = '1'; $env:CARGO_TARGET_DIR = Join-Path $env:TEMP ('cargo-target-' + '{{streamer}}' + '-test'); if ('{{streamer}}' -eq 'video-streamer') { cmd /c "cargo test -p video-streamer --test webm_stream_correctness -- --ignored --nocapture 2>&1" | Tee-Object -FilePath $log; if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } } else { throw ('Unknown streamer: ' + '{{streamer}}' + ' (supported: video-streamer)') }

# Convenience alias (avoids confusion with positional params).
test-streamer:
@just test video-streamer
13 changes: 12 additions & 1 deletion crates/video-streamer/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,12 @@ authors = ["Devolutions Inc. <infos@devolutions.net>"]
edition = "2024"
publish = false

[features]
# Enables extra per-frame logging and diagnostics.
perf-diagnostics = []
# Enables internal helpers used by Criterion benchmarks.
bench = ["perf-diagnostics"]

[dependencies]
anyhow = "1.0"
futures-util = { version = "0.3", features = ["sink"] }
Expand All @@ -24,7 +30,8 @@ ebml-iterable = "0.6"
webm-iterable = "0.6"

[dev-dependencies]
tracing-subscriber = "0.3"
criterion = "0.5"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tokio = { version = "1.45", features = [
"io-util",
"rt",
Expand All @@ -40,3 +47,7 @@ transport = { path = "../transport" }

[lints]
workspace = true

[[bench]]
name = "vpx_reencode"
harness = false
57 changes: 57 additions & 0 deletions crates/video-streamer/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
# video-streamer

This crate takes an unseekable WebM recording (typically from Chrome CaptureStream) and rewrites it into a “fresh” WebM stream that can start playing immediately.
It does this by parsing the incoming WebM, finding the correct cut point, and re-encoding frames so the output stream begins with a keyframe and valid headers.

## Prerequisites

This crate relies on `cadeau` and its XMF backend for VP8/VP9 decode+encode.
If you want to override which XMF implementation is used at runtime, set `DGATEWAY_LIB_XMF_PATH` to an `xmf.dll` path before running tests or benches.

Example:

`$env:DGATEWAY_LIB_XMF_PATH = 'D:\library\cadeau\xmf.dll'`

## Tests

Run all tests:

`cargo test -p video-streamer`

Run the WebM streaming correctness suite:

`cargo test -p video-streamer --test webm_stream_correctness -- --nocapture`

Some tests are marked `#[ignore]` because they require large local assets or are intended for local investigation.
To run ignored tests:

`cargo test -p video-streamer -- --ignored --nocapture`

Test assets live under `testing-assets\`.

## Logging and diagnostics

The most detailed diagnostics are compiled out by default to keep production logs clean.
To compile them in, enable the `perf-diagnostics` Cargo feature:

`cargo test -p video-streamer --features perf-diagnostics -- --nocapture`

Then set `RUST_LOG` as needed.
Example:

`$env:RUST_LOG = 'video_streamer=trace'`

## Benchmarks

The main benchmark is `benches\vpx_reencode.rs`.
Run it with:

`cargo bench -p video-streamer --bench vpx_reencode --features bench -- --nocapture`

Benchmark output is intentionally quiet by default.
To print detailed per-run results, set `VIDEO_STREAMER_BENCH_VERBOSE` to any non-empty value other than `0`:

`$env:VIDEO_STREAMER_BENCH_VERBOSE = '1'`

If you want to correlate benchmark results with internal timing, also enable `perf-diagnostics` (the `bench` feature enables it).
This is intentionally a build-time gate so production logs stay clean.
114 changes: 114 additions & 0 deletions crates/video-streamer/benches/vpx_reencode.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
use std::io::{self, Write as _};
use std::time::{Duration, Instant};

use criterion::{Criterion, criterion_group, criterion_main};

/// Returns `true` when the `VIDEO_STREAMER_BENCH_VERBOSE` environment
/// variable is set to a non-empty value other than `"0"`.
fn verbose_bench_logging_enabled() -> bool {
    match std::env::var_os("VIDEO_STREAMER_BENCH_VERBOSE") {
        Some(flag) => !flag.is_empty() && flag != "0",
        None => false,
    }
}

/// Writes `message` to stdout, but only when verbose bench logging is
/// enabled (see `verbose_bench_logging_enabled`); write errors are
/// deliberately ignored since this is best-effort diagnostic output.
fn maybe_log_line(message: impl std::fmt::Display) {
    if verbose_bench_logging_enabled() {
        let _ = writeln!(io::stdout(), "{message}");
    }
}

/// Benchmark: re-encode the first 500 WebM tags of `uncued-recording.webm`
/// with a bounded per-iteration deadline, reporting throughput totals when
/// `VIDEO_STREAMER_BENCH_VERBOSE` is enabled.
///
/// Requires `DGATEWAY_LIB_XMF_PATH` to point at a loadable XMF library;
/// when it is missing or fails to initialize the benchmark is skipped
/// (early return) rather than panicking.
fn bench_reencode_first_500_tags(c: &mut Criterion) {
    // XMF is a hard prerequisite for the VP8/VP9 re-encode path, so bail out
    // early when it is unavailable. Note: the original wrapped this in a
    // `let xmf_initialized = { ... true }` block whose value was always
    // `true` when reached (all failure paths returned from the function),
    // making the follow-up `if !xmf_initialized { return; }` dead code.
    let Ok(path) = std::env::var("DGATEWAY_LIB_XMF_PATH") else {
        maybe_log_line("DGATEWAY_LIB_XMF_PATH not set; skipping benchmarks");
        return;
    };

    // SAFETY: This is how the project loads XMF elsewhere.
    if let Err(e) = unsafe { cadeau::xmf::init(&path) } {
        maybe_log_line(format_args!(
            "failed to initialize XMF from DGATEWAY_LIB_XMF_PATH={path}: {e:#}"
        ));
        return;
    }

    // Resolve the test asset relative to the crate root so the benchmark
    // works regardless of the working directory Criterion is invoked from.
    let input = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("testing-assets")
        .join("uncued-recording.webm");

    let mut group = c.benchmark_group("vpx_reencode");
    // Criterion requires sample_size >= 10.
    group.sample_size(10);
    group.warm_up_time(Duration::from_secs(1));
    group.measurement_time(Duration::from_secs(5));

    group.bench_function("reencode_first_500_tags_uncued_recording", |b| {
        // Keep per-iteration work bounded; Criterion may run many iterations per sample.
        let per_iter_deadline = Duration::from_millis(200);

        // `iter_custom` lets us time the whole batch ourselves and aggregate
        // throughput counters across iterations for the verbose summary line.
        b.iter_custom(|iters| {
            let start = Instant::now();
            let mut tags_processed_total: u64 = 0;
            let mut bytes_written_total: u64 = 0;
            let mut frames_reencoded_total: u64 = 0;
            let mut input_media_span_ms_total: u64 = 0;
            let mut timed_out_any = false;

            for _ in 0..iters {
                let stats =
                    video_streamer::bench_support::reencode_first_tags_from_path_until_deadline(
                        &input,
                        video_streamer::StreamingConfig {
                            encoder_threads: video_streamer::config::CpuCount::new(1),
                        },
                        500,
                        per_iter_deadline,
                    )
                    .expect("reencode failed");

                tags_processed_total += stats.tags_processed as u64;
                bytes_written_total += stats.bytes_written as u64;
                frames_reencoded_total += stats.frames_reencoded as u64;
                input_media_span_ms_total += stats.input_media_span_ms as u64;
                timed_out_any |= stats.timed_out;

                // Prevent the optimizer from discarding the per-iteration work.
                criterion::black_box(stats);
            }

            let elapsed = start.elapsed();
            // Clamp to avoid division by zero if the clock reports no progress.
            let elapsed_secs = elapsed.as_secs_f64().max(1e-9);
            let tags_per_sec = (tags_processed_total as f64) / elapsed_secs;
            let bytes_per_sec = (bytes_written_total as f64) / elapsed_secs;
            let frames_per_sec = (frames_reencoded_total as f64) / elapsed_secs;
            let media_ms_per_sec = (input_media_span_ms_total as f64) / elapsed_secs;

            maybe_log_line(format_args!(
                "[LibVPx-Performance-Hypothesis] iters={} elapsed_ms={} per_iter_deadline_ms={} frames_total={} frames_per_sec={:.2} input_media_ms_total={} input_media_ms_per_sec={:.2} tags_total={} tags_per_sec={:.2} bytes_total={} bytes_per_sec={:.2} timed_out_any={}",
                iters,
                elapsed.as_millis(),
                per_iter_deadline.as_millis(),
                frames_reencoded_total,
                frames_per_sec,
                input_media_span_ms_total,
                media_ms_per_sec,
                tags_processed_total,
                tags_per_sec,
                bytes_written_total,
                bytes_per_sec,
                timed_out_any,
            ));

            elapsed
        });
    });

    group.finish();
}

// Register the benchmark with the Criterion-generated `main` entry point
// (the crate's Cargo.toml sets `harness = false` for this bench target).
criterion_group!(benches, bench_reencode_first_500_tags);
criterion_main!(benches);
Loading
Loading