Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 49 additions & 1 deletion bench-support/src/registry.rs
Original file line number Diff line number Diff line change
Expand Up @@ -479,15 +479,32 @@ pub const EXTENDED_WORKLOADS: &[WorkloadCase] = &[
},
];

/// Build a `WorkloadSpec` from a workload case and runtime parameters.
impl WorkloadCase {
    /// Combine this case with runtime parameters to produce a [`WorkloadSpec`].
    pub fn with_params(self, universe: u64, seed: u64) -> WorkloadSpec {
        let workload = self.workload;
        WorkloadSpec {
            universe,
            workload,
            seed,
        }
    }

    /// Find the workload whose stable `id` matches in `suite`.
    ///
    /// Prefer this over positional indexing into [`STANDARD_WORKLOADS`] or
    /// [`EXTENDED_WORKLOADS`]; the ordering of those constants is not part of
    /// the public contract and may change without notice.
    ///
    /// # Panics
    ///
    /// Panics when `suite` contains no workload with the given `id`.
    #[track_caller]
    pub fn by_id<'a>(suite: &'a [WorkloadCase], id: &str) -> &'a WorkloadCase {
        for case in suite {
            if case.id == id {
                return case;
            }
        }
        panic!("workload `{id}` not found in suite")
    }
}

#[cfg(test)]
Expand Down Expand Up @@ -551,4 +568,35 @@ mod tests {
"duplicate display_name in POLICIES",
);
}

/// Pin the workload ids that benchmarks look up by name so renaming or
/// removing one fails this test instead of silently changing which
/// workload a report runs against.
#[test]
fn standard_workloads_expose_required_ids() {
    let required_ids = ["uniform", "hotset_90_10", "zipfian_1.0"];
    for id in required_ids {
        // `by_id` panics on a missing id, so reaching the assert already
        // proves presence; the assert additionally pins the id round-trip.
        assert_eq!(WorkloadCase::by_id(STANDARD_WORKLOADS, id).id, id);
    }
}

/// `by_id` must panic, naming the requested id, when no workload matches.
#[test]
#[should_panic(expected = "workload `does_not_exist` not found in suite")]
fn by_id_panics_for_missing_workload() {
    WorkloadCase::by_id(STANDARD_WORKLOADS, "does_not_exist");
}

/// Every workload id must be unique within its suite, since `by_id` lookups
/// (and report output keyed by id) silently resolve to the first match.
///
/// Uses a `HashSet` scan instead of sort+dedup so a failure names the
/// offending id rather than just reporting that *some* duplicate exists.
#[test]
fn workload_ids_are_unique_within_each_suite() {
    for (suite_name, suite) in [
        ("STANDARD_WORKLOADS", STANDARD_WORKLOADS),
        ("EXTENDED_WORKLOADS", EXTENDED_WORKLOADS),
    ] {
        let mut seen = std::collections::HashSet::new();
        for case in suite {
            // `insert` returns false when the value was already present.
            assert!(
                seen.insert(case.id),
                "duplicate workload id `{}` in {suite_name}",
                case.id,
            );
        }
    }
}
}
7 changes: 4 additions & 3 deletions benches/reports.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ use common::metrics::{
run_benchmark,
};
use common::operation::{ReadThrough, run_operations};
use common::registry::{EXTENDED_WORKLOADS, STANDARD_WORKLOADS};
use common::registry::{EXTENDED_WORKLOADS, STANDARD_WORKLOADS, WorkloadCase};

const CAPACITY: usize = 4096;
const UNIVERSE: u64 = 16_384;
Expand Down Expand Up @@ -264,8 +264,9 @@ fn run_comprehensive_comparison() {
fn run_detailed_single_benchmark() {
println!("\n=== Detailed Benchmark Results ===\n");

// Use first workload from standard suite (zipfian 1.0)
let workload_case = &STANDARD_WORKLOADS[3]; // zipfian_1.0
// Look up by stable id so reordering STANDARD_WORKLOADS can't silently
// change which workload this report runs against.
let workload_case = WorkloadCase::by_id(STANDARD_WORKLOADS, "zipfian_1.0");
let spec = workload_case.with_params(UNIVERSE, SEED);

let config = BenchmarkConfig {
Expand Down
12 changes: 7 additions & 5 deletions benches/runner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ use common::metrics::{
run_benchmark,
};
use common::operation::{ReadThrough, run_operations};
use common::registry::STANDARD_WORKLOADS;
use common::registry::{STANDARD_WORKLOADS, WorkloadCase};

// Benchmark configuration constants
const CAPACITY: usize = 4096;
Expand Down Expand Up @@ -330,11 +330,13 @@ fn run_adaptation_benchmarks(artifact: &mut BenchmarkArtifact) {
fn run_comprehensive_benchmarks(artifact: &mut BenchmarkArtifact) {
println!("[4/4] Running comprehensive benchmarks...");

// Use a subset of workloads for comprehensive benchmarks (they're slower)
// Use a subset of workloads for comprehensive benchmarks (they're slower).
// Look up by stable id so reordering STANDARD_WORKLOADS can't silently
// change which workloads are included in this report.
let comprehensive_workloads = [
&STANDARD_WORKLOADS[0], // uniform
&STANDARD_WORKLOADS[3], // zipfian_1.0
&STANDARD_WORKLOADS[1], // hotset_90_10
WorkloadCase::by_id(STANDARD_WORKLOADS, "uniform"),
WorkloadCase::by_id(STANDARD_WORKLOADS, "zipfian_1.0"),
WorkloadCase::by_id(STANDARD_WORKLOADS, "hotset_90_10"),
];

for workload_case in &comprehensive_workloads {
Expand Down
18 changes: 11 additions & 7 deletions benches/workloads.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,14 +31,23 @@ use common::metrics::{
};
use common::operation::{ReadThrough, run_operations};
use common::registry::STANDARD_WORKLOADS;
use common::workload::WorkloadSpec;
use common::workload::{Workload, WorkloadGenerator, WorkloadSpec};
use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};

const CAPACITY: usize = 4096;
const UNIVERSE: u64 = 16_384;
const OPS: usize = 200_000;
const SEED: u64 = 42;

/// Build a deterministic generator for `workload`, fixing the universe and
/// seed to the benchmark-wide `UNIVERSE` and `SEED` constants so every
/// benchmark in this file draws from the same reproducible key stream.
fn make_generator(workload: Workload) -> WorkloadGenerator {
    let spec = WorkloadSpec {
        universe: UNIVERSE,
        workload,
        seed: SEED,
    };
    spec.generator()
}

// ============================================================================
// Hit Rate Benchmarks
// ============================================================================
Expand All @@ -61,12 +70,7 @@ fn bench_hit_rates(c: &mut Criterion) {
let mut total = std::time::Duration::default();
for _ in 0..iters {
let mut cache = make_cache(CAPACITY);
let mut generator = WorkloadSpec {
universe: UNIVERSE,
workload: wl,
seed: SEED,
}
.generator();
let mut generator = make_generator(wl);
let mut op_model = ReadThrough::new(1.0, SEED);
let start = Instant::now();
let _ = run_operations(
Expand Down
41 changes: 20 additions & 21 deletions src/store/weight.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1183,9 +1183,8 @@ mod tests {

use proptest::prelude::*;

#[allow(clippy::ptr_arg)]
fn weight_by_len(value: &String) -> usize {
value.len()
/// Test weigher factory: each entry's weight is its value's length in bytes.
///
/// Returning a closure (rather than taking `&String` directly as a free
/// function) keeps the weigher `Copy + Send + Sync` so the same call site
/// works for both `WeightStore` and `ConcurrentWeightStore` constructors.
fn weight_by_len() -> impl Fn(&String) -> usize + Copy + Send + Sync {
    |s: &String| s.len()
}

fn sum_entry_weights<K, V, F>(store: &WeightStore<K, V, F>) -> usize
Expand Down Expand Up @@ -1247,7 +1246,7 @@ mod tests {

#[test]
fn weight_store_tracks_weight() {
let mut store = WeightStore::with_capacity(3, 10, weight_by_len);
let mut store = WeightStore::with_capacity(3, 10, weight_by_len());
assert_eq!(store.total_weight(), 0);
assert_eq!(store.try_insert("k1", Arc::new("aa".to_string())), Ok(None));
assert_eq!(store.total_weight(), 2);
Expand All @@ -1262,7 +1261,7 @@ mod tests {

#[test]
fn weight_store_enforces_capacity() {
let mut store = WeightStore::with_capacity(10, 5, weight_by_len);
let mut store = WeightStore::with_capacity(10, 5, weight_by_len());
assert_eq!(
store.try_insert("k1", Arc::new("aaaaa".to_string())),
Ok(None)
Expand All @@ -1275,7 +1274,7 @@ mod tests {

#[test]
fn weight_store_update_adjusts_weight() {
let mut store = WeightStore::with_capacity(10, 10, weight_by_len);
let mut store = WeightStore::with_capacity(10, 10, weight_by_len());
assert_eq!(store.try_insert("k1", Arc::new("aa".to_string())), Ok(None));
assert_eq!(store.total_weight(), 2);
assert_eq!(
Expand All @@ -1287,7 +1286,7 @@ mod tests {

#[test]
fn weight_store_update_rejected_when_new_weight_exceeds_capacity() {
let mut store = WeightStore::with_capacity(4, 5, weight_by_len);
let mut store = WeightStore::with_capacity(4, 5, weight_by_len());
assert_eq!(store.try_insert("k1", Arc::new("aa".to_string())), Ok(None));
assert_eq!(store.try_insert("k2", Arc::new("bb".to_string())), Ok(None));
assert_eq!(store.total_weight(), 4);
Expand Down Expand Up @@ -1315,7 +1314,7 @@ mod tests {

#[test]
fn weight_store_remove_missing_does_not_mutate_weight_or_metrics() {
let mut store = WeightStore::with_capacity(4, 20, weight_by_len);
let mut store = WeightStore::with_capacity(4, 20, weight_by_len());
assert_eq!(store.try_insert("k1", Arc::new("aa".to_string())), Ok(None));
let before_weight = store.total_weight();
let before_metrics = store.metrics();
Expand All @@ -1327,7 +1326,7 @@ mod tests {

#[test]
fn weight_store_clear_resets_contents_but_preserves_counters() {
let mut store = WeightStore::with_capacity(4, 20, weight_by_len);
let mut store = WeightStore::with_capacity(4, 20, weight_by_len());
assert_eq!(store.try_insert("k1", Arc::new("aa".to_string())), Ok(None));
assert_eq!(
store.try_insert("k2", Arc::new("bbb".to_string())),
Expand Down Expand Up @@ -1355,7 +1354,7 @@ mod tests {

#[test]
fn weight_store_api_surface_and_peek_semantics() {
let mut store = WeightStore::with_capacity(3, 9, weight_by_len);
let mut store = WeightStore::with_capacity(3, 9, weight_by_len());
assert_eq!(store.capacity(), 3);
assert_eq!(store.capacity_weight(), 9);
assert!(store.is_empty());
Expand Down Expand Up @@ -1635,7 +1634,7 @@ mod tests {
#[cfg(feature = "concurrency")]
#[test]
fn concurrent_weight_store_basic_ops() {
let store = ConcurrentWeightStore::with_capacity(2, 10, weight_by_len);
let store = ConcurrentWeightStore::with_capacity(2, 10, weight_by_len());
let value = Arc::new("aa".to_string());
assert_eq!(store.try_insert("k1", value.clone()), Ok(None));
assert_eq!(store.get(&"k1"), Some(value.clone()));
Expand Down Expand Up @@ -1683,7 +1682,7 @@ mod tests {
#[cfg(feature = "concurrency")]
#[test]
fn concurrent_weight_store_failed_ops_do_not_increment_success_counters() {
let store = Arc::new(ConcurrentWeightStore::with_capacity(1, 1, weight_by_len));
let store = Arc::new(ConcurrentWeightStore::with_capacity(1, 1, weight_by_len()));
assert_eq!(
store.try_insert("seed".to_string(), Arc::new("x".to_string())),
Ok(None)
Expand Down Expand Up @@ -1760,7 +1759,7 @@ mod tests {
#[test]
fn concurrent_weight_store_metrics_track_inserts() {
let store: ConcurrentWeightStore<&str, String, _> =
ConcurrentWeightStore::with_capacity(100, 100_000, weight_by_len);
ConcurrentWeightStore::with_capacity(100, 100_000, weight_by_len());

store.try_insert("k1", Arc::new("v1".into())).unwrap();
store.try_insert("k2", Arc::new("v2".into())).unwrap();
Expand All @@ -1774,7 +1773,7 @@ mod tests {
#[test]
fn concurrent_weight_store_metrics_track_updates() {
let store: ConcurrentWeightStore<&str, String, _> =
ConcurrentWeightStore::with_capacity(100, 100_000, weight_by_len);
ConcurrentWeightStore::with_capacity(100, 100_000, weight_by_len());

store.try_insert("k1", Arc::new("v1".into())).unwrap();
store.try_insert("k1", Arc::new("updated".into())).unwrap();
Expand All @@ -1787,7 +1786,7 @@ mod tests {
#[test]
fn concurrent_weight_store_metrics_track_removes() {
let store: ConcurrentWeightStore<&str, String, _> =
ConcurrentWeightStore::with_capacity(100, 100_000, weight_by_len);
ConcurrentWeightStore::with_capacity(100, 100_000, weight_by_len());

store.try_insert("k1", Arc::new("v1".into())).unwrap();
store.remove(&"k1");
Expand All @@ -1800,7 +1799,7 @@ mod tests {
#[test]
fn concurrent_weight_store_metrics_track_hits_misses() {
let store: ConcurrentWeightStore<&str, String, _> =
ConcurrentWeightStore::with_capacity(100, 100_000, weight_by_len);
ConcurrentWeightStore::with_capacity(100, 100_000, weight_by_len());

store.try_insert("k1", Arc::new("v1".into())).unwrap();
let _ = store.get(&"k1");
Expand All @@ -1818,7 +1817,7 @@ mod tests {
#[test]
fn weight_store_try_with_capacity_reserves_capacity() {
let store: WeightStore<&str, String, _> =
WeightStore::try_with_capacity(16, 1_024, weight_by_len)
WeightStore::try_with_capacity(16, 1_024, weight_by_len())
.expect("try_with_capacity(16, ...) should succeed");
assert_eq!(store.capacity(), 16);
assert_eq!(store.capacity_weight(), 1_024);
Expand Down Expand Up @@ -1846,15 +1845,15 @@ mod tests {
// immediately-full store. Matches the semantics of
// `with_capacity(0, ...)`.
let store: WeightStore<&str, String, _> =
WeightStore::try_with_capacity(0, 1_024, weight_by_len)
WeightStore::try_with_capacity(0, 1_024, weight_by_len())
.expect("try_with_capacity(0, ...) should succeed");
assert_eq!(store.capacity(), 0);
assert!(store.is_empty());
}

#[test]
fn weight_store_clear_attributes_removes() {
let mut store = WeightStore::with_capacity(4, 1_024, weight_by_len);
let mut store = WeightStore::with_capacity(4, 1_024, weight_by_len());
for i in 0..4u32 {
store
.try_insert(i, Arc::new(format!("v{i}")))
Expand Down Expand Up @@ -1935,7 +1934,7 @@ mod tests {
#[test]
fn concurrent_weight_store_try_with_capacity_reserves_capacity() {
let store: ConcurrentWeightStore<&str, String, _> =
ConcurrentWeightStore::try_with_capacity(16, 1_024, weight_by_len)
ConcurrentWeightStore::try_with_capacity(16, 1_024, weight_by_len())
.expect("try_with_capacity(16, ...) should succeed");
assert_eq!(store.capacity(), 16);
assert_eq!(store.capacity_weight(), 1_024);
Expand All @@ -1958,7 +1957,7 @@ mod tests {
#[test]
fn concurrent_weight_store_clear_attributes_removes() {
let store: ConcurrentWeightStore<u32, String, _> =
ConcurrentWeightStore::with_capacity(4, 1_024, weight_by_len);
ConcurrentWeightStore::with_capacity(4, 1_024, weight_by_len());
for i in 0..4u32 {
store
.try_insert(i, Arc::new(format!("v{i}")))
Expand Down
Loading