Skip to content

Commit f92cd30

Browse files
committed
run: add --details option
Closes: #58
1 parent d71bcc0 commit f92cd30

File tree

6 files changed

+281
-36
lines changed

6 files changed

+281
-36
lines changed

crates/cargo-codspeed/README.md

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,26 @@ Options:
4848
-V, --version Print version information
4949
```
5050

51+
### Running benchmarks with details
52+
53+
Use the `--details` flag to see timing information for each benchmark:
54+
55+
```bash
56+
cargo codspeed run --details
57+
```
58+
59+
This will show execution times for each benchmark:
60+
```
61+
Checked: benches/example.rs::fibonacci (5.6 us)
62+
Checked: benches/example.rs::factorial (368 ns)
63+
```
64+
65+
The output also shows the total number of benchmarks executed:
66+
```
67+
Done running benchmark_suite (5 benchmarks)
68+
Finished running 2 benchmark suite(s) (10 benchmarks total)
69+
```
70+
5171
## Development
5272

5373
### Troubleshooting

crates/cargo-codspeed/src/app.rs

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,10 @@ enum Commands {
100100

101101
#[command(flatten)]
102102
bench_target_filters: BenchTargetFilters,
103+
104+
/// Print per-benchmark duration details
105+
#[arg(long)]
106+
details: bool,
103107
},
104108
}
105109

@@ -164,12 +168,14 @@ pub fn run(args: impl Iterator<Item = OsString>) -> Result<()> {
164168
benchname,
165169
package_filters,
166170
bench_target_filters,
171+
details,
167172
} => run_benches(
168173
&metadata,
169174
benchname,
170175
package_filters,
171176
bench_target_filters,
172177
measurement_mode,
178+
details,
173179
),
174180
};
175181

crates/cargo-codspeed/src/run.rs

Lines changed: 90 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ use codspeed::walltime_results::WalltimeResults;
1010
use std::{
1111
io::{self, Write},
1212
path::{Path, PathBuf},
13+
process::Stdio,
1314
};
1415

1516
#[cfg(unix)]
@@ -99,6 +100,7 @@ pub fn run_benches(
99100
package_filters: PackageFilters,
100101
bench_target_filters: BenchTargetFilters,
101102
measurement_mode: MeasurementMode,
103+
show_details: bool,
102104
) -> Result<()> {
103105
let codspeed_target_dir = get_codspeed_target_dir(metadata, measurement_mode);
104106
let workspace_root = metadata.workspace_root.as_std_path();
@@ -113,6 +115,8 @@ pub fn run_benches(
113115

114116
eprintln!("Collected {} benchmark suite(s) to run", benches.len());
115117

118+
let mut total_benchmark_count = 0;
119+
116120
for bench in benches.iter() {
117121
let bench_target_name = &bench.bench_target_name;
118122
// workspace_root is needed since file! returns the path relatively to the workspace root
@@ -124,6 +128,11 @@ pub fn run_benches(
124128
.env("CODSPEED_CARGO_WORKSPACE_ROOT", workspace_root)
125129
.current_dir(&bench.working_directory);
126130

131+
if show_details {
132+
command.env("CODSPEED_SHOW_DETAILS", "1");
133+
command.stdout(Stdio::piped()).stderr(Stdio::inherit());
134+
}
135+
127136
if measurement_mode == MeasurementMode::Walltime {
128137
command.arg("--bench"); // Walltime targets need this additional argument (inherited from running them with `cargo bench`)
129138
}
@@ -132,33 +141,89 @@ pub fn run_benches(
132141
command.arg(bench_name_filter);
133142
}
134143

135-
command
136-
.status()
137-
.map_err(|e| anyhow!("failed to execute the benchmark process: {}", e))
138-
.and_then(|status| {
139-
if status.success() {
140-
Ok(())
141-
} else {
142-
#[cfg(unix)]
143-
{
144-
let code = status
145-
.code()
146-
.or(status.signal().map(|s| 128 + s)) // 128+N indicates that a command was interrupted by signal N (see: https://tldp.org/LDP/abs/html/exitcodes.html)
147-
.unwrap_or(1);
148-
149-
eprintln!("failed to execute the benchmark process, exit code: {code}");
150-
151-
std::process::exit(code);
152-
}
153-
#[cfg(not(unix))]
154-
{
155-
bail!("failed to execute the benchmark process: {}", status)
156-
}
144+
if show_details {
145+
// Only capture and process output when details are requested
146+
let output = command
147+
.output()
148+
.map_err(|e| anyhow!("failed to execute the benchmark process: {}", e))?;
149+
150+
// Count benchmarks by looking for "Measured:" or "Checked:" lines
151+
let stdout = String::from_utf8_lossy(&output.stdout);
152+
let benchmark_count = stdout
153+
.lines()
154+
.filter(|line| {
155+
line.trim_start().starts_with("Measured:")
156+
|| line.trim_start().starts_with("Checked:")
157+
|| line.trim_start().starts_with(" Checked:")
158+
|| line.trim_start().starts_with(" Measured:")
159+
})
160+
.count();
161+
total_benchmark_count += benchmark_count;
162+
163+
// Print captured output
164+
print!("{stdout}");
165+
io::stdout().flush().unwrap();
166+
167+
if !output.status.success() {
168+
#[cfg(unix)]
169+
{
170+
let code = output
171+
.status
172+
.code()
173+
.or(output.status.signal().map(|s| 128 + s)) // 128+N indicates that a command was interrupted by signal N (see: https://tldp.org/LDP/abs/html/exitcodes.html)
174+
.unwrap_or(1);
175+
176+
eprintln!("failed to execute the benchmark process, exit code: {code}");
177+
178+
std::process::exit(code);
157179
}
158-
})?;
159-
eprintln!("Done running {bench_target_name}");
180+
#[cfg(not(unix))]
181+
{
182+
bail!("failed to execute the benchmark process: {}", output.status)
183+
}
184+
}
185+
186+
if benchmark_count == 0 && !stdout.is_empty() {
187+
eprintln!("Warning: No benchmarks detected in output for {bench_target_name}");
188+
}
189+
eprintln!("Done running {bench_target_name} ({benchmark_count} benchmarks)");
190+
} else {
191+
// Fast path: don't capture output when details aren't needed
192+
command
193+
.status()
194+
.map_err(|e| anyhow!("failed to execute the benchmark process: {}", e))
195+
.and_then(|status| {
196+
if status.success() {
197+
Ok(())
198+
} else {
199+
#[cfg(unix)]
200+
{
201+
let code = status
202+
.code()
203+
.or(status.signal().map(|s| 128 + s)) // 128+N indicates that a command was interrupted by signal N (see: https://tldp.org/LDP/abs/html/exitcodes.html)
204+
.unwrap_or(1);
205+
206+
eprintln!("failed to execute the benchmark process, exit code: {code}");
207+
208+
std::process::exit(code);
209+
}
210+
#[cfg(not(unix))]
211+
{
212+
bail!("failed to execute the benchmark process: {}", status)
213+
}
214+
}
215+
})?;
216+
eprintln!("Done running {bench_target_name}");
217+
}
218+
}
219+
if show_details {
220+
eprintln!(
221+
"Finished running {} benchmark suite(s) ({total_benchmark_count} benchmarks total)",
222+
benches.len()
223+
);
224+
} else {
225+
eprintln!("Finished running {} benchmark suite(s)", benches.len());
160226
}
161-
eprintln!("Finished running {} benchmark suite(s)", benches.len());
162227

163228
if measurement_mode == MeasurementMode::Walltime {
164229
aggregate_raw_walltime_data(workspace_root)?;

crates/codspeed/src/codspeed.rs

Lines changed: 54 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
1-
use crate::measurement;
1+
use crate::{measurement, utils};
22
use colored::Colorize;
33
use std::ffi::CString;
4+
use std::time::Instant;
45

56
pub use std::hint::black_box;
67

@@ -15,11 +16,14 @@ pub struct CodSpeed {
1516
current_benchmark: CString,
1617
group_stack: Vec<String>,
1718
is_instrumented: bool,
19+
start_time: Option<Instant>,
20+
show_details: bool,
1821
}
1922

2023
impl CodSpeed {
2124
pub fn new() -> Self {
2225
let is_instrumented = measurement::is_instrumented();
26+
let show_details = utils::show_details();
2327
if !is_instrumented {
2428
println!(
2529
"{} codspeed is enabled, but no performance measurement will be made since it's running in an unknown environment.",
@@ -32,6 +36,8 @@ impl CodSpeed {
3236
current_benchmark: CString::new("").expect("CString::new failed"),
3337
group_stack: Vec::new(),
3438
is_instrumented,
39+
start_time: None,
40+
show_details,
3541
}
3642
}
3743

@@ -47,29 +53,66 @@ impl CodSpeed {
4753
pub fn start_benchmark(&mut self, name: &str) {
4854
self.current_benchmark = CString::new(name).expect("CString::new failed");
4955
measurement::start();
56+
if self.show_details && !self.is_instrumented {
57+
self.start_time = Some(Instant::now());
58+
}
5059
}
5160

5261
#[inline(always)]
5362
pub fn end_benchmark(&mut self) {
5463
measurement::stop(&self.current_benchmark);
5564
self.benchmarked
5665
.push(self.current_benchmark.to_str().unwrap().to_string());
66+
67+
// Early return for instrumented mode with details - no output needed
68+
if self.show_details && self.is_instrumented {
69+
return;
70+
}
71+
72+
// For --details mode in non-instrumented environment, show timing
73+
if self.show_details {
74+
let elapsed = self
75+
.start_time
76+
.take()
77+
.map(|start| start.elapsed())
78+
.unwrap_or_default();
79+
if self.group_stack.is_empty() {
80+
println!(
81+
" Checked: {} ({})",
82+
self.current_benchmark.to_string_lossy(),
83+
crate::utils::format_duration_nanos(elapsed.as_nanos())
84+
);
85+
} else {
86+
println!(
87+
" Checked: {} (group: {}) ({})",
88+
self.current_benchmark.to_string_lossy(),
89+
self.group_stack.join("/"),
90+
crate::utils::format_duration_nanos(elapsed.as_nanos())
91+
);
92+
}
93+
return;
94+
}
95+
96+
// Default output for non-details mode
5797
let action_str = if self.is_instrumented {
5898
"Measured"
5999
} else {
60100
"Checked"
61101
};
62-
let group_str = if self.group_stack.is_empty() {
63-
"".to_string()
102+
if self.group_stack.is_empty() {
103+
println!(
104+
"{}: {}",
105+
action_str,
106+
self.current_benchmark.to_string_lossy()
107+
);
64108
} else {
65-
format!(" (group: {})", self.group_stack.join("/"))
66-
};
67-
println!(
68-
"{}: {}{}",
69-
action_str,
70-
self.current_benchmark.to_string_lossy(),
71-
group_str
72-
);
109+
println!(
110+
"{}: {} (group: {})",
111+
action_str,
112+
self.current_benchmark.to_string_lossy(),
113+
self.group_stack.join("/")
114+
);
115+
}
73116
}
74117
}
75118

0 commit comments

Comments
 (0)