Skip to content

Commit 17c6225

Browse files
committed
[cargo-nextest] add a "bench" command
See #2125 -- it can be helpful to run benchmarks with setup scripts and such configured.
1 parent 356b3bb commit 17c6225

File tree

37 files changed

+1079
-232
lines changed

37 files changed

+1079
-232
lines changed

cargo-nextest/src/dispatch.rs

Lines changed: 339 additions & 39 deletions
Large diffs are not rendered by default.

cargo-nextest/src/errors.rs

Lines changed: 44 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,10 @@ use indent_write::indentable::Indented;
77
use itertools::Itertools;
88
use nextest_filtering::errors::FiltersetParseErrors;
99
use nextest_metadata::NextestExitCode;
10-
use nextest_runner::{errors::*, helpers::plural, redact::Redactor};
10+
use nextest_runner::{
11+
config::ConfigExperimental, errors::*, helpers::plural, redact::Redactor,
12+
run_mode::NextestRunMode,
13+
};
1114
use owo_colors::OwoColorize;
1215
use semver::Version;
1316
use std::{error::Error, io, path::PathBuf, process::ExitStatus, string::FromUtf8Error};
@@ -212,6 +215,7 @@ pub enum ExpectedError {
212215
TestRunFailed,
213216
#[error("no tests to run")]
214217
NoTestsRun {
218+
mode: NextestRunMode,
215219
/// The no-tests-run error was chosen because it was the default (we show a hint in this
216220
/// case)
217221
is_default: bool,
@@ -249,6 +253,11 @@ pub enum ExpectedError {
249253
name: &'static str,
250254
var_name: &'static str,
251255
},
256+
#[error("config experimental features not enabled")]
257+
ConfigExperimentalFeaturesNotEnabled {
258+
config_file: Utf8PathBuf,
259+
missing: Vec<ConfigExperimental>,
260+
},
252261
#[error("filterset parse error")]
253262
FiltersetParseError {
254263
all_errors: Vec<FiltersetParseErrors>,
@@ -467,9 +476,7 @@ impl ExpectedError {
467476
| Self::DebugExtractWriteError { .. } => NextestExitCode::WRITE_OUTPUT_ERROR,
468477
#[cfg(feature = "self-update")]
469478
Self::UpdateError { .. } => NextestExitCode::UPDATE_ERROR,
470-
Self::ExperimentalFeatureNotEnabled { .. } => {
471-
NextestExitCode::EXPERIMENTAL_FEATURE_NOT_ENABLED
472-
}
479+
Self::ExperimentalFeatureNotEnabled { .. } | Self::ConfigExperimentalFeaturesNotEnabled { .. } => NextestExitCode::EXPERIMENTAL_FEATURE_NOT_ENABLED,
473480
Self::FiltersetParseError { .. } => NextestExitCode::INVALID_FILTERSET,
474481
}
475482
}
@@ -870,13 +877,16 @@ impl ExpectedError {
870877
error!("test run failed");
871878
None
872879
}
873-
Self::NoTestsRun { is_default } => {
880+
Self::NoTestsRun { mode, is_default } => {
874881
let hint_str = if *is_default {
875882
"\n(hint: use `--no-tests` to customize)"
876883
} else {
877884
""
878885
};
879-
error!("no tests to run{hint_str}");
886+
error!(
887+
"no {} to run{hint_str}",
888+
plural::tests_plural_if(*mode, true),
889+
);
880890
None
881891
}
882892
Self::ShowTestGroupsError { err } => {
@@ -936,6 +946,34 @@ impl ExpectedError {
936946
);
937947
None
938948
}
949+
Self::ConfigExperimentalFeaturesNotEnabled {
950+
config_file,
951+
missing,
952+
} => {
953+
if missing.len() == 1 {
954+
let env_hint = if let Some(env_var) = missing[0].env_var() {
955+
format!(" or set {env_var}=1")
956+
} else {
957+
String::new()
958+
};
959+
error!(
960+
"experimental feature not enabled: {}\n(hint: add to the {} list in {}{})",
961+
missing[0].style(styles.bold),
962+
"experimental".style(styles.bold),
963+
config_file.style(styles.bold),
964+
env_hint,
965+
);
966+
} else {
967+
error!(
968+
"experimental features not enabled: {}\n(hint: add to the {} list in {})",
969+
missing.iter().map(|f| f.style(styles.bold)).join(", "),
970+
"experimental".style(styles.bold),
971+
config_file.style(styles.bold),
972+
);
973+
// TODO: env_hint?
974+
}
975+
None
976+
}
939977
Self::FiltersetParseError { all_errors } => {
940978
for errors in all_errors {
941979
for single_error in &errors.errors {

fixture-data/src/models.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -175,4 +175,5 @@ pub enum TestCaseFixtureProperty {
175175
MatchesCdylib = 4,
176176
MatchesTestMultiplyTwo = 8,
177177
NotInDefaultSetUnix = 16,
178+
IsBenchmark = 32,
178179
}

fixture-data/src/nextest_tests.rs

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,10 @@ pub static EXPECTED_TEST_SUITES: LazyLock<IdOrdMap<TestSuiteFixture>> = LazyLock
131131
"my-bench",
132132
BuildPlatform::Target,
133133
id_ord_map! {
134-
TestCaseFixture::new("bench_add_two", TestCaseFixtureStatus::Pass),
134+
TestCaseFixture::new("bench_add_two", TestCaseFixtureStatus::Pass)
135+
.with_property(TestCaseFixtureProperty::IsBenchmark),
136+
TestCaseFixture::new("bench_ignored", TestCaseFixtureStatus::IgnoredPass)
137+
.with_property(TestCaseFixtureProperty::IsBenchmark),
135138
TestCaseFixture::new("tests::test_execute_bin", TestCaseFixtureStatus::Pass),
136139
},
137140
),

fixtures/nextest-tests/.cargo/config

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,8 @@ __NEXTEST_TESTING_EXTRA_CONFIG_OVERRIDE_FORCE_IN_MAIN = { value = "test-FAILED-v
2828
__NEXTEST_TESTING_EXTRA_CONFIG_OVERRIDE_FORCE_IN_BOTH = { value = "test-FAILED-value-set-by-main-config", force = true }
2929
__NEXTEST_TESTING_EXTRA_CONFIG_OVERRIDE_FORCE_NONE = { value = "test-FAILED-value-set-by-main-config" }
3030
__NEXTEST_TESTING_EXTRA_CONFIG_OVERRIDE_FORCE_FALSE = { value = "test-FAILED-value-set-by-main-config", force = true }
31+
32+
[term]
33+
# Disable OSC 9;4 terminal integration for output reproducibility in case it's enabled globally.
34+
progress.when = "auto"
35+
progress.term-integration = false

fixtures/nextest-tests/.config/nextest.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
nextest-version = { required = "0.9.54", recommended = "0.9.56" }
66

77
# This must be on one line for test_setup_scripts_not_enabled to work.
8-
experimental = ["setup-scripts", "wrapper-scripts"]
8+
experimental = ["benchmarks", "setup-scripts", "wrapper-scripts"]
99

1010
[profile.default]
1111
# disable fail-fast to ensure a deterministic test run

fixtures/nextest-tests/benches/my-bench.rs

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,17 @@
55

66
extern crate test;
77

8-
// TODO: add benchmarks
98
#[bench]
109
fn bench_add_two(b: &mut test::Bencher) {
1110
b.iter(|| 2 + 2);
1211
}
1312

13+
#[bench]
14+
#[ignore]
15+
fn bench_ignored(b: &mut test::Bencher) {
16+
b.iter(|| 2 + 2);
17+
}
18+
1419
#[cfg(test)]
1520
mod tests {
1621
/// Test that a binary can be successfully executed.

integration-tests/tests/integration/fixtures.rs

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -174,6 +174,7 @@ pub enum RunProperty {
174174
WithSkipCdylibFilter = 4,
175175
// --exact test_multiply_two tests::test_multiply_two_cdylib
176176
WithMultiplyTwoExactFilter = 8,
177+
Benchmarks = 16,
177178
}
178179

179180
fn debug_run_properties(properties: u64) -> String {
@@ -190,6 +191,9 @@ fn debug_run_properties(properties: u64) -> String {
190191
if properties & RunProperty::WithMultiplyTwoExactFilter as u64 != 0 {
191192
ret.push_str("with-exact-filter ");
192193
}
194+
if properties & RunProperty::Benchmarks as u64 != 0 {
195+
ret.push_str("benchmarks ");
196+
}
193197
ret
194198
}
195199

@@ -229,6 +233,13 @@ pub fn check_run_output(stderr: &[u8], properties: u64) {
229233
for test in &fixture.test_cases {
230234
let name = format!("{} {}", binary_id, test.name);
231235

236+
if properties & RunProperty::Benchmarks as u64 != 0 {
237+
// We don't consider skipped tests while running benchmarks.
238+
if !test.has_property(TestCaseFixtureProperty::IsBenchmark) {
239+
continue;
240+
}
241+
}
242+
232243
if test.has_property(TestCaseFixtureProperty::NotInDefaultSet)
233244
&& properties & RunProperty::WithDefaultFilter as u64 != 0
234245
{
@@ -343,7 +354,12 @@ pub fn check_run_output(stderr: &[u8], properties: u64) {
343354
}
344355
}
345356

346-
let tests_str = if run_count == 1 { "test" } else { "tests" };
357+
let tests_str = match (run_count, properties & RunProperty::Benchmarks as u64 != 0) {
358+
(1, false) => "test",
359+
(1, true) => "benchmark",
360+
(_, false) => "tests",
361+
(_, true) => "benchmarks",
362+
};
347363
let leak_fail_regex_str = if leak_fail_count > 0 {
348364
format!(r" \({leak_fail_count} due to being leaky\)")
349365
} else {

integration-tests/tests/integration/main.rs

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -967,6 +967,30 @@ fn run_archive(archive_file: &Utf8Path) -> (TempProject, Utf8PathBuf) {
967967
(p2, extract_to.join("target"))
968968
}
969969

970+
#[test]
971+
fn test_bench() {
972+
set_env_vars();
973+
let p = TempProject::new().unwrap();
974+
let output = CargoNextestCli::for_test()
975+
.args([
976+
"--manifest-path",
977+
p.manifest_path().as_str(),
978+
"bench",
979+
// Set the dev profile here to avoid a rebuild.
980+
"--cargo-profile",
981+
"dev",
982+
"--no-capture",
983+
])
984+
.unchecked(true)
985+
.output();
986+
assert_eq!(
987+
output.exit_status.code(),
988+
Some(0),
989+
"correct exit code for command\n{output}",
990+
);
991+
check_run_output(&output.stderr, RunProperty::Benchmarks as u64);
992+
}
993+
970994
#[test]
971995
fn test_show_config_test_groups() {
972996
set_env_vars();

integration-tests/tests/integration/snapshots/integration__list_binaries_only.snap

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ source: integration-tests/tests/integration/main.rs
33
expression: output.stderr_as_str()
44
snapshot_kind: text
55
---
6-
info: experimental features enabled: setup-scripts, wrapper-scripts
6+
info: experimental features enabled: setup-scripts, wrapper-scripts, benchmarks
77
error: operator didn't match any binary names
88
╭────
99
1binary(unknown) & binary_id(unknown) & binary(=unknown) | binary_id(=unknown)

0 commit comments

Comments (0)