Added --skip-benchmarks command-line option. (#2408)
Closes #2392

Co-authored-by: Martin Hořeňovský <martin.horenovsky@gmail.com>
parent f526ff0fc3 · commit 291b35b389
docs/command-line.md
@ -19,6 +19,7 @@
 [Specify a seed for the Random Number Generator](#specify-a-seed-for-the-random-number-generator)<br>
 [Identify framework and version according to the libIdentify standard](#identify-framework-and-version-according-to-the-libidentify-standard)<br>
 [Wait for key before continuing](#wait-for-key-before-continuing)<br>
+[Skip benchmarks](#skip-benchmarks)<br>
 [Specify the number of benchmark samples to collect](#specify-the-number-of-benchmark-samples-to-collect)<br>
 [Specify the number of resamples for bootstrapping](#specify-the-number-of-resamples-for-bootstrapping)<br>
 [Specify the confidence-interval for bootstrapping](#specify-the-confidence-interval-for-bootstrapping)<br>
@ -62,6 +63,7 @@ Click one of the following links to take you straight to that option - or scroll
 <a href="#rng-seed"> ` --rng-seed`</a><br />
 <a href="#libidentify"> ` --libidentify`</a><br />
 <a href="#wait-for-keypress"> ` --wait-for-keypress`</a><br />
+<a href="#skip-benchmarks"> ` --skip-benchmarks`</a><br />
 <a href="#benchmark-samples"> ` --benchmark-samples`</a><br />
 <a href="#benchmark-resamples"> ` --benchmark-resamples`</a><br />
 <a href="#benchmark-confidence-interval"> ` --benchmark-confidence-interval`</a><br />
@ -370,6 +372,16 @@ See [The LibIdentify repo for more information and examples](https://github.com/
 Will cause the executable to print a message and wait until the return/ enter key is pressed before continuing -
 either before running any tests, after running all tests - or both, depending on the argument.
 
+<a id="skip-benchmarks"></a>
+## Skip all benchmarks
+<pre>--skip-benchmarks</pre>
+
+> [Introduced](https://github.com/catchorg/Catch2/issues/2408) in Catch X.Y.Z.
+
+This flag tells Catch2 to skip running all benchmarks. Benchmarks in this
+case mean code blocks in `BENCHMARK` and `BENCHMARK_ADVANCED` macros, not
+test cases with the `[!benchmark]` tag.
+
 <a id="benchmark-samples"></a>
 ## Specify the number of benchmark samples to collect
 <pre>--benchmark-samples <# of samples></pre>
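A hedged illustration of the documented distinction: under `--skip-benchmarks` a `[!benchmark]`-tagged test case is still selected and its assertions still run; only the `BENCHMARK` body is suppressed. Minimal sketch mirroring the new self-test further below (the test name and `runs` counter are invented for illustration):

    #include <catch2/catch_test_macros.hpp>
    #include <catch2/benchmark/catch_benchmark.hpp>

    // Invoke as: <test-exe> "still runs when benchmarks are skipped" --skip-benchmarks
    // The [!benchmark] tag only hides the test from a default run; the new flag
    // is what prevents the BENCHMARK block itself from executing.
    TEST_CASE("still runs when benchmarks are skipped", "[!benchmark]") {
        int runs = 0;
        BENCHMARK("count invocations") { ++runs; };
        CHECK(runs == 0); // with --skip-benchmarks the lambda above was never invoked
    }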
src/catch2/benchmark/catch_benchmark.hpp
@ -95,8 +95,11 @@ namespace Catch {
             // sets lambda to be used in fun *and* executes benchmark!
             template <typename Fun, std::enable_if_t<!Detail::is_related<Fun, Benchmark>::value, int> = 0>
             Benchmark & operator=(Fun func) {
+                auto const* cfg = getCurrentContext().getConfig();
+                if (!cfg->skipBenchmarks()) {
                 fun = Detail::BenchmarkFunction(func);
                 run();
+                }
                 return *this;
             }
 
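Why gating `operator=` suffices: the `BENCHMARK` macro expands to an assignment of the user's lambda into a `Benchmark` object, and that assignment both stores the callable and immediately calls `run()`. A standalone sketch of the pattern under that assumption (`FakeConfig` and `FakeBenchmark` are invented names, not Catch2's real types):

    #include <functional>
    #include <iostream>

    struct FakeConfig { bool skipBenchmarks = false; };

    struct FakeBenchmark {
        std::function<void()> fun;
        FakeConfig const* cfg;

        // Mirrors the patched operator=: assignment stores the body and runs it,
        // unless the config says benchmarks are skipped.
        template <typename Fun>
        FakeBenchmark& operator=(Fun func) {
            if (!cfg->skipBenchmarks) {
                fun = std::function<void()>(func);
                fun(); // stands in for Benchmark::run()
            }
            return *this;
        }
    };

    int main() {
        FakeConfig cfg{ /*skipBenchmarks=*/true };
        FakeBenchmark bench{ {}, &cfg };
        bench = [] { std::cout << "benchmark body\n"; }; // never prints: skipped
    }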
src/catch2/catch_config.cpp
@ -152,6 +152,7 @@ namespace Catch {
     bool Config::showInvisibles() const { return m_data.showInvisibles; }
     Verbosity Config::verbosity() const { return m_data.verbosity; }
 
+    bool Config::skipBenchmarks() const { return m_data.skipBenchmarks; }
     bool Config::benchmarkNoAnalysis() const { return m_data.benchmarkNoAnalysis; }
     unsigned int Config::benchmarkSamples() const { return m_data.benchmarkSamples; }
     double Config::benchmarkConfidenceInterval() const { return m_data.benchmarkConfidenceInterval; }
src/catch2/catch_config.hpp
@ -64,6 +64,7 @@
         unsigned int shardCount = 1;
         unsigned int shardIndex = 0;
 
+        bool skipBenchmarks = false;
         bool benchmarkNoAnalysis = false;
         unsigned int benchmarkSamples = 100;
         double benchmarkConfidenceInterval = 0.95;
@ -129,6 +130,7 @@
         int abortAfter() const override;
         bool showInvisibles() const override;
         Verbosity verbosity() const override;
+        bool skipBenchmarks() const override;
         bool benchmarkNoAnalysis() const override;
         unsigned int benchmarkSamples() const override;
         double benchmarkConfidenceInterval() const override;
src/catch2/interfaces/catch_interfaces_config.hpp
@ -88,6 +88,7 @@
         virtual std::vector<std::string> const& getSectionsToRun() const = 0;
         virtual Verbosity verbosity() const = 0;
 
+        virtual bool skipBenchmarks() const = 0;
         virtual bool benchmarkNoAnalysis() const = 0;
         virtual unsigned int benchmarkSamples() const = 0;
         virtual double benchmarkConfidenceInterval() const = 0;
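The three preceding hunks follow Catch2's usual config plumbing: a plain data member on the config data struct, a `Config` override that reads it, and a pure-virtual accessor on the config interface that consumers (such as `Benchmark::operator=` above) call through. A condensed sketch of that layering (names abbreviated and invented; not the real headers):

    #include <cassert>

    // Interface that consumers depend on (cf. catch_interfaces_config.hpp).
    struct IConfigSketch {
        virtual ~IConfigSketch() = default;
        virtual bool skipBenchmarks() const = 0;
    };

    // Plain option storage filled by the command-line parser (cf. ConfigData).
    struct ConfigDataSketch {
        bool skipBenchmarks = false;
    };

    // Concrete config forwarding to the stored data (cf. Config).
    class ConfigSketch : public IConfigSketch {
        ConfigDataSketch m_data;
    public:
        explicit ConfigSketch(ConfigDataSketch data) : m_data(data) {}
        bool skipBenchmarks() const override { return m_data.skipBenchmarks; }
    };

    int main() {
        ConfigSketch cfg{ ConfigDataSketch{ /*skipBenchmarks=*/true } };
        IConfigSketch const* icfg = &cfg; // benchmark code sees only the interface
        assert(icfg->skipBenchmarks());
    }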
src/catch2/internal/catch_commandline.cpp
@ -296,6 +296,9 @@
             | Opt( setWaitForKeypress, "never|start|exit|both" )
                 ["--wait-for-keypress"]
                 ( "waits for a keypress before exiting" )
+            | Opt( config.skipBenchmarks)
+                ["--skip-benchmarks"]
+                ( "disable running benchmarks")
             | Opt( config.benchmarkSamples, "samples" )
                 ["--benchmark-samples"]
                 ( "number of samples to collect (default: 100)" )
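For readers unfamiliar with the bundled Clara-style syntax above: binding an `Opt` to a `bool` with no value hint yields a pure flag, so passing `--skip-benchmarks` simply sets `ConfigData::skipBenchmarks` to true. A toy, self-contained sketch of that builder pattern (`BoolOpt` is invented for illustration; it is not Clara's implementation):

    #include <cassert>
    #include <string>
    #include <vector>

    struct BoolOpt {
        bool* target;
        std::string name;
        std::string desc;

        BoolOpt(bool& t) : target(&t) {}
        BoolOpt& operator[](std::string n) { name = std::move(n); return *this; }
        BoolOpt& operator()(std::string d) { desc = std::move(d); return *this; }

        // A flag option takes no value: seeing its name sets the bound bool.
        void parse(const std::vector<std::string>& args) {
            for (auto const& a : args)
                if (a == name) *target = true;
        }
    };

    int main() {
        bool skipBenchmarks = false;
        auto opt = BoolOpt(skipBenchmarks)
            ["--skip-benchmarks"]
            ("disable running benchmarks");
        opt.parse({"--reporter", "console", "--skip-benchmarks"});
        assert(skipBenchmarks);
    }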
tests/CMakeLists.txt
@ -359,6 +359,18 @@ add_test(NAME CheckConvenienceHeaders
     ${PYTHON_EXECUTABLE} ${CATCH_DIR}/tools/scripts/checkConvenienceHeaders.py
 )
 
+add_test(NAME "Benchmarking::SkipBenchmarkMacros"
+  COMMAND
+    $<TARGET_FILE:SelfTest> "Skip benchmark macros"
+      --reporter console
+      --skip-benchmarks
+)
+set_tests_properties("Benchmarking::SkipBenchmarkMacros"
+  PROPERTIES
+    PASS_REGULAR_EXPRESSION "All tests passed \\(2 assertions in 1 test case\\)"
+    FAIL_REGULAR_EXPRESSION "benchmark name"
+)
+
 
 add_test(NAME "Benchmarking::FailureReporting::OptimizedOut"
   COMMAND
tests/SelfTest/UsageTests/Benchmark.tests.cpp
@ -152,3 +152,20 @@ TEST_CASE("Benchmark containers", "[!benchmark]") {
         };
     }
 }
+
+TEST_CASE("Skip benchmark macros", "[!benchmark]") {
+    std::vector<int> v;
+    BENCHMARK("fill vector") {
+        v.emplace_back(1);
+        v.emplace_back(2);
+        v.emplace_back(3);
+    };
+    REQUIRE(v.size() == 0);
+
+    std::size_t counter{0};
+    BENCHMARK_ADVANCED("construct vector")(Catch::Benchmark::Chronometer meter) {
+        std::vector<Catch::Benchmark::storage_for<std::string>> storage(meter.runs());
+        meter.measure([&](int i) { storage[i].construct("thing"); counter++; });
+    };
+    REQUIRE(counter == 0);
+}