Allow configuring of benchmark warmup time

commit 3c7e737a7b (parent e880da93bd)
@@ -24,6 +24,7 @@
 [Specify the number of resamples for bootstrapping](#specify-the-number-of-resamples-for-bootstrapping)<br>
 [Specify the confidence-interval for bootstrapping](#specify-the-confidence-interval-for-bootstrapping)<br>
 [Disable statistical analysis of collected benchmark samples](#disable-statistical-analysis-of-collected-benchmark-samples)<br>
+[Specify the amount of time in milliseconds spent on warming up each test](#specify-the-amount-of-time-in-milliseconds-spent-on-warming-up-each-test)<br>
 [Usage](#usage)<br>
 [Specify the section to run](#specify-the-section-to-run)<br>
 [Filenames as tags](#filenames-as-tags)<br>
@@ -64,6 +65,7 @@ Click one of the following links to take you straight to that option - or scroll
 <a href="#benchmark-resamples"> ` --benchmark-resamples`</a><br />
 <a href="#benchmark-confidence-interval"> ` --benchmark-confidence-interval`</a><br />
 <a href="#benchmark-no-analysis"> ` --benchmark-no-analysis`</a><br />
+<a href="#benchmark-warmup-time"> ` --benchmark-warmup-time`</a><br />
 <a href="#use-colour"> ` --use-colour`</a><br />

 </br>
@@ -317,6 +319,14 @@ Must be between 0 and 1 and defaults to 0.95.
 When this flag is specified no bootstrapping or any other statistical analysis is performed.
 Instead the user code is only measured and the plain mean from the samples is reported.

+<a id="benchmark-warmup-time"></a>
+## Specify the amount of time in milliseconds spent on warming up each test
+<pre>--benchmark-warmup-time</pre>
+
+> [Introduced](https://github.com/catchorg/Catch2/pull/1844) in Catch X.Y.Z.
+
+Configure the amount of time spent warming up each test.
+
 <a id="usage"></a>
 ## Usage
 <pre>-h, -?, --help</pre>
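For illustration only (not part of the diff): with a benchmark-enabled Catch2 build, the new flag is passed to the test binary alongside the usual test selection arguments. The test case, tag, binary name, and include path below are assumptions for the example.

// Hypothetical example benchmark; compile against Catch2 with benchmarking
// available (Catch2 v2 additionally needs CATCH_CONFIG_ENABLE_BENCHMARKING).
#include <catch2/catch.hpp>   // adjust the include to your Catch2 version
#include <vector>

TEST_CASE("vector growth", "[!benchmark]") {
    BENCHMARK("push_back 1000 ints") {
        std::vector<int> v;
        for (int i = 0; i < 1000; ++i)
            v.push_back(i);
        return v.size();      // return the result so the work is not optimized away
    };
}

// Run with a longer warmup than the 100 ms default, for example:
//   ./tests "[!benchmark]" --benchmark-warmup-time=200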
@@ -44,10 +44,10 @@ namespace Catch {
         template <typename Clock>
         ExecutionPlan<FloatDuration<Clock>> prepare(const IConfig &cfg, Environment<FloatDuration<Clock>> env) const {
             auto min_time = env.clock_resolution.mean * Detail::minimum_ticks;
-            auto run_time = std::max(min_time, std::chrono::duration_cast<decltype(min_time)>(Detail::warmup_time));
+            auto run_time = std::max(min_time, std::chrono::duration_cast<decltype(min_time)>(cfg.benchmarkWarmupTime()));
             auto&& test = Detail::run_for_at_least<Clock>(std::chrono::duration_cast<ClockDuration<Clock>>(run_time), 1, fun);
             int new_iters = static_cast<int>(std::ceil(min_time * test.iterations / test.elapsed));
-            return { new_iters, test.elapsed / test.iterations * new_iters * cfg.benchmarkSamples(), fun, std::chrono::duration_cast<FloatDuration<Clock>>(Detail::warmup_time), Detail::warmup_iterations };
+            return { new_iters, test.elapsed / test.iterations * new_iters * cfg.benchmarkSamples(), fun, std::chrono::duration_cast<FloatDuration<Clock>>(cfg.benchmarkWarmupTime()), Detail::warmup_iterations };
         }

         template <typename Clock = default_clock>
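Spelled out, the replaced line picks the warmup run length as the larger of a minimum derived from the measured clock resolution and the user-configured warmup time, which previously was the hard-coded Detail::warmup_time. Below is a self-contained sketch of that computation in plain std::chrono, with made-up numbers and stand-in names (clock_resolution_mean, minimum_ticks) for the Detail constants; it is not Catch2 code.

#include <algorithm>
#include <chrono>
#include <iostream>

int main() {
    using FloatNs = std::chrono::duration<double, std::nano>;

    FloatNs clock_resolution_mean{30.0};          // pretend the clock ticks every ~30 ns
    constexpr int minimum_ticks = 1000;           // stand-in for Detail::minimum_ticks
    std::chrono::milliseconds warmup_time{100};   // what --benchmark-warmup-time now controls

    // Warm up for at least `minimum_ticks` clock resolutions, but never less
    // than the configured warmup time (the same std::max as in the diff).
    auto min_time = clock_resolution_mean * minimum_ticks;
    auto run_time = std::max(min_time, std::chrono::duration_cast<decltype(min_time)>(warmup_time));

    std::cout << "warmup run time: " << run_time.count() / 1e6 << " ms\n";
}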
@@ -210,6 +210,9 @@ namespace Catch {
             | Opt( config.benchmarkNoAnalysis )
                 ["--benchmark-no-analysis"]
                 ( "perform only measurements; do not perform any analysis" )
+            | Opt( config.benchmarkWarmupTime, "benchmarkWarmupTime" )
+                ["--benchmark-warmup-time"]
+                ( "amount of time in milliseconds spent on warming up each test (default: 100)" )
             | Arg( config.testsOrTags, "test name|pattern|tags" )
                 ( "which test or tests to use" );

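The Clara option added above simply binds an integral member (milliseconds) that gets filled in when --benchmark-warmup-time is present; the hint string and description are what appears in --help. As a rough, hand-rolled stand-in for that binding (not Catch2's actual parser; BenchConfig and parseWarmupTime are hypothetical names):

#include <cstring>
#include <cstdlib>
#include <iostream>

// Simplified stand-in for the relevant slice of the config data.
struct BenchConfig {
    int benchmarkWarmupTime = 100; // milliseconds, matching the documented default
};

// Minimal parse of "--benchmark-warmup-time=<ms>", standing in for the Clara binding.
bool parseWarmupTime(int argc, char** argv, BenchConfig& cfg) {
    const char prefix[] = "--benchmark-warmup-time=";
    for (int i = 1; i < argc; ++i) {
        if (std::strncmp(argv[i], prefix, sizeof(prefix) - 1) == 0) {
            cfg.benchmarkWarmupTime = std::atoi(argv[i] + sizeof(prefix) - 1);
            return true;
        }
    }
    return false;
}

int main(int argc, char** argv) {
    BenchConfig cfg;
    parseWarmupTime(argc, argv, cfg);
    std::cout << "warmup time: " << cfg.benchmarkWarmupTime << " ms\n";
}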
@@ -75,10 +75,11 @@ namespace Catch {
     bool Config::showInvisibles() const { return m_data.showInvisibles; }
     Verbosity Config::verbosity() const { return m_data.verbosity; }

     bool Config::benchmarkNoAnalysis() const { return m_data.benchmarkNoAnalysis; }
     int Config::benchmarkSamples() const { return m_data.benchmarkSamples; }
     double Config::benchmarkConfidenceInterval() const { return m_data.benchmarkConfidenceInterval; }
     unsigned int Config::benchmarkResamples() const { return m_data.benchmarkResamples; }
+    std::chrono::milliseconds Config::benchmarkWarmupTime() const { return std::chrono::milliseconds(m_data.benchmarkWarmupTime); }

     IStream const* Config::openStream() {
         return Catch::makeStream(m_data.outputFilename);
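The new getter converts the stored tick count into std::chrono::milliseconds, so callers such as the prepare() hunk earlier can duration_cast it without knowing the underlying unit. A tiny standalone illustration of that conversion, with made-up values:

#include <chrono>
#include <iostream>

int main() {
    // What the config data stores: a plain integral tick count (milliseconds::rep).
    std::chrono::milliseconds::rep stored = 100;

    // What the getter hands out: a strongly typed duration.
    std::chrono::milliseconds warmup{stored};

    // Callers can convert it to whatever resolution they work in.
    auto as_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(warmup);
    std::cout << warmup.count() << " ms = " << as_ns.count() << " ns\n";
}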
@@ -43,6 +43,7 @@ namespace Catch {
         unsigned int benchmarkSamples = 100;
         double benchmarkConfidenceInterval = 0.95;
         unsigned int benchmarkResamples = 100000;
+        std::chrono::milliseconds::rep benchmarkWarmupTime = 100;

         Verbosity verbosity = Verbosity::Normal;
         WarnAbout::What warnings = WarnAbout::Nothing;
@@ -108,6 +109,7 @@ namespace Catch {
         int benchmarkSamples() const override;
         double benchmarkConfidenceInterval() const override;
         unsigned int benchmarkResamples() const override;
+        std::chrono::milliseconds benchmarkWarmupTime() const override;

     private:

@@ -11,6 +11,7 @@
 #include <catch2/catch_common.h>
 #include <catch2/catch_option.hpp>

+#include <chrono>
 #include <iosfwd>
 #include <string>
 #include <vector>
@@ -81,6 +82,7 @@ namespace Catch {
         virtual int benchmarkSamples() const = 0;
         virtual double benchmarkConfidenceInterval() const = 0;
         virtual unsigned int benchmarkResamples() const = 0;
+        virtual std::chrono::milliseconds benchmarkWarmupTime() const = 0;
     };

     using IConfigPtr = std::shared_ptr<IConfig const>;
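Across the config hunks the shape is consistent: a plain std::chrono::milliseconds::rep defaulting to 100 in the data struct (easy for the command-line option to write into), a pure virtual accessor on the config interface, and an override that wraps the stored value in a typed duration. A compilable sketch of that pattern, using stand-in class names rather than the real Catch2 types:

#include <chrono>
#include <iostream>

// Stand-in for the config interface: benchmark code only ever sees a typed duration.
struct IBenchConfig {
    virtual ~IBenchConfig() = default;
    virtual std::chrono::milliseconds benchmarkWarmupTime() const = 0;
};

// Stand-in for the config data: the raw tick count the CLI option writes into.
struct BenchConfigData {
    std::chrono::milliseconds::rep benchmarkWarmupTime = 100; // default: 100 ms
};

// Stand-in for the concrete config: wraps the stored rep in std::chrono::milliseconds.
class BenchConfig : public IBenchConfig {
public:
    explicit BenchConfig(BenchConfigData data) : m_data(data) {}
    std::chrono::milliseconds benchmarkWarmupTime() const override {
        return std::chrono::milliseconds(m_data.benchmarkWarmupTime);
    }
private:
    BenchConfigData m_data;
};

int main() {
    BenchConfig cfg{ BenchConfigData{ 250 } }; // as if --benchmark-warmup-time=250 were given
    std::cout << cfg.benchmarkWarmupTime().count() << " ms\n";
}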
@@ -1152,6 +1152,8 @@ CmdLine.tests.cpp:<line number>: passed: cli.parse({ "test", "--benchmark-confid
 CmdLine.tests.cpp:<line number>: passed: config.benchmarkConfidenceInterval == Catch::Approx(0.99) for: 0.99 == Approx( 0.99 )
 CmdLine.tests.cpp:<line number>: passed: cli.parse({ "test", "--benchmark-no-analysis" }) for: {?}
 CmdLine.tests.cpp:<line number>: passed: config.benchmarkNoAnalysis for: true
+CmdLine.tests.cpp:<line number>: passed: cli.parse({ "test", "--benchmark-warmup-time=10" }) for: {?}
+CmdLine.tests.cpp:<line number>: passed: config.benchmarkWarmupTime == 10 for: 10 == 10
 Misc.tests.cpp:<line number>: passed: std::tuple_size<TestType>::value >= 1 for: 3 >= 1
 Misc.tests.cpp:<line number>: passed: std::tuple_size<TestType>::value >= 1 for: 2 >= 1
 Misc.tests.cpp:<line number>: passed: std::tuple_size<TestType>::value >= 1 for: 1 >= 1
@@ -1381,5 +1381,5 @@ due to unexpected exception with message:

 ===============================================================================
 test cases: 328 | 254 passed | 70 failed | 4 failed as expected
-assertions: 1827 | 1675 passed | 131 failed | 21 failed as expected
+assertions: 1829 | 1677 passed | 131 failed | 21 failed as expected

@@ -8284,7 +8284,7 @@ with expansion:
 -------------------------------------------------------------------------------
 Process can be configured on command line
   Benchmark options
-    resamples
+    confidence-interval
 -------------------------------------------------------------------------------
 CmdLine.tests.cpp:<line number>
 ...............................................................................
@@ -8302,7 +8302,7 @@ with expansion:
 -------------------------------------------------------------------------------
 Process can be configured on command line
   Benchmark options
-    resamples
+    no-analysis
 -------------------------------------------------------------------------------
 CmdLine.tests.cpp:<line number>
 ...............................................................................
@@ -8317,6 +8317,24 @@ CmdLine.tests.cpp:<line number>: PASSED:
 with expansion:
   true

+-------------------------------------------------------------------------------
+Process can be configured on command line
+  Benchmark options
+    warmup-time
+-------------------------------------------------------------------------------
+CmdLine.tests.cpp:<line number>
+...............................................................................
+
+CmdLine.tests.cpp:<line number>: PASSED:
+  CHECK( cli.parse({ "test", "--benchmark-warmup-time=10" }) )
+with expansion:
+  {?}
+
+CmdLine.tests.cpp:<line number>: PASSED:
+  REQUIRE( config.benchmarkWarmupTime == 10 )
+with expansion:
+  10 == 10
+
 -------------------------------------------------------------------------------
 Product with differing arities - std::tuple<int, double, float>
 -------------------------------------------------------------------------------
@@ -14283,5 +14301,5 @@ Misc.tests.cpp:<line number>: PASSED:

 ===============================================================================
 test cases: 328 | 238 passed | 86 failed | 4 failed as expected
-assertions: 1844 | 1675 passed | 148 failed | 21 failed as expected
+assertions: 1846 | 1677 passed | 148 failed | 21 failed as expected

@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <testsuitesloose text artifact
 >
-<testsuite name="<exe-name>" errors="17" failures="132" tests="1845" hostname="tbd" time="{duration}" timestamp="{iso8601-timestamp}">
+<testsuite name="<exe-name>" errors="17" failures="132" tests="1847" hostname="tbd" time="{duration}" timestamp="{iso8601-timestamp}">
 <properties>
 <property name="filters" value="~[!nonportable]~[!benchmark]~[approvals] *"/>
 <property name="random-seed" value="1"/>
@@ -1024,8 +1024,9 @@ Message.tests.cpp:<line number>
 <testcase classname="<exe-name>.global" name="Process can be configured on command line/use-colour/error" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/samples" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/resamples" time="{duration}"/>
-<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/resamples" time="{duration}"/>
-<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/resamples" time="{duration}"/>
+<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/confidence-interval" time="{duration}"/>
+<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/no-analysis" time="{duration}"/>
+<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/warmup-time" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Product with differing arities - std::tuple<int, double, float>" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Product with differing arities - std::tuple<int, double>" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Product with differing arities - std::tuple<int>" time="{duration}"/>
@@ -64,8 +64,9 @@
 <testCase name="Process can be configured on command line/use-colour/error" duration="{duration}"/>
 <testCase name="Process can be configured on command line/Benchmark options/samples" duration="{duration}"/>
 <testCase name="Process can be configured on command line/Benchmark options/resamples" duration="{duration}"/>
-<testCase name="Process can be configured on command line/Benchmark options/resamples" duration="{duration}"/>
-<testCase name="Process can be configured on command line/Benchmark options/resamples" duration="{duration}"/>
+<testCase name="Process can be configured on command line/Benchmark options/confidence-interval" duration="{duration}"/>
+<testCase name="Process can be configured on command line/Benchmark options/no-analysis" duration="{duration}"/>
+<testCase name="Process can be configured on command line/Benchmark options/warmup-time" duration="{duration}"/>
 <testCase name="Test with special, characters "in name" duration="{duration}"/>
 </file>
 <file path="tests/<exe-name>/IntrospectiveTests/GeneratorsImpl.tests.cpp">
|
@ -2226,6 +2226,10 @@ ok {test-number} - config.benchmarkConfidenceInterval == Catch::Approx(0.99) for
|
|||||||
ok {test-number} - cli.parse({ "test", "--benchmark-no-analysis" }) for: {?}
|
ok {test-number} - cli.parse({ "test", "--benchmark-no-analysis" }) for: {?}
|
||||||
# Process can be configured on command line
|
# Process can be configured on command line
|
||||||
ok {test-number} - config.benchmarkNoAnalysis for: true
|
ok {test-number} - config.benchmarkNoAnalysis for: true
|
||||||
|
# Process can be configured on command line
|
||||||
|
ok {test-number} - cli.parse({ "test", "--benchmark-warmup-time=10" }) for: {?}
|
||||||
|
# Process can be configured on command line
|
||||||
|
ok {test-number} - config.benchmarkWarmupTime == 10 for: 10 == 10
|
||||||
# Product with differing arities - std::tuple<int, double, float>
|
# Product with differing arities - std::tuple<int, double, float>
|
||||||
ok {test-number} - std::tuple_size<TestType>::value >= 1 for: 3 >= 1
|
ok {test-number} - std::tuple_size<TestType>::value >= 1 for: 3 >= 1
|
||||||
# Product with differing arities - std::tuple<int, double>
|
# Product with differing arities - std::tuple<int, double>
|
||||||
@@ -3680,5 +3684,5 @@ ok {test-number} - q3 == 23. for: 23.0 == 23.0
 ok {test-number} -
 # xmlentitycheck
 ok {test-number} -
-1..1836
+1..1838

|
@ -10354,7 +10354,7 @@ Nor would this
|
|||||||
<OverallResults successes="2" failures="0" expectedFailures="0"/>
|
<OverallResults successes="2" failures="0" expectedFailures="0"/>
|
||||||
</Section>
|
</Section>
|
||||||
<Section name="Benchmark options" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
|
<Section name="Benchmark options" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
|
||||||
<Section name="resamples" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
|
<Section name="confidence-interval" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
|
||||||
<Expression success="true" type="CHECK" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
|
<Expression success="true" type="CHECK" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
|
||||||
<Original>
|
<Original>
|
||||||
cli.parse({ "test", "--benchmark-confidence-interval=0.99" })
|
cli.parse({ "test", "--benchmark-confidence-interval=0.99" })
|
||||||
@@ -10376,7 +10376,7 @@ Nor would this
 <OverallResults successes="2" failures="0" expectedFailures="0"/>
 </Section>
 <Section name="Benchmark options" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
-<Section name="resamples" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Section name="no-analysis" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
 <Expression success="true" type="CHECK" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
 <Original>
 cli.parse({ "test", "--benchmark-no-analysis" })
@@ -10397,6 +10397,28 @@ Nor would this
 </Section>
 <OverallResults successes="2" failures="0" expectedFailures="0"/>
 </Section>
+<Section name="Benchmark options" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Section name="warmup-time" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Expression success="true" type="CHECK" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+cli.parse({ "test", "--benchmark-warmup-time=10" })
+</Original>
+<Expanded>
+{?}
+</Expanded>
+</Expression>
+<Expression success="true" type="REQUIRE" filename="tests/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+config.benchmarkWarmupTime == 10
+</Original>
+<Expanded>
+10 == 10
+</Expanded>
+</Expression>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
 <OverallResult success="true"/>
 </TestCase>
 <TestCase name="Product with differing arities - std::tuple<int, double, float>" tags="[product][template]" filename="tests/<exe-name>/UsageTests/Misc.tests.cpp" >
@@ -17117,7 +17139,7 @@ loose text artifact
 </Section>
 <OverallResult success="true"/>
 </TestCase>
-<OverallResults successes="1675" failures="149" expectedFailures="21"/>
+<OverallResults successes="1677" failures="149" expectedFailures="21"/>
 </Group>
-<OverallResults successes="1675" failures="148" expectedFailures="21"/>
+<OverallResults successes="1677" failures="148" expectedFailures="21"/>
 </Catch>
@@ -497,17 +497,23 @@ TEST_CASE( "Process can be configured on command line", "[config][command-line]"
             REQUIRE(config.benchmarkResamples == 20000);
         }

-        SECTION("resamples") {
+        SECTION("confidence-interval") {
             CHECK(cli.parse({ "test", "--benchmark-confidence-interval=0.99" }));

             REQUIRE(config.benchmarkConfidenceInterval == Catch::Approx(0.99));
         }

-        SECTION("resamples") {
+        SECTION("no-analysis") {
             CHECK(cli.parse({ "test", "--benchmark-no-analysis" }));

             REQUIRE(config.benchmarkNoAnalysis);
         }
+
+        SECTION("warmup-time") {
+            CHECK(cli.parse({ "test", "--benchmark-warmup-time=10" }));
+
+            REQUIRE(config.benchmarkWarmupTime == 10);
+        }
     }
 }
