Mirror of https://github.com/catchorg/Catch2.git, synced 2024-12-23 03:43:28 +01:00
Add an extra test for benchmarking macros
Also updated baselines
parent e340ab8db6
commit 10067a47da
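In Catch2 v2 the benchmarking macros are compiled out unless CATCH_CONFIG_ENABLE_BENCHMARKING is defined before the single header is included, which is why the CMake change below passes that definition to the new test target. A minimal sketch of the pattern the new extra test compiles, with an illustrative benchmark body that is not taken from the commit:

// Sketch only: enabling Catch2 v2 benchmarking in a single-header build.
// CATCH_CONFIG_ENABLE_BENCHMARKING must be visible before catch.hpp is included;
// the commit supplies it via target_compile_definitions rather than a #define.
#define CATCH_CONFIG_ENABLE_BENCHMARKING
#define CATCH_CONFIG_MAIN
#include <catch2/catch.hpp>

TEST_CASE("Benchmark sketch", "[!benchmark]") {
    BENCHMARK("sum of first 1000 integers") {
        long long sum = 0;
        for (int i = 0; i < 1000; ++i)
            sum += i;
        // Returning the result keeps the optimizer from discarding the loop.
        return sum;
    };
}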
@@ -116,6 +116,17 @@ set_tests_properties(
 )
 
+add_executable(BenchmarkingMacros ${TESTS_DIR}/X20-BenchmarkingMacros.cpp)
+target_compile_definitions( BenchmarkingMacros PRIVATE CATCH_CONFIG_ENABLE_BENCHMARKING )
+
+add_test(NAME BenchmarkingMacros COMMAND BenchmarkingMacros -r console -s)
+set_tests_properties(
+  BenchmarkingMacros
+  PROPERTIES
+  PASS_REGULAR_EXPRESSION "benchmark name samples iterations estimated"
+)
+
+
 set( EXTRA_TEST_BINARIES
   PrefixedMacros
   DisabledMacros
@@ -123,6 +134,7 @@ set( EXTRA_TEST_BINARIES
   DisabledExceptions-CustomHandler
   FallbackStringifier
   DisableStringification
+  BenchmarkingMacros
 )
 
 # Shared config
projects/ExtraTests/X20-BenchmarkingMacros.cpp (new file, 133 lines)
@@ -0,0 +1,133 @@
+// X20-BenchmarkingMacros.cpp
+// Test that the benchmarking support macros compile properly with the single header
+
+#define CATCH_CONFIG_MAIN
+#include <catch2/catch.hpp>
+
+namespace {
+    std::uint64_t factorial(std::uint64_t number) {
+        if (number < 2) {
+            return 1;
+        }
+        return number * factorial(number - 1);
+    }
+}
+
+TEST_CASE("Benchmark factorial", "[benchmark]") {
+    CHECK(factorial(0) == 1);
+    // some more asserts..
+    CHECK(factorial(10) == 3628800);
+
+    BENCHMARK("factorial 10") {
+        return factorial(10);
+    };
+
+    CHECK(factorial(14) == 87178291200ull);
+    BENCHMARK("factorial 14") {
+        return factorial(14);
+    };
+    //
+    // BENCHMARK("factorial 20") {
+    //     return factorial(20);
+    // };
+    //
+    // BENCHMARK("factorial 35") {
+    //     return factorial(35);
+    // };
+}
+
+TEST_CASE("Benchmark containers", "[.][benchmark]") {
+    static const int size = 100;
+
+    std::vector<int> v;
+    std::map<int, int> m;
+
+    SECTION("without generator") {
+        BENCHMARK("Load up a vector") {
+            v = std::vector<int>();
+            for (int i = 0; i < size; ++i)
+                v.push_back(i);
+        };
+        REQUIRE(v.size() == size);
+
+        // test optimizer control
+        BENCHMARK("Add up a vector's content") {
+            uint64_t add = 0;
+            for (int i = 0; i < size; ++i)
+                add += v[i];
+            return add;
+        };
+
+        BENCHMARK("Load up a map") {
+            m = std::map<int, int>();
+            for (int i = 0; i < size; ++i)
+                m.insert({ i, i + 1 });
+        };
+        REQUIRE(m.size() == size);
+
+        BENCHMARK("Reserved vector") {
+            v = std::vector<int>();
+            v.reserve(size);
+            for (int i = 0; i < size; ++i)
+                v.push_back(i);
+        };
+        REQUIRE(v.size() == size);
+
+        BENCHMARK("Resized vector") {
+            v = std::vector<int>();
+            v.resize(size);
+            for (int i = 0; i < size; ++i)
+                v[i] = i;
+        };
+        REQUIRE(v.size() == size);
+
+        int array[size];
+        BENCHMARK("A fixed size array that should require no allocations") {
+            for (int i = 0; i < size; ++i)
+                array[i] = i;
+        };
+        int sum = 0;
+        for (int i = 0; i < size; ++i)
+            sum += array[i];
+        REQUIRE(sum > size);
+
+        SECTION("XYZ") {
+
+            BENCHMARK_ADVANCED("Load up vector with chronometer")(Catch::Benchmark::Chronometer meter) {
+                std::vector<int> k;
+                meter.measure([&](int idx) {
+                    k = std::vector<int>();
+                    for (int i = 0; i < size; ++i)
+                        k.push_back(idx);
+                });
+                REQUIRE(k.size() == size);
+            };
+
+            int runs = 0;
+            BENCHMARK("Fill vector indexed", benchmarkIndex) {
+                v = std::vector<int>();
+                v.resize(size);
+                for (int i = 0; i < size; ++i)
+                    v[i] = benchmarkIndex;
+                runs = benchmarkIndex;
+            };
+
+            for (size_t i = 0; i < v.size(); ++i) {
+                REQUIRE(v[i] == runs);
+            }
+        }
+    }
+
+    SECTION("with generator") {
+        auto generated = GENERATE(range(0, 10));
+        BENCHMARK("Fill vector generated") {
+            v = std::vector<int>();
+            v.resize(size);
+            for (int i = 0; i < size; ++i)
+                v[i] = generated;
+        };
+        for (size_t i = 0; i < v.size(); ++i) {
+            REQUIRE(v[i] == generated);
+        }
+    }
+}
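The "XYZ" section above uses BENCHMARK_ADVANCED with a Catch::Benchmark::Chronometer so that only the code handed to meter.measure is timed, and the measured lambda receives the index of the current run. For comparison, a short self-contained sketch of the same facility used for per-run setup, loosely following the style of Catch2's advanced-benchmarking documentation rather than anything in this commit:

// Sketch only (assumes Catch2 v2 with benchmarking enabled): each run gets its
// own pre-allocated storage slot, so only the construction itself is measured.
#define CATCH_CONFIG_ENABLE_BENCHMARKING
#define CATCH_CONFIG_MAIN
#include <catch2/catch.hpp>

#include <string>
#include <vector>

TEST_CASE("Chronometer setup sketch", "[!benchmark]") {
    BENCHMARK_ADVANCED("construct std::string")(Catch::Benchmark::Chronometer meter) {
        // meter.runs() is the number of measurements the framework will take.
        std::vector<Catch::Benchmark::storage_for<std::string>> storage(meter.runs());
        meter.measure([&](int i) { storage[i].construct("hello benchmark"); });
    };
}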
@@ -944,6 +944,14 @@ CmdLine.tests.cpp:<line number>: passed: cli.parse({"test", "--use-colour", "no"
 CmdLine.tests.cpp:<line number>: passed: config.useColour == UseColour::No for: 2 == 2
 CmdLine.tests.cpp:<line number>: passed: !result for: true
 CmdLine.tests.cpp:<line number>: passed: result.errorMessage(), Contains( "colour mode must be one of" ) for: "colour mode must be one of: auto, yes or no. 'wrong' not recognised" contains: "colour mode must be one of"
+CmdLine.tests.cpp:<line number>: passed: cli.parse({ "test", "--benchmark-samples=200" }) for: {?}
+CmdLine.tests.cpp:<line number>: passed: config.benchmarkSamples == 200 for: 200 == 200
+CmdLine.tests.cpp:<line number>: passed: cli.parse({ "test", "--benchmark-resamples=20000" }) for: {?}
+CmdLine.tests.cpp:<line number>: passed: config.benchmarkResamples == 20000 for: 20000 (0x<hex digits>) == 20000 (0x<hex digits>)
+CmdLine.tests.cpp:<line number>: passed: cli.parse({ "test", "--benchmark-confidence-interval=0.99" }) for: {?}
+CmdLine.tests.cpp:<line number>: passed: config.benchmarkConfidenceInterval == Catch::Detail::Approx(0.99) for: 0.99 == Approx( 0.99 )
+CmdLine.tests.cpp:<line number>: passed: cli.parse({ "test", "--benchmark-no-analysis" }) for: {?}
+CmdLine.tests.cpp:<line number>: passed: config.benchmarkNoAnalysis for: true
 Misc.tests.cpp:<line number>: passed: std::tuple_size<TestType>::value >= 1 for: 3 >= 1
 Misc.tests.cpp:<line number>: passed: std::tuple_size<TestType>::value >= 1 for: 2 >= 1
 Misc.tests.cpp:<line number>: passed: std::tuple_size<TestType>::value >= 1 for: 1 >= 1
@@ -1381,5 +1381,5 @@ due to unexpected exception with message:
 
 ===============================================================================
 test cases: 289 | 215 passed | 70 failed | 4 failed as expected
-assertions: 1539 | 1387 passed | 131 failed | 21 failed as expected
+assertions: 1547 | 1395 passed | 131 failed | 21 failed as expected
 
@@ -6954,6 +6954,78 @@ with expansion:
 "colour mode must be one of: auto, yes or no. 'wrong' not recognised"
 contains: "colour mode must be one of"
 
+-------------------------------------------------------------------------------
+Process can be configured on command line
+Benchmark options
+samples
+-------------------------------------------------------------------------------
+CmdLine.tests.cpp:<line number>
+...............................................................................
+
+CmdLine.tests.cpp:<line number>: PASSED:
+CHECK( cli.parse({ "test", "--benchmark-samples=200" }) )
+with expansion:
+{?}
+
+CmdLine.tests.cpp:<line number>: PASSED:
+REQUIRE( config.benchmarkSamples == 200 )
+with expansion:
+200 == 200
+
+-------------------------------------------------------------------------------
+Process can be configured on command line
+Benchmark options
+resamples
+-------------------------------------------------------------------------------
+CmdLine.tests.cpp:<line number>
+...............................................................................
+
+CmdLine.tests.cpp:<line number>: PASSED:
+CHECK( cli.parse({ "test", "--benchmark-resamples=20000" }) )
+with expansion:
+{?}
+
+CmdLine.tests.cpp:<line number>: PASSED:
+REQUIRE( config.benchmarkResamples == 20000 )
+with expansion:
+20000 (0x<hex digits>) == 20000 (0x<hex digits>)
+
+-------------------------------------------------------------------------------
+Process can be configured on command line
+Benchmark options
+resamples
+-------------------------------------------------------------------------------
+CmdLine.tests.cpp:<line number>
+...............................................................................
+
+CmdLine.tests.cpp:<line number>: PASSED:
+CHECK( cli.parse({ "test", "--benchmark-confidence-interval=0.99" }) )
+with expansion:
+{?}
+
+CmdLine.tests.cpp:<line number>: PASSED:
+REQUIRE( config.benchmarkConfidenceInterval == Catch::Detail::Approx(0.99) )
+with expansion:
+0.99 == Approx( 0.99 )
+
+-------------------------------------------------------------------------------
+Process can be configured on command line
+Benchmark options
+resamples
+-------------------------------------------------------------------------------
+CmdLine.tests.cpp:<line number>
+...............................................................................
+
+CmdLine.tests.cpp:<line number>: PASSED:
+CHECK( cli.parse({ "test", "--benchmark-no-analysis" }) )
+with expansion:
+{?}
+
+CmdLine.tests.cpp:<line number>: PASSED:
+REQUIRE( config.benchmarkNoAnalysis )
+with expansion:
+true
+
 -------------------------------------------------------------------------------
 Product with differing arities - std::tuple<int, double, float>
 -------------------------------------------------------------------------------
@@ -12219,5 +12291,5 @@ Misc.tests.cpp:<line number>: PASSED:
 
 ===============================================================================
 test cases: 289 | 199 passed | 86 failed | 4 failed as expected
-assertions: 1556 | 1387 passed | 148 failed | 21 failed as expected
+assertions: 1564 | 1395 passed | 148 failed | 21 failed as expected
 
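The extra passing assertions above come from new CmdLine tests that parse the benchmark flags (--benchmark-samples, --benchmark-resamples, --benchmark-confidence-interval, --benchmark-no-analysis) into the config object. In an ordinary Catch2 v2 binary those flags travel through the usual custom-runner entry point; a minimal sketch of that wiring, standard CATCH_CONFIG_RUNNER usage rather than code from this commit:

// Sketch only: a custom main that forwards command-line arguments, including the
// benchmark options exercised above, to Catch2 v2's session and config.
#define CATCH_CONFIG_ENABLE_BENCHMARKING
#define CATCH_CONFIG_RUNNER
#include <catch2/catch.hpp>

int main(int argc, char* argv[]) {
    Catch::Session session;
    // applyCommandLine returns non-zero when parsing fails, e.g. a malformed
    // --benchmark-samples value, in which case we exit with that code.
    int returnCode = session.applyCommandLine(argc, argv);
    if (returnCode != 0)
        return returnCode;
    return session.run();
}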
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <testsuitesloose text artifact
 >
-<testsuite name="<exe-name>" errors="17" failures="132" tests="1557" hostname="tbd" time="{duration}" timestamp="{iso8601-timestamp}">
+<testsuite name="<exe-name>" errors="17" failures="132" tests="1565" hostname="tbd" time="{duration}" timestamp="{iso8601-timestamp}">
 <properties>
 <property name="filters" value="~[!nonportable]~[!benchmark]~[approvals]"/>
 <property name="random-seed" value="1"/>
@@ -648,6 +648,10 @@ Message.tests.cpp:<line number>
 <testcase classname="<exe-name>.global" name="Process can be configured on command line/use-colour/yes" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Process can be configured on command line/use-colour/no" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Process can be configured on command line/use-colour/error" time="{duration}"/>
+<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/samples" time="{duration}"/>
+<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/resamples" time="{duration}"/>
+<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/resamples" time="{duration}"/>
+<testcase classname="<exe-name>.global" name="Process can be configured on command line/Benchmark options/resamples" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Product with differing arities - std::tuple<int, double, float>" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Product with differing arities - std::tuple<int, double>" time="{duration}"/>
 <testcase classname="<exe-name>.global" name="Product with differing arities - std::tuple<int>" time="{duration}"/>
@@ -8669,6 +8669,94 @@ Nor would this
 </Section>
 <OverallResults successes="2" failures="0" expectedFailures="0"/>
 </Section>
+<Section name="Benchmark options" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Section name="samples" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Expression success="true" type="CHECK" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+cli.parse({ "test", "--benchmark-samples=200" })
+</Original>
+<Expanded>
+{?}
+</Expanded>
+</Expression>
+<Expression success="true" type="REQUIRE" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+config.benchmarkSamples == 200
+</Original>
+<Expanded>
+200 == 200
+</Expanded>
+</Expression>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
+<Section name="Benchmark options" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Section name="resamples" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Expression success="true" type="CHECK" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+cli.parse({ "test", "--benchmark-resamples=20000" })
+</Original>
+<Expanded>
+{?}
+</Expanded>
+</Expression>
+<Expression success="true" type="REQUIRE" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+config.benchmarkResamples == 20000
+</Original>
+<Expanded>
+20000 (0x<hex digits>) == 20000 (0x<hex digits>)
+</Expanded>
+</Expression>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
+<Section name="Benchmark options" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Section name="resamples" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Expression success="true" type="CHECK" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+cli.parse({ "test", "--benchmark-confidence-interval=0.99" })
+</Original>
+<Expanded>
+{?}
+</Expanded>
+</Expression>
+<Expression success="true" type="REQUIRE" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+config.benchmarkConfidenceInterval == Catch::Detail::Approx(0.99)
+</Original>
+<Expanded>
+0.99 == Approx( 0.99 )
+</Expanded>
+</Expression>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
+<Section name="Benchmark options" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Section name="resamples" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Expression success="true" type="CHECK" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+cli.parse({ "test", "--benchmark-no-analysis" })
+</Original>
+<Expanded>
+{?}
+</Expanded>
+</Expression>
+<Expression success="true" type="REQUIRE" filename="projects/<exe-name>/IntrospectiveTests/CmdLine.tests.cpp" >
+<Original>
+config.benchmarkNoAnalysis
+</Original>
+<Expanded>
+true
+</Expanded>
+</Expression>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
+<OverallResults successes="2" failures="0" expectedFailures="0"/>
+</Section>
 <OverallResult success="true"/>
 </TestCase>
 <TestCase name="Product with differing arities - std::tuple<int, double, float>" tags="[product][template]" filename="projects/<exe-name>/UsageTests/Misc.tests.cpp" >
@@ -14586,7 +14674,7 @@ loose text artifact
 </Section>
 <OverallResult success="true"/>
 </TestCase>
-<OverallResults successes="1387" failures="149" expectedFailures="21"/>
+<OverallResults successes="1395" failures="149" expectedFailures="21"/>
 </Group>
-<OverallResults successes="1387" failures="148" expectedFailures="21"/>
+<OverallResults successes="1395" failures="148" expectedFailures="21"/>
 </Catch>
@@ -3,10 +3,10 @@
 #include <map>
 
 #if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
-std::uint64_t Fibonacci(std::uint64_t number);
-
-std::uint64_t Fibonacci(std::uint64_t number) {
-    return number < 2 ? 1 : Fibonacci(number - 1) + Fibonacci(number - 2);
+namespace {
+    std::uint64_t Fibonacci(std::uint64_t number) {
+        return number < 2 ? 1 : Fibonacci(number - 1) + Fibonacci(number - 2);
+    }
 }
 
 TEST_CASE("Benchmark Fibonacci", "[!benchmark]") {