Fix inconsistencies in reporting benchmarking failures
With these changes, all of the following benchmarks

```cpp
BENCHMARK("Empty benchmark") {};
BENCHMARK("Throwing benchmark") {
    throw "just a plain literal, bleh";
};
BENCHMARK("Asserting benchmark") {
    REQUIRE(1 == 2);
};
BENCHMARK("FAIL'd benchmark") {
    FAIL("This benchmark only fails, nothing else");
};
```

report their respective failure and mark the outer `TEST_CASE` as failed. Previously, the first two would not fail the `TEST_CASE`, and the latter two would break the XML reporter's formatting, because `benchmarkFailed`, `benchmarkEnded`, etc. would not be called properly in failure cases.
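The core of the fix, as described above, is making sure the reporter callbacks fire on failure paths as well. The sketch below only illustrates that contract and is not Catch2's actual runner or reporter code: the `Reporter` interface, the `runUserBenchmark` helper, and `PrintingReporter` are invented for the example; only the callback names `benchmarkFailed` and `benchmarkEnded` come from the commit message.

```cpp
#include <exception>
#include <functional>
#include <iostream>
#include <string>

// Hypothetical reporter interface for illustration only; the callback names
// benchmarkFailed/benchmarkEnded mirror the ones mentioned in the commit
// message, everything else is invented.
struct Reporter {
    virtual void benchmarkFailed(std::string const& reason) = 0;
    virtual void benchmarkEnded() = 0;
    virtual ~Reporter() = default;
};

// Desired behaviour: whatever happens inside the user's benchmark body, a
// failure is reported via benchmarkFailed and benchmarkEnded still runs, so
// the reporter's output (e.g. an XML document) is never left half-written.
bool runUserBenchmark(Reporter& reporter, std::function<void()> const& body) {
    bool failed = false;
    try {
        body();
    } catch (std::exception const& ex) {
        reporter.benchmarkFailed(ex.what());
        failed = true;
    } catch (...) {
        reporter.benchmarkFailed("unknown exception");
        failed = true;
    }
    reporter.benchmarkEnded(); // must be reached on the failure paths too
    return !failed;
}

// Minimal reporter that prints the events, just to make the sketch runnable.
struct PrintingReporter : Reporter {
    void benchmarkFailed(std::string const& reason) override {
        std::cout << "benchmarkFailed: " << reason << '\n';
    }
    void benchmarkEnded() override {
        std::cout << "benchmarkEnded\n";
    }
};

int main() {
    PrintingReporter reporter;
    runUserBenchmark(reporter, [] { /* empty body, nothing to measure */ });
    runUserBenchmark(reporter, [] { throw "just a plain literal, bleh"; });
}
```

The diff below then adds CTest entries that run each failure mode through the XML reporter, plus a `SelfTest` test case that triggers them.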
```diff
@@ -315,6 +315,50 @@ add_test(NAME CheckConvenienceHeaders
     ${PYTHON_EXECUTABLE} ${CATCH_DIR}/tools/scripts/checkConvenienceHeaders.py
 )
 
+
+add_test(NAME "Benchmarking::FailureReporting::OptimizedOut"
+  COMMAND
+    $<TARGET_FILE:SelfTest> "Failing benchmarks" -c "empty" -r xml
+  # This test only makes sense with the optimizer being enabled when
+  # the tests are being compiled.
+  CONFIGURATIONS Release
+)
+set_tests_properties("Benchmarking::FailureReporting::OptimizedOut"
+  PROPERTIES
+    PASS_REGULAR_EXPRESSION "could not measure benchmark\, maybe it was optimized away"
+    FAIL_REGULAR_EXPRESSION "successes=\"1\""
+)
+
+add_test(NAME "Benchmarking::FailureReporting::ThrowingBenchmark"
+  COMMAND
+    $<TARGET_FILE:SelfTest> "Failing benchmarks" -c "throw" -r xml
+)
+set_tests_properties("Benchmarking::FailureReporting::ThrowingBenchmark"
+  PROPERTIES
+    PASS_REGULAR_EXPRESSION "<failed message=\"just a plain literal"
+    FAIL_REGULAR_EXPRESSION "successes=\"1\""
+)
+
+add_test(NAME "Benchmarking::FailureReporting::FailedAssertion"
+  COMMAND
+    $<TARGET_FILE:SelfTest> "Failing benchmarks" -c "assert" -r xml
+)
+set_tests_properties("Benchmarking::FailureReporting::FailedAssertion"
+  PROPERTIES
+    PASS_REGULAR_EXPRESSION "<Expression success=\"false\""
+    FAIL_REGULAR_EXPRESSION "successes=\"1\""
+)
+
+add_test(NAME "Benchmarking::FailureReporting::FailMacro"
+  COMMAND
+    $<TARGET_FILE:SelfTest> "Failing benchmarks" -c "fail" -r xml
+)
+set_tests_properties("Benchmarking::FailureReporting::FailMacro"
+  PROPERTIES
+    PASS_REGULAR_EXPRESSION "This benchmark only fails\, nothing else"
+    FAIL_REGULAR_EXPRESSION "successes=\"1\""
+)
+
 if (CATCH_USE_VALGRIND)
     add_test(NAME ValgrindRunTests COMMAND valgrind --leak-check=full --error-exitcode=1 $<TARGET_FILE:SelfTest>)
     add_test(NAME ValgrindListTests COMMAND valgrind --leak-check=full --error-exitcode=1 $<TARGET_FILE:SelfTest> --list-tests --verbosity high)
```
```diff
@@ -412,3 +412,24 @@ TEST_CASE("run benchmark", "[benchmark][approvals]") {
 
     CHECK((end - start).count() == 2867251000);
 }
+
+TEST_CASE("Failing benchmarks", "[!benchmark][.approvals]") {
+    SECTION("empty", "Benchmark that has been optimized away (because it is empty)") {
+        BENCHMARK("Empty benchmark") {};
+    }
+    SECTION("throw", "Benchmark that throws an exception") {
+        BENCHMARK("Throwing benchmark") {
+            throw "just a plain literal, bleh";
+        };
+    }
+    SECTION("assert", "Benchmark that asserts inside") {
+        BENCHMARK("Asserting benchmark") {
+            REQUIRE(1 == 2);
+        };
+    }
+    SECTION("fail", "Benchmark that fails inside") {
+        BENCHMARK("FAIL'd benchmark") {
+            FAIL("This benchmark only fails, nothing else");
+        };
+    }
+}
```
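The `REQUIRE` and `FAIL` cases above abort the benchmark body by unwinding the stack, so the "ended"/"failed" notifications only reach the reporter if the benchmarking code guarantees them on every exit path. A common C++ way to make that guarantee is an RAII guard. The sketch below is a generic illustration of that pattern under that assumption, not code from this commit; the `BenchmarkScope` type and its callback are invented for the example.

```cpp
#include <functional>
#include <iostream>
#include <stdexcept>
#include <utility>

// Illustrative RAII guard (not Catch2 code): the completion callback is
// invoked from the destructor, so it fires whether the guarded block exits
// normally or unwinds because of an exception, as a failing assertion would.
class BenchmarkScope {
public:
    explicit BenchmarkScope(std::function<void()> onEnded)
        : m_onEnded(std::move(onEnded)) {}

    BenchmarkScope(BenchmarkScope const&) = delete;
    BenchmarkScope& operator=(BenchmarkScope const&) = delete;

    ~BenchmarkScope() {
        if (m_onEnded) {
            m_onEnded(); // runs on normal exit and during stack unwinding
        }
    }

private:
    std::function<void()> m_onEnded;
};

int main() {
    try {
        BenchmarkScope guard([] { std::cout << "benchmark ended/failed notification\n"; });
        throw std::runtime_error("simulated failing assertion inside the benchmark");
    } catch (std::exception const& ex) {
        std::cout << "caught: " << ex.what() << '\n';
    }
}
```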