Mirror of https://github.com/catchorg/Catch2.git (synced 2024-11-15 02:09:53 +01:00)
Remove CATCH_CONFIG_ENABLE_BENCHMARKING compilation toggle
Now that Catch2 is a proper library, we can always build the full library (a comparatively minor slowdown), and users who want to avoid the benchmarking-related compilation slowdown can simply not include the benchmarking headers.
This commit is contained in: parent 86e19b952d, commit cd7d7a1c67
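As a rough illustration of the opt-in model this commit moves to, here is a minimal sketch: benchmarking support is pulled into a translation unit only by including the benchmark header, with no CATCH_CONFIG_ENABLE_BENCHMARKING define required. The benchmark header path matches the one added to the build below; the test-macro header name and the fib workload are illustrative assumptions, not part of this commit.

    // Opt-in benchmarking after this change: include the benchmark header where needed.
    #include <catch2/catch_test_macros.hpp>            // assumed test-macro header; use whatever main header your project includes
    #include <catch2/benchmark/catch_benchmark.hpp>    // benchmarking opt-in, per translation unit

    #include <cstdint>

    static std::uint64_t fib(std::uint64_t n) {        // illustrative workload
        return n < 2 ? n : fib(n - 1) + fib(n - 2);
    }

    TEST_CASE("Fibonacci", "[benchmark]") {
        BENCHMARK("fib(20)") {
            return fib(20);                            // returning the result keeps the call
        };                                             // from being optimized away
    }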
@@ -3,10 +3,6 @@
 > [Introduced](https://github.com/catchorg/Catch2/issues/1616) in Catch 2.9.0.
 
-_Note that benchmarking support is disabled by default and to enable it,
-you need to define `CATCH_CONFIG_ENABLE_BENCHMARKING`. For more details,
-see the [compile-time configuration documentation](configuration.md#top)._
-
 Writing benchmarks is not easy. Catch simplifies certain aspects but you'll
 always need to take care about various aspects. Understanding a few things about
 the way Catch runs your code will be very helpful when writing your benchmarks.
@@ -152,7 +152,6 @@ by using `_NO_` in the macro, e.g. `CATCH_CONFIG_NO_CPP17_UNCAUGHT_EXCEPTIONS`.
     CATCH_CONFIG_DISABLE                    // Disables assertions and test case registration
     CATCH_CONFIG_WCHAR                      // Enables use of wchart_t
     CATCH_CONFIG_EXPERIMENTAL_REDIRECT      // Enables the new (experimental) way of capturing stdout/stderr
-    CATCH_CONFIG_ENABLE_BENCHMARKING        // Enables the integrated benchmarking features (has a significant effect on compilation speed)
     CATCH_CONFIG_USE_ASYNC                  // Force parallel statistical processing of samples during benchmarking
     CATCH_CONFIG_ANDROID_LOGWRITE           // Use android's logging system for debug output
     CATCH_CONFIG_GLOBAL_NEXTAFTER           // Use nextafter{,f,l} instead of std::nextafter
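For context, these remaining macros are compile-time switches. A minimal sketch of how a project typically sets them; the umbrella header name here is an assumption about the include layout, and the particular macros are just examples:

    // Compile-time configuration sketch: define CATCH_CONFIG_* before any Catch2
    // header is seen, or pass them via the build system (e.g. -DCATCH_CONFIG_USE_ASYNC).
    #define CATCH_CONFIG_USE_ASYNC        // force parallel statistical processing of benchmark samples
    #define CATCH_CONFIG_GLOBAL_NEXTAFTER // use nextafter{,f,l} instead of std::nextafter
    #include <catch2/catch_all.hpp>       // assumed umbrella header; adjust to your include layout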
@@ -23,6 +23,10 @@ set(BENCHMARK_HEADERS
     ${SOURCES_DIR}/benchmark/detail/catch_timing.hpp
 )
 set(BENCHMARK_SOURCES
+    ${SOURCES_DIR}/benchmark/catch_chronometer.cpp
+    ${SOURCES_DIR}/benchmark/detail/catch_benchmark_function.cpp
+    ${SOURCES_DIR}/benchmark/detail/catch_complete_invoke.cpp
+    ${SOURCES_DIR}/benchmark/detail/catch_run_for_at_least.cpp
     ${SOURCES_DIR}/benchmark/detail/catch_stats.cpp
 )
src/catch2/benchmark/catch_chronometer.cpp — new file (14 lines)
@@ -0,0 +1,14 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0. (See accompanying
+ * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+#include <catch2/benchmark/catch_chronometer.hpp>
+
+namespace Catch {
+    namespace Benchmark {
+        namespace Detail {
+            ChronometerConcept::~ChronometerConcept() = default;
+        } // namespace Detail
+    } // namespace Benchmark
+} // namespace Catch
@@ -22,7 +22,7 @@ namespace Catch {
         struct ChronometerConcept {
             virtual void start() = 0;
             virtual void finish() = 0;
-            virtual ~ChronometerConcept() = default;
+            virtual ~ChronometerConcept(); // = default;
         };
         template <typename Clock>
         struct ChronometerModel final : public ChronometerConcept {
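The change above, paired with the new catch_chronometer.cpp, is the usual out-of-line virtual destructor pattern: declare the destructor in the header, define it in exactly one source file. A minimal generic sketch with a hypothetical Widget type (not Catch2 code), assuming one header and one implementation file:

    // widget.hpp -- declare the virtual destructor, but do not define it here.
    struct Widget {
        virtual void draw() = 0;
        virtual ~Widget();            // defined out of line in widget.cpp
    };

    // widget.cpp -- the single out-of-line definition; it gives the class a
    // non-inline key function, so the vtable is emitted in one translation unit
    // and the header stays cheaper for every includer.
    // #include "widget.hpp"
    Widget::~Widget() = default;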
@@ -15,6 +15,9 @@
 # include <atomic> // atomic_thread_fence
 #endif
 
 #include <type_traits>
 #include <utility>
 
 namespace Catch {
     namespace Benchmark {
 #if defined(__GNUC__) || defined(__clang__)
src/catch2/benchmark/detail/catch_benchmark_function.cpp — new file (14 lines)
@@ -0,0 +1,14 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0. (See accompanying
+ * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+#include <catch2/benchmark/detail/catch_benchmark_function.hpp>
+
+namespace Catch {
+    namespace Benchmark {
+        namespace Detail {
+            BenchmarkFunction::callable::~callable() = default;
+        } // namespace Detail
+    } // namespace Benchmark
+} // namespace Catch
@@ -1,4 +1,4 @@
-/*
+/*
  * Created by Joachim on 16/04/2019.
  * Adapted from donated nonius code.
  *
@@ -41,7 +41,7 @@ namespace Catch {
             struct callable {
                 virtual void call(Chronometer meter) const = 0;
                 virtual callable* clone() const = 0;
-                virtual ~callable() = default;
+                virtual ~callable(); // = default;
             };
             template <typename Fun>
             struct model : public callable {
src/catch2/benchmark/detail/catch_complete_invoke.cpp — new file (14 lines)
@@ -0,0 +1,14 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0. (See accompanying
+ * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+#include <catch2/benchmark/detail/catch_complete_invoke.hpp>
+
+namespace Catch {
+    namespace Benchmark {
+        namespace Detail {
+            const std::string benchmarkErrorMsg = "a benchmark failed to run successfully";
+        } // namespace Detail
+    } // namespace Benchmark
+} // namespace Catch
@@ -12,6 +12,8 @@
 #define TWOBLUECUBES_CATCH_DETAIL_COMPLETE_INVOKE_HPP_INCLUDED
 
 #include <catch2/catch_enforce.h>
 #include <catch2/catch_interfaces_capture.h>
 #include <catch2/catch_interfaces_registry_hub.h>
 
 #include <type_traits>
 #include <utility>
@@ -51,7 +53,7 @@ namespace Catch {
             return CompleteInvoker<ResultOf_t<Fun(Args...)>>::invoke(std::forward<Fun>(fun), std::forward<Args>(args)...);
         }
 
-        const std::string benchmarkErrorMsg = "a benchmark failed to run successfully";
+        extern const std::string benchmarkErrorMsg;
     } // namespace Detail
 
     template <typename Fun>
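The benchmarkErrorMsg change above is the standard way to stop defining an object in a header: declare it extern there and define it in exactly one source file, so every includer shares one object instead of each translation unit creating its own. A minimal generic sketch with hypothetical names (not Catch2 code):

    // messages.hpp -- declaration only; safe to include from many translation units.
    #include <string>
    extern const std::string kErrorMsg;

    // messages.cpp -- the single definition that every user links against.
    // #include "messages.hpp"
    const std::string kErrorMsg = "something went wrong";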
src/catch2/benchmark/detail/catch_run_for_at_least.cpp — new file (29 lines)
@@ -0,0 +1,29 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0. (See accompanying
+ * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+// Run a function for a minimum amount of time
+#include <catch2/benchmark/detail/catch_run_for_at_least.hpp>
+
+#include <exception>
+#include <catch2/catch_enforce.h>
+
+namespace Catch {
+    namespace Benchmark {
+        namespace Detail {
+            struct optimized_away_error : std::exception {
+                const char* what() const noexcept override;
+            };
+
+            const char* optimized_away_error::what() const noexcept {
+                return "could not measure benchmark, maybe it was optimized away";
+            }
+
+            void throw_optimized_away_error() {
+                Catch::throw_exception(optimized_away_error{});
+            }
+
+        } // namespace Detail
+    } // namespace Benchmark
+} // namespace Catch
@@ -39,11 +39,9 @@ namespace Catch {
             template <typename Clock, typename Fun>
             using run_for_at_least_argument_t = typename std::conditional<is_callable<Fun(Chronometer)>::value, Chronometer, int>::type;
 
-            struct optimized_away_error : std::exception {
-                const char* what() const noexcept override {
-                    return "could not measure benchmark, maybe it was optimized away";
-                }
-            };
+            [[noreturn]]
+            void throw_optimized_away_error();
 
             template <typename Clock, typename Fun>
             TimingOf<Clock, Fun(run_for_at_least_argument_t<Clock, Fun>)> run_for_at_least(ClockDuration<Clock> how_long, int seed, Fun&& fun) {
@@ -56,7 +54,7 @@ namespace Catch {
                 }
                 iters *= 2;
             }
-            throw optimized_away_error{};
+            throw_optimized_away_error();
         }
     } // namespace Detail
 } // namespace Benchmark
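Replacing the inline throw with throw_optimized_away_error() follows the common pattern of hiding a throw site behind a small [[noreturn]] helper that is declared in the header and defined in one source file, so the exception type and the throw machinery stay out of the header-only template. A minimal generic sketch with hypothetical names:

    // fail.hpp -- header-visible declaration only; [[noreturn]] tells callers and
    // the optimizer that control never returns from this function.
    [[noreturn]] void throw_parse_error(const char* what);

    // fail.cpp -- single definition; the exception type can stay private to this file.
    // #include "fail.hpp"
    #include <stdexcept>
    [[noreturn]] void throw_parse_error(const char* what) {
        throw std::runtime_error(what);
    }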
@@ -8,8 +8,6 @@
 
 // Statistical analysis tools
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
-
 #include <catch2/benchmark/detail/catch_stats.hpp>
 
 #include <catch2/catch_compiler_capabilities.h>
@@ -220,5 +218,3 @@ namespace Catch {
 } // namespace Detail
 } // namespace Benchmark
 } // namespace Catch
-
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
@@ -23,6 +23,7 @@
 #include <cmath>
 #include <utility>
 #include <cstddef>
+#include <random>
 
 namespace Catch {
     namespace Benchmark {
@@ -42,10 +42,6 @@
 
 #include <catch2/catch_external_interfaces.h>
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
-#include <catch2/benchmark/catch_benchmark.hpp>
-#endif
-
 #endif // ! CATCH_CONFIG_IMPL_ONLY
 
 #if !defined(CATCH_CONFIG_IMPL_ONLY)
@@ -29,11 +29,9 @@ namespace Catch {
     struct ITransientExpression;
     struct IGeneratorTracker;
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
     struct BenchmarkInfo;
     template <typename Duration = std::chrono::duration<double, std::nano>>
     struct BenchmarkStats;
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
     struct IResultCapture {
 
@@ -46,12 +44,10 @@ namespace Catch {
 
         virtual auto acquireGeneratorTracker( SourceLineInfo const& lineInfo ) -> IGeneratorTracker& = 0;
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
         virtual void benchmarkPreparing( std::string const& name ) = 0;
         virtual void benchmarkStarting( BenchmarkInfo const& info ) = 0;
         virtual void benchmarkEnded( BenchmarkStats<> const& stats ) = 0;
         virtual void benchmarkFailed( std::string const& error ) = 0;
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
         virtual void pushScopedMessage( MessageInfo const& message ) = 0;
         virtual void popScopedMessage( MessageInfo const& message ) = 0;
@@ -18,10 +18,8 @@
 #include <catch2/catch_option.hpp>
 #include <catch2/catch_stringref.h>
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
 #include <catch2/benchmark/catch_estimate.hpp>
 #include <catch2/benchmark/catch_outlier_classification.hpp>
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
 
 #include <string>
@@ -168,7 +166,7 @@ namespace Catch {
         bool aborting;
     };
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
 
     struct BenchmarkInfo {
         std::string name;
         double estimatedDuration;
@@ -204,7 +202,6 @@ namespace Catch {
             };
         }
     };
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
     struct IStreamingReporter {
         virtual ~IStreamingReporter() = default;
@@ -224,12 +221,10 @@ namespace Catch {
         virtual void testCaseStarting( TestCaseInfo const& testInfo ) = 0;
         virtual void sectionStarting( SectionInfo const& sectionInfo ) = 0;
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
         virtual void benchmarkPreparing( std::string const& ) {}
         virtual void benchmarkStarting( BenchmarkInfo const& ) {}
         virtual void benchmarkEnded( BenchmarkStats<> const& ) {}
         virtual void benchmarkFailed( std::string const& ) {}
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
         virtual void assertionStarting( AssertionInfo const& assertionInfo ) = 0;
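Since these reporter hooks are now always part of the interface, a reporter or event listener that wants benchmark progress simply overrides them. A minimal sketch; the listener base class and registration macro follow the usual Catch2 event-listener pattern and are an assumption here rather than a verbatim excerpt, and the required Catch2 headers are assumed to be included:

    // Sketch: a listener reacting to the benchmark events declared above.
    #include <iostream>

    struct BenchmarkLogger : Catch::TestEventListenerBase {
        using TestEventListenerBase::TestEventListenerBase;

        void benchmarkStarting(Catch::BenchmarkInfo const& info) override {
            std::cout << "benchmark starting: " << info.name << '\n';
        }
        void benchmarkFailed(std::string const& error) override {
            std::cout << "benchmark failed: " << error << '\n';
        }
    };
    CATCH_REGISTER_LISTENER(BenchmarkLogger)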
@@ -231,7 +231,6 @@ namespace Catch {
         m_unfinishedSections.push_back(endInfo);
     }
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
     void RunContext::benchmarkPreparing(std::string const& name) {
         m_reporter->benchmarkPreparing(name);
     }
@@ -241,10 +240,9 @@ namespace Catch {
     void RunContext::benchmarkEnded( BenchmarkStats<> const& stats ) {
         m_reporter->benchmarkEnded( stats );
     }
-    void RunContext::benchmarkFailed(std::string const & error) {
-        m_reporter->benchmarkFailed(error);
-    }
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
+    void RunContext::benchmarkFailed(std::string const & error) {
+        m_reporter->benchmarkFailed(error);
+    }
 
     void RunContext::pushScopedMessage(MessageInfo const & message) {
         m_messages.push_back(message);
@@ -82,12 +82,10 @@ namespace Catch {
 
         auto acquireGeneratorTracker( SourceLineInfo const& lineInfo ) -> IGeneratorTracker& override;
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
         void benchmarkPreparing( std::string const& name ) override;
         void benchmarkStarting( BenchmarkInfo const& info ) override;
         void benchmarkEnded( BenchmarkStats<> const& stats ) override;
         void benchmarkFailed( std::string const& error ) override;
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
         void pushScopedMessage( MessageInfo const& message ) override;
         void popScopedMessage( MessageInfo const& message ) override;
@@ -98,12 +98,10 @@
 #define CATCH_THEN( desc )     INTERNAL_CATCH_DYNAMIC_SECTION( " Then: " << desc )
 #define CATCH_AND_THEN( desc ) INTERNAL_CATCH_DYNAMIC_SECTION( "  And: " << desc )
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
 #define CATCH_BENCHMARK(...) \
     INTERNAL_CATCH_BENCHMARK(INTERNAL_CATCH_UNIQUE_NAME(____C_A_T_C_H____B_E_N_C_H____), INTERNAL_CATCH_GET_1_ARG(__VA_ARGS__,,), INTERNAL_CATCH_GET_2_ARG(__VA_ARGS__,,))
 #define CATCH_BENCHMARK_ADVANCED(name) \
     INTERNAL_CATCH_BENCHMARK_ADVANCED(INTERNAL_CATCH_UNIQUE_NAME(____C_A_T_C_H____B_E_N_C_H____), name)
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
 // If CATCH_CONFIG_PREFIX_ALL is not defined then the CATCH_ prefix is not required
 #else
@@ -197,12 +195,10 @@
 #define THEN( desc )     INTERNAL_CATCH_DYNAMIC_SECTION( " Then: " << desc )
 #define AND_THEN( desc ) INTERNAL_CATCH_DYNAMIC_SECTION( "  And: " << desc )
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
 #define BENCHMARK(...) \
     INTERNAL_CATCH_BENCHMARK(INTERNAL_CATCH_UNIQUE_NAME(____C_A_T_C_H____B_E_N_C_H____), INTERNAL_CATCH_GET_1_ARG(__VA_ARGS__,,), INTERNAL_CATCH_GET_2_ARG(__VA_ARGS__,,))
 #define BENCHMARK_ADVANCED(name) \
     INTERNAL_CATCH_BENCHMARK_ADVANCED(INTERNAL_CATCH_UNIQUE_NAME(____C_A_T_C_H____B_E_N_C_H____), name)
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
 #else // CATCH_CONFIG_DISABLE
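With the guard gone, these macros are available whenever the benchmarking header is included. For reference, a small usage sketch; the measured work is illustrative, and the test-macro header that provides TEST_CASE is assumed to be included as well:

    #include <catch2/benchmark/catch_benchmark.hpp>   // brings in BENCHMARK / BENCHMARK_ADVANCED
    #include <vector>
    #include <numeric>

    TEST_CASE("vector sum", "[benchmark]") {
        std::vector<int> v(4096, 1);

        BENCHMARK("accumulate 4096 ints") {
            return std::accumulate(v.begin(), v.end(), 0);   // return the result so it is not optimized away
        };

        BENCHMARK_ADVANCED("accumulate, manual timing")(Catch::Benchmark::Chronometer meter) {
            meter.measure([&] { return std::accumulate(v.begin(), v.end(), 0); });
        };
    }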
@@ -431,7 +431,6 @@ void ConsoleReporter::sectionEnded(SectionStats const& _sectionStats) {
     StreamingReporterBase::sectionEnded(_sectionStats);
 }
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
 void ConsoleReporter::benchmarkPreparing(std::string const& name) {
     lazyPrintWithoutClosingBenchmarkTable();
 
@@ -477,7 +476,6 @@ void ConsoleReporter::benchmarkFailed(std::string const& error) {
         << "Benchmark failed (" << error << ')'
         << ColumnBreak() << RowBreak();
 }
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
 void ConsoleReporter::testCaseEnded(TestCaseStats const& _testCaseStats) {
     m_tablePrinter->close();
@@ -41,12 +41,10 @@ namespace Catch {
         void sectionStarting(SectionInfo const& _sectionInfo) override;
         void sectionEnded(SectionStats const& _sectionStats) override;
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
         void benchmarkPreparing(std::string const& name) override;
         void benchmarkStarting(BenchmarkInfo const& info) override;
         void benchmarkEnded(BenchmarkStats<> const& stats) override;
         void benchmarkFailed(std::string const& error) override;
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
         void testCaseEnded(TestCaseStats const& _testCaseStats) override;
         void testGroupEnded(TestGroupStats const& _testGroupStats) override;
@@ -44,13 +44,12 @@ namespace Catch {
         m_reporter->reportInvalidArguments( arg );
     }
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
     void ListeningReporter::benchmarkPreparing( std::string const& name ) {
-        for (auto const& listener : m_listeners) {
-            listener->benchmarkPreparing(name);
-        }
-        m_reporter->benchmarkPreparing(name);
-    }
+        for (auto const& listener : m_listeners) {
+            listener->benchmarkPreparing(name);
+        }
+        m_reporter->benchmarkPreparing(name);
+    }
     void ListeningReporter::benchmarkStarting( BenchmarkInfo const& benchmarkInfo ) {
         for ( auto const& listener : m_listeners ) {
             listener->benchmarkStarting( benchmarkInfo );
@@ -64,13 +63,12 @@ namespace Catch {
         m_reporter->benchmarkEnded( benchmarkStats );
     }
 
-    void ListeningReporter::benchmarkFailed( std::string const& error ) {
-        for (auto const& listener : m_listeners) {
-            listener->benchmarkFailed(error);
-        }
-        m_reporter->benchmarkFailed(error);
-    }
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
+    void ListeningReporter::benchmarkFailed( std::string const& error ) {
+        for (auto const& listener : m_listeners) {
+            listener->benchmarkFailed(error);
+        }
+        m_reporter->benchmarkFailed(error);
+    }
 
     void ListeningReporter::testRunStarting( TestRunInfo const& testRunInfo ) {
         for ( auto const& listener : m_listeners ) {
@@ -31,12 +31,10 @@ namespace Catch {
 
         void reportInvalidArguments(std::string const&arg) override;
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
         void benchmarkPreparing(std::string const& name) override;
         void benchmarkStarting( BenchmarkInfo const& benchmarkInfo ) override;
         void benchmarkEnded( BenchmarkStats<> const& benchmarkStats ) override;
         void benchmarkFailed(std::string const&) override;
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
         void testRunStarting( TestRunInfo const& testRunInfo ) override;
         void testGroupStarting( GroupInfo const& groupInfo ) override;
@@ -219,7 +219,6 @@ namespace Catch {
         m_xml.endElement();
     }
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
     void XmlReporter::benchmarkPreparing(std::string const& name) {
         m_xml.startElement("BenchmarkResults")
             .writeAttribute("name", name);
@@ -262,7 +261,6 @@ namespace Catch {
             writeAttribute("message", error);
         m_xml.endElement();
     }
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
     void XmlReporter::listReporters(std::vector<ReporterDescription> const& descriptions, Config const&) {
         auto outerTag = m_xml.scopedElement("AvailableReporters");
@@ -50,12 +50,10 @@ namespace Catch {
 
         void testRunEnded(TestRunStats const& testRunStats) override;
 
-#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
         void benchmarkPreparing(std::string const& name) override;
         void benchmarkStarting(BenchmarkInfo const&) override;
         void benchmarkEnded(BenchmarkStats<> const&) override;
         void benchmarkFailed(std::string const&) override;
-#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
 
         void listReporters(std::vector<ReporterDescription> const& descriptions, Config const& config) override;
         void listTests(std::vector<TestCaseHandle> const& tests, Config const& config) override;
@@ -120,7 +120,6 @@ set_tests_properties(
 
 
 add_executable(BenchmarkingMacros ${TESTS_DIR}/X20-BenchmarkingMacros.cpp)
-target_compile_definitions( BenchmarkingMacros PUBLIC CATCH_CONFIG_ENABLE_BENCHMARKING )
 target_link_libraries( BenchmarkingMacros Catch2_buildall_interface )
 
 add_test(NAME BenchmarkingMacros COMMAND BenchmarkingMacros -r console -s)
@@ -229,15 +229,20 @@ Message from section two
 :test-result: PASS X/level/1/a
 :test-result: PASS X/level/1/b
 :test-result: PASS XmlEncode
+:test-result: PASS analyse no analysis
 :test-result: PASS array<int, N> -> toString
 :test-result: PASS atomic if
+:test-result: PASS benchmark function call
 :test-result: PASS boolean member
 :test-result: PASS checkedElse
 :test-result: FAIL checkedElse, failing
 :test-result: PASS checkedIf
 :test-result: FAIL checkedIf, failing
+:test-result: PASS classify_outliers
 :test-result: PASS comparisons between const int variables
 :test-result: PASS comparisons between int variables
+:test-result: PASS erfc_inv
+:test-result: PASS estimate_clock_resolution
 :test-result: PASS even more nested SECTION tests
 :test-result: FAIL first tag
 loose text artifact
@@ -249,11 +254,15 @@ loose text artifact
 :test-result: PASS long long
 :test-result: FAIL looped SECTION tests
 :test-result: FAIL looped tests
+:test-result: PASS mean
+:test-result: PASS measure
 :test-result: FAIL mix info, unscoped info and warning
 :test-result: FAIL more nested SECTION tests
 :test-result: PASS nested SECTION tests
 :test-result: PASS non streamable - with conv. op
 :test-result: PASS non-copyable objects
+:test-result: PASS normal_cdf
+:test-result: PASS normal_quantile
 :test-result: PASS not allowed
 :test-result: FAIL not prints unscoped info from previous failures
 :test-result: PASS null strings
@@ -266,6 +275,9 @@ loose text artifact
 :test-result: FAIL prints unscoped info only for the first assertion
 :test-result: PASS random SECTION tests
 :test-result: PASS replaceInPlace
+:test-result: PASS resolution
+:test-result: PASS run_for_at_least, chronometer
+:test-result: PASS run_for_at_least, int
 :test-result: FAIL second tag
 :test-result: FAIL send a single char to INFO
 :test-result: FAIL sends information to INFO
@@ -304,10 +316,13 @@ loose text artifact
 :test-result: PASS tuple<0,int,const char *>
 :test-result: PASS tuple<string,string>
 :test-result: PASS tuple<tuple<int>,tuple<>,float>
+:test-result: PASS uniform samples
 :test-result: PASS vec<vec<string,alloc>> -> toString
 :test-result: PASS vector<bool> -> toString
 :test-result: PASS vector<int,allocator> -> toString
 :test-result: PASS vector<int> -> toString
 :test-result: PASS vector<string> -> toString
 :test-result: PASS vectors can be sized and resized
+:test-result: PASS warmup
+:test-result: PASS weighted_average_quantile
 :test-result: PASS xmlentitycheck
@ -1562,10 +1562,33 @@ Xml.tests.cpp:<line number>: passed: encode( stringWithQuotes, Catch::XmlEncode:
|
||||
"don't "quote" me on that"
|
||||
Xml.tests.cpp:<line number>: passed: encode( "[\x01]" ) == "[\\x01]" for: "[\x01]" == "[\x01]"
|
||||
Xml.tests.cpp:<line number>: passed: encode( "[\x7F]" ) == "[\\x7F]" for: "[\x7F]" == "[\x7F]"
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.mean.point.count() == 23 for: 23.0 == 23
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.mean.lower_bound.count() == 23 for: 23.0 == 23
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.mean.upper_bound.count() == 23 for: 23.0 == 23
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.standard_deviation.point.count() == 0 for: 0.0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.standard_deviation.lower_bound.count() == 0 for: 0.0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.standard_deviation.upper_bound.count() == 0 for: 0.0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.outliers.total() == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.outliers.low_mild == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.outliers.low_severe == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.outliers.high_mild == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.outliers.high_severe == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.outliers.samples_seen == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: analysis.outlier_variance == 0 for: 0.0 == 0
|
||||
ToStringVector.tests.cpp:<line number>: passed: Catch::Detail::stringify( empty ) == "{ }" for: "{ }" == "{ }"
|
||||
ToStringVector.tests.cpp:<line number>: passed: Catch::Detail::stringify( oneValue ) == "{ 42 }" for: "{ 42 }" == "{ 42 }"
|
||||
ToStringVector.tests.cpp:<line number>: passed: Catch::Detail::stringify( twoValues ) == "{ 42, 250 }" for: "{ 42, 250 }" == "{ 42, 250 }"
|
||||
Misc.tests.cpp:<line number>: passed: x == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: model.started == 1 for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: model.finished == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: model.started == 1 for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: model.finished == 1 for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: called == 1 for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: model.started == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: model.finished == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: model.started == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: model.finished == 0 for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: called == 1 for: 1 == 1
|
||||
Tricky.tests.cpp:<line number>: passed: obj.prop != 0 for: 0x<hex digits> != 0
|
||||
Misc.tests.cpp:<line number>: passed: flag for: true
|
||||
Misc.tests.cpp:<line number>: passed: testCheckedElse( true ) for: true
|
||||
@ -1575,6 +1598,42 @@ Misc.tests.cpp:<line number>: passed: flag for: true
|
||||
Misc.tests.cpp:<line number>: passed: testCheckedIf( true ) for: true
|
||||
Misc.tests.cpp:<line number>: failed: flag for: false
|
||||
Misc.tests.cpp:<line number>: failed: testCheckedIf( false ) for: false
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_severe == los for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_mild == lom for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_mild == him for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_severe == his for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.total() == los + lom + him + his for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_severe == los for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_mild == lom for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_mild == him for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_severe == his for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.total() == los + lom + him + his for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_severe == los for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_mild == lom for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_mild == him for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_severe == his for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.total() == los + lom + him + his for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_severe == los for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_mild == lom for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_mild == him for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_severe == his for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.total() == los + lom + him + his for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_severe == los for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_mild == lom for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_mild == him for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_severe == his for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.total() == los + lom + him + his for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_severe == los for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.low_mild == lom for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_mild == him for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.high_severe == his for: 0 == 0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: o.total() == los + lom + him + his for: 2 == 2
|
||||
Condition.tests.cpp:<line number>: passed: unsigned_char_var == 1 for: 1 == 1
|
||||
Condition.tests.cpp:<line number>: passed: unsigned_short_var == 1 for: 1 == 1
|
||||
Condition.tests.cpp:<line number>: passed: unsigned_int_var == 1 for: 1 == 1
|
||||
@ -1583,6 +1642,11 @@ Condition.tests.cpp:<line number>: passed: long_var == unsigned_char_var for: 1
|
||||
Condition.tests.cpp:<line number>: passed: long_var == unsigned_short_var for: 1 == 1
|
||||
Condition.tests.cpp:<line number>: passed: long_var == unsigned_int_var for: 1 == 1
|
||||
Condition.tests.cpp:<line number>: passed: long_var == unsigned_long_var for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: erfc_inv(1.103560) == Approx(-0.09203687623843015) for: -0.0920368762 == Approx( -0.0920368762 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: erfc_inv(1.067400) == Approx(-0.05980291115763361) for: -0.0598029112 == Approx( -0.0598029112 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: erfc_inv(0.050000) == Approx(1.38590382434967796) for: 1.3859038243 == Approx( 1.3859038243 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res.mean.count() == rate for: 2000.0 == 2000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res.outliers.total() == 0 for: 0 == 0
|
||||
Misc.tests.cpp:<line number>: passed:
|
||||
Misc.tests.cpp:<line number>: passed:
|
||||
Misc.tests.cpp:<line number>: passed:
|
||||
@ -1610,6 +1674,15 @@ Misc.tests.cpp:<line number>: failed: ( fib[i] % 2 ) == 0 for: 1 == 0 with 1 mes
|
||||
Misc.tests.cpp:<line number>: passed: ( fib[i] % 2 ) == 0 for: 0 == 0 with 1 message: 'Testing if fib[5] (8) is even'
|
||||
Misc.tests.cpp:<line number>: failed: ( fib[i] % 2 ) == 0 for: 1 == 0 with 1 message: 'Testing if fib[6] (13) is even'
|
||||
Misc.tests.cpp:<line number>: failed: ( fib[i] % 2 ) == 0 for: 1 == 0 with 1 message: 'Testing if fib[7] (21) is even'
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: m == 19. for: 19.0 == 19.0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x == 17 for: 17 == 17
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x == 23 for: 23 == 23
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: r.elapsed.count() == 42 for: 42 == 42
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: r.result == 23 for: 23 == 23
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: r.iterations == 1 for: 1 == 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: s.elapsed.count() == 69 for: 69 == 69
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: s.result == 17 for: 17 == 17
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: s.iterations == 1 for: 1 == 1
|
||||
Message.tests.cpp:<line number>: warning: 'info' with 2 messages: 'unscoped info' and 'and warn may mix'
|
||||
Message.tests.cpp:<line number>: warning: 'info' with 2 messages: 'unscoped info' and 'they are not cleared after warnings'
|
||||
Misc.tests.cpp:<line number>: failed: a == b for: 1 == 2
|
||||
@ -1620,6 +1693,14 @@ Misc.tests.cpp:<line number>: passed: b != a for: 2 != 1
|
||||
Misc.tests.cpp:<line number>: passed: a != b for: 1 != 2
|
||||
Tricky.tests.cpp:<line number>: passed: s == "7" for: "7" == "7"
|
||||
Tricky.tests.cpp:<line number>: passed: ti == typeid(int) for: {?} == {?}
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: normal_cdf(0.000000) == Approx(0.50000000000000000) for: 0.5 == Approx( 0.5 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: normal_cdf(1.000000) == Approx(0.84134474606854293) for: 0.8413447461 == Approx( 0.8413447461 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: normal_cdf(-1.000000) == Approx(0.15865525393145705) for: 0.1586552539 == Approx( 0.1586552539 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: normal_cdf(2.809729) == Approx(0.99752083845315409) for: 0.9975208385 == Approx( 0.9975208385 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: normal_cdf(-1.352570) == Approx(0.08809652095066035) for: 0.088096521 == Approx( 0.088096521 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: normal_quantile(0.551780) == Approx(0.13015979861484198) for: 0.1301597986 == Approx( 0.1301597986 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: normal_quantile(0.533700) == Approx(0.08457408802851875) for: 0.084574088 == Approx( 0.084574088 )
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: normal_quantile(0.025000) == Approx(-1.95996398454005449) for: -1.9599639845 == Approx( -1.9599639845 )
|
||||
Misc.tests.cpp:<line number>: passed:
|
||||
Message.tests.cpp:<line number>: passed: true with 1 message: 'this MAY be seen only for the FIRST assertion IF info is printed for passing assertions'
|
||||
Message.tests.cpp:<line number>: passed: true with 1 message: 'this MAY be seen only for the SECOND assertion IF info is printed for passing assertions'
|
||||
@ -1661,6 +1742,38 @@ StringManip.tests.cpp:<line number>: passed: !(Catch::replaceInPlace(letters, "x
|
||||
StringManip.tests.cpp:<line number>: passed: letters == letters for: "abcdefcg" == "abcdefcg"
|
||||
StringManip.tests.cpp:<line number>: passed: Catch::replaceInPlace(s, "'", "|'") for: true
|
||||
StringManip.tests.cpp:<line number>: passed: s == "didn|'t" for: "didn|'t" == "didn|'t"
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res.size() == count for: 10 == 10
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: meter.runs() >= old_runs for: 1 >= 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: meter.runs() >= old_runs for: 2 >= 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: meter.runs() >= old_runs for: 4 >= 2
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: meter.runs() >= old_runs for: 8 >= 4
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: meter.runs() >= old_runs for: 16 >= 8
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: meter.runs() >= old_runs for: 32 >= 16
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: meter.runs() >= old_runs for: 64 >= 32
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: meter.runs() >= old_runs for: 128 >= 64
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: Timing.elapsed >= time for: 128 ns >= 100 ns
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: Timing.result == Timing.iterations + 17 for: 145 == 145
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: Timing.iterations >= time.count() for: 128 >= 100
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x >= old_x for: 1 >= 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x >= old_x for: 2 >= 1
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x >= old_x for: 4 >= 2
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x >= old_x for: 8 >= 4
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x >= old_x for: 16 >= 8
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x >= old_x for: 32 >= 16
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x >= old_x for: 64 >= 32
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: x >= old_x for: 128 >= 64
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: Timing.elapsed >= time for: 128 ns >= 100 ns
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: Timing.result == Timing.iterations + 17 for: 145 == 145
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: Timing.iterations >= time.count() for: 128 >= 100
|
||||
Misc.tests.cpp:<line number>: failed: false with 1 message: '3'
|
||||
Message.tests.cpp:<line number>: failed: false with 2 messages: 'hi' and 'i := 7'
|
||||
Tag.tests.cpp:<line number>: passed: tags, Catch::VectorContains("magic-tag"_catch_sr) && Catch::VectorContains("."_catch_sr) for: { ., magic-tag } ( Contains: magic-tag and Contains: . )
|
||||
@ -1753,6 +1866,10 @@ ToStringTuple.tests.cpp:<line number>: passed: "{ \"hello\", \"world\" }" == ::C
|
||||
ToStringTuple.tests.cpp:<line number>: passed: "{ { 42 }, { }, 1.2f }" == ::Catch::Detail::stringify(value) for: "{ { 42 }, { }, 1.2f }"
|
||||
==
|
||||
"{ { 42 }, { }, 1.2f }"
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: e.point == 23 for: 23.0 == 23
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: e.upper_bound == 23 for: 23.0 == 23
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: e.lower_bound == 23 for: 23.0 == 23
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: e.confidence_interval == 0.95 for: 0.95 == 0.95
|
||||
ToStringVector.tests.cpp:<line number>: passed: ::Catch::Detail::stringify(v) == "{ }" for: "{ }" == "{ }"
|
||||
ToStringVector.tests.cpp:<line number>: passed: ::Catch::Detail::stringify(v) == "{ { \"hello\" }, { \"world\" } }" for: "{ { "hello" }, { "world" } }"
|
||||
==
|
||||
@ -1788,6 +1905,11 @@ Misc.tests.cpp:<line number>: passed: v.size() == 5 for: 5 == 5
|
||||
Misc.tests.cpp:<line number>: passed: v.capacity() >= 5 for: 5 >= 5
|
||||
Misc.tests.cpp:<line number>: passed: v.size() == 5 for: 5 == 5
|
||||
Misc.tests.cpp:<line number>: passed: v.capacity() >= 5 for: 5 >= 5
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: (iterations * rate) > Catch::Benchmark::Detail::warmup_time.count() for: 160000000 (0x<hex digits>) > 100
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: (end - start) > Catch::Benchmark::Detail::warmup_time for: 310016000 ns > 100 ms
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: q1 == 14.5 for: 14.5 == 14.5
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: med == 18. for: 18.0 == 18.0
|
||||
InternalBenchmark.tests.cpp:<line number>: passed: q3 == 23. for: 23.0 == 23.0
|
||||
Misc.tests.cpp:<line number>: passed:
|
||||
Misc.tests.cpp:<line number>: passed:
|
||||
Failed 86 test cases, failed 148 assertions.
|
||||
|
@@ -1380,6 +1380,6 @@ due to unexpected exception with message:
   Why would you throw a std::string?
 
 ===============================================================================
-test cases: 305 | 231 passed | 70 failed | 4 failed as expected
-assertions: 1654 | 1502 passed | 131 failed | 21 failed as expected
+test cases: 320 | 246 passed | 70 failed | 4 failed as expected
+assertions: 1776 | 1624 passed | 131 failed | 21 failed as expected
@ -11430,6 +11430,77 @@ Xml.tests.cpp:<line number>: PASSED:
|
||||
with expansion:
|
||||
"[\x7F]" == "[\x7F]"
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
analyse no analysis
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.mean.point.count() == 23 )
|
||||
with expansion:
|
||||
23.0 == 23
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.mean.lower_bound.count() == 23 )
|
||||
with expansion:
|
||||
23.0 == 23
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.mean.upper_bound.count() == 23 )
|
||||
with expansion:
|
||||
23.0 == 23
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.standard_deviation.point.count() == 0 )
|
||||
with expansion:
|
||||
0.0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.standard_deviation.lower_bound.count() == 0 )
|
||||
with expansion:
|
||||
0.0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.standard_deviation.upper_bound.count() == 0 )
|
||||
with expansion:
|
||||
0.0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.outliers.total() == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.outliers.low_mild == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.outliers.low_severe == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.outliers.high_mild == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.outliers.high_severe == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.outliers.samples_seen == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( analysis.outlier_variance == 0 )
|
||||
with expansion:
|
||||
0.0 == 0
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
array<int, N> -> toString
|
||||
-------------------------------------------------------------------------------
|
||||
@ -11462,6 +11533,70 @@ Misc.tests.cpp:<line number>: PASSED:
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
benchmark function call
|
||||
without chronometer
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( model.started == 1 )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( model.finished == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( model.started == 1 )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( model.finished == 1 )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( called == 1 )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
benchmark function call
|
||||
with chronometer
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( model.started == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( model.finished == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( model.started == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( model.finished == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( called == 1 )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
boolean member
|
||||
-------------------------------------------------------------------------------
|
||||
@ -11537,6 +11672,228 @@ Misc.tests.cpp:<line number>: FAILED:
|
||||
with expansion:
|
||||
false
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
classify_outliers
|
||||
none
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.samples_seen == static_cast<int>(x.size()) )
|
||||
with expansion:
|
||||
6 == 6
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_severe == los )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_mild == lom )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_mild == him )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_severe == his )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.total() == los + lom + him + his )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
classify_outliers
|
||||
low severe
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.samples_seen == static_cast<int>(x.size()) )
|
||||
with expansion:
|
||||
6 == 6
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_severe == los )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_mild == lom )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_mild == him )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_severe == his )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.total() == los + lom + him + his )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
classify_outliers
|
||||
low mild
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.samples_seen == static_cast<int>(x.size()) )
|
||||
with expansion:
|
||||
6 == 6
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_severe == los )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_mild == lom )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_mild == him )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_severe == his )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.total() == los + lom + him + his )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
classify_outliers
|
||||
high mild
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.samples_seen == static_cast<int>(x.size()) )
|
||||
with expansion:
|
||||
6 == 6
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_severe == los )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_mild == lom )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_mild == him )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_severe == his )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.total() == los + lom + him + his )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
classify_outliers
|
||||
high severe
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.samples_seen == static_cast<int>(x.size()) )
|
||||
with expansion:
|
||||
6 == 6
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_severe == los )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_mild == lom )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_mild == him )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_severe == his )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.total() == los + lom + him + his )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
classify_outliers
|
||||
mixed
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.samples_seen == static_cast<int>(x.size()) )
|
||||
with expansion:
|
||||
6 == 6
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_severe == los )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.low_mild == lom )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_mild == him )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.high_severe == his )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( o.total() == los + lom + him + his )
|
||||
with expansion:
|
||||
2 == 2
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
comparisons between const int variables
|
||||
-------------------------------------------------------------------------------
|
||||
@ -11589,6 +11946,43 @@ Condition.tests.cpp:<line number>: PASSED:
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
erfc_inv
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( erfc_inv(1.103560) == Approx(-0.09203687623843015) )
|
||||
with expansion:
|
||||
-0.0920368762 == Approx( -0.0920368762 )
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( erfc_inv(1.067400) == Approx(-0.05980291115763361) )
|
||||
with expansion:
|
||||
-0.0598029112 == Approx( -0.0598029112 )
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( erfc_inv(0.050000) == Approx(1.38590382434967796) )
|
||||
with expansion:
|
||||
1.3859038243 == Approx( 1.3859038243 )
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
estimate_clock_resolution
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res.mean.count() == rate )
|
||||
with expansion:
|
||||
2000.0 == 2000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res.outliers.total() == 0 )
|
||||
with expansion:
|
||||
0 == 0
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
even more nested SECTION tests
|
||||
c
|
||||
@ -11870,6 +12264,63 @@ with expansion:
|
||||
with message:
|
||||
Testing if fib[7] (21) is even
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
mean
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( m == 19. )
|
||||
with expansion:
|
||||
19.0 == 19.0
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
measure
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x == 17 )
|
||||
with expansion:
|
||||
17 == 17
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x == 23 )
|
||||
with expansion:
|
||||
23 == 23
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( r.elapsed.count() == 42 )
|
||||
with expansion:
|
||||
42 == 42
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( r.result == 23 )
|
||||
with expansion:
|
||||
23 == 23
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( r.iterations == 1 )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( s.elapsed.count() == 69 )
|
||||
with expansion:
|
||||
69 == 69
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( s.result == 17 )
|
||||
with expansion:
|
||||
17 == 17
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( s.iterations == 1 )
|
||||
with expansion:
|
||||
1 == 1
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
mix info, unscoped info and warning
|
||||
-------------------------------------------------------------------------------
|
||||
@ -11980,6 +12431,58 @@ Tricky.tests.cpp:<line number>: PASSED:
|
||||
with expansion:
|
||||
{?} == {?}
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
normal_cdf
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( normal_cdf(0.000000) == Approx(0.50000000000000000) )
|
||||
with expansion:
|
||||
0.5 == Approx( 0.5 )
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( normal_cdf(1.000000) == Approx(0.84134474606854293) )
|
||||
with expansion:
|
||||
0.8413447461 == Approx( 0.8413447461 )
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( normal_cdf(-1.000000) == Approx(0.15865525393145705) )
|
||||
with expansion:
|
||||
0.1586552539 == Approx( 0.1586552539 )
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( normal_cdf(2.809729) == Approx(0.99752083845315409) )
|
||||
with expansion:
|
||||
0.9975208385 == Approx( 0.9975208385 )
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( normal_cdf(-1.352570) == Approx(0.08809652095066035) )
|
||||
with expansion:
|
||||
0.088096521 == Approx( 0.088096521 )
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
normal_quantile
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( normal_quantile(0.551780) == Approx(0.13015979861484198) )
|
||||
with expansion:
|
||||
0.1301597986 == Approx( 0.1301597986 )
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( normal_quantile(0.533700) == Approx(0.08457408802851875) )
|
||||
with expansion:
|
||||
0.084574088 == Approx( 0.084574088 )
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( normal_quantile(0.025000) == Approx(-1.95996398454005449) )
|
||||
with expansion:
|
||||
-1.9599639845 == Approx( -1.9599639845 )
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
not allowed
|
||||
-------------------------------------------------------------------------------
|
||||
@ -12311,6 +12814,184 @@ StringManip.tests.cpp:<line number>: PASSED:
|
||||
with expansion:
|
||||
"didn|'t" == "didn|'t"
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
resolution
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res.size() == count )
|
||||
with expansion:
|
||||
10 == 10
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( res[i] == rate )
|
||||
with expansion:
|
||||
1000.0 == 1000 (0x<hex digits>)
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
run_for_at_least, chronometer
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( meter.runs() >= old_runs )
|
||||
with expansion:
|
||||
1 >= 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( meter.runs() >= old_runs )
|
||||
with expansion:
|
||||
2 >= 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( meter.runs() >= old_runs )
|
||||
with expansion:
|
||||
4 >= 2
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( meter.runs() >= old_runs )
|
||||
with expansion:
|
||||
8 >= 4
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( meter.runs() >= old_runs )
|
||||
with expansion:
|
||||
16 >= 8
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( meter.runs() >= old_runs )
|
||||
with expansion:
|
||||
32 >= 16
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( meter.runs() >= old_runs )
|
||||
with expansion:
|
||||
64 >= 32
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( meter.runs() >= old_runs )
|
||||
with expansion:
|
||||
128 >= 64
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( Timing.elapsed >= time )
|
||||
with expansion:
|
||||
128 ns >= 100 ns
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( Timing.result == Timing.iterations + 17 )
|
||||
with expansion:
|
||||
145 == 145
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( Timing.iterations >= time.count() )
|
||||
with expansion:
|
||||
128 >= 100
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
run_for_at_least, int
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x >= old_x )
|
||||
with expansion:
|
||||
1 >= 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x >= old_x )
|
||||
with expansion:
|
||||
2 >= 1
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x >= old_x )
|
||||
with expansion:
|
||||
4 >= 2
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x >= old_x )
|
||||
with expansion:
|
||||
8 >= 4
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x >= old_x )
|
||||
with expansion:
|
||||
16 >= 8
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x >= old_x )
|
||||
with expansion:
|
||||
32 >= 16
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x >= old_x )
|
||||
with expansion:
|
||||
64 >= 32
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( x >= old_x )
|
||||
with expansion:
|
||||
128 >= 64
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( Timing.elapsed >= time )
|
||||
with expansion:
|
||||
128 ns >= 100 ns
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( Timing.result == Timing.iterations + 17 )
|
||||
with expansion:
|
||||
145 == 145
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( Timing.iterations >= time.count() )
|
||||
with expansion:
|
||||
128 >= 100
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
second tag
|
||||
-------------------------------------------------------------------------------
|
||||
@ -12946,6 +13627,32 @@ with expansion:
|
||||
==
|
||||
"{ { 42 }, { }, 1.2f }"
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
uniform samples
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( e.point == 23 )
|
||||
with expansion:
|
||||
23.0 == 23
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( e.upper_bound == 23 )
|
||||
with expansion:
|
||||
23.0 == 23
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( e.lower_bound == 23 )
|
||||
with expansion:
|
||||
23.0 == 23
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
CHECK( e.confidence_interval == 0.95 )
|
||||
with expansion:
|
||||
0.95 == 0.95
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
vec<vec<string,alloc>> -> toString
|
||||
-------------------------------------------------------------------------------
|
||||
@ -13195,6 +13902,43 @@ Misc.tests.cpp:<line number>: PASSED:
|
||||
with expansion:
|
||||
5 >= 5
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
warmup
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( (iterations * rate) > Catch::Benchmark::Detail::warmup_time.count() )
|
||||
with expansion:
|
||||
160000000 (0x<hex digits>) > 100
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( (end - start) > Catch::Benchmark::Detail::warmup_time )
|
||||
with expansion:
|
||||
310016000 ns > 100 ms
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
weighted_average_quantile
|
||||
-------------------------------------------------------------------------------
|
||||
InternalBenchmark.tests.cpp:<line number>
|
||||
...............................................................................
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( q1 == 14.5 )
|
||||
with expansion:
|
||||
14.5 == 14.5
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( med == 18. )
|
||||
with expansion:
|
||||
18.0 == 18.0
|
||||
|
||||
InternalBenchmark.tests.cpp:<line number>: PASSED:
|
||||
REQUIRE( q3 == 23. )
|
||||
with expansion:
|
||||
23.0 == 23.0
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
xmlentitycheck
|
||||
embedded xml: <test>it should be possible to embed xml characters, such as <,
|
||||
@ -13216,6 +13960,6 @@ Misc.tests.cpp:<line number>
|
||||
Misc.tests.cpp:<line number>: PASSED:
|
||||
|
||||
===============================================================================
|
||||
test cases: 305 | 215 passed | 86 failed | 4 failed as expected
|
||||
assertions: 1671 | 1502 passed | 148 failed | 21 failed as expected
|
||||
test cases: 320 | 230 passed | 86 failed | 4 failed as expected
|
||||
assertions: 1793 | 1624 passed | 148 failed | 21 failed as expected
|
||||
|
||||
|
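(For reference, the change in the summary above corresponds to the benchmark self-tests that are now always compiled: 320 - 305 = 15 additional test cases and 1793 - 1671 = 122 additional assertions, all of them passing, since the passed counts grow by the same amounts.)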
@ -1,7 +1,7 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<testsuitesloose text artifact
|
||||
>
|
||||
<testsuite name="<exe-name>" errors="17" failures="132" tests="1672" hostname="tbd" time="{duration}" timestamp="{iso8601-timestamp}">
|
||||
<testsuite name="<exe-name>" errors="17" failures="132" tests="1794" hostname="tbd" time="{duration}" timestamp="{iso8601-timestamp}">
|
||||
<properties>
|
||||
<property name="filters" value="~[!nonportable]~[!benchmark]~[approvals] *"/>
|
||||
<property name="random-seed" value="1"/>
|
||||
@ -1403,8 +1403,11 @@ Exception.tests.cpp:<line number>
|
||||
<testcase classname="<exe-name>.global" name="XmlEncode/string with quotes" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="XmlEncode/string with control char (1)" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="XmlEncode/string with control char (x7F)" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="analyse no analysis" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="array<int, N> -> toString" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="atomic if" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="benchmark function call/without chronometer" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="benchmark function call/with chronometer" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="boolean member" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="checkedElse" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="checkedElse, failing" time="{duration}">
|
||||
@ -1440,8 +1443,16 @@ with expansion:
|
||||
Misc.tests.cpp:<line number>
|
||||
</failure>
|
||||
</testcase>
|
||||
<testcase classname="<exe-name>.global" name="classify_outliers/none" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="classify_outliers/low severe" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="classify_outliers/low mild" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="classify_outliers/high mild" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="classify_outliers/high severe" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="classify_outliers/mixed" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="comparisons between const int variables" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="comparisons between int variables" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="erfc_inv" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="estimate_clock_resolution" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="even more nested SECTION tests/c/d (leaf)" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="even more nested SECTION tests/c/e (leaf)" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="even more nested SECTION tests/f (leaf)" time="{duration}"/>
|
||||
@ -1536,6 +1547,8 @@ Testing if fib[7] (21) is even
|
||||
Misc.tests.cpp:<line number>
|
||||
</failure>
|
||||
</testcase>
|
||||
<testcase classname="<exe-name>.global" name="mean" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="measure" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="mix info, unscoped info and warning" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="more nested SECTION tests/equal/doesn't equal" time="{duration}">
|
||||
<failure message="a == b" type="REQUIRE">
|
||||
@ -1552,6 +1565,8 @@ Misc.tests.cpp:<line number>
|
||||
<testcase classname="<exe-name>.global" name="nested SECTION tests/doesn't equal/not equal" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="non streamable - with conv. op" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="non-copyable objects" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="normal_cdf" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="normal_quantile" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="not allowed" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="not prints unscoped info from previous failures" time="{duration}">
|
||||
<failure message="false" type="REQUIRE">
|
||||
@ -1595,6 +1610,9 @@ Message.tests.cpp:<line number>
|
||||
<testcase classname="<exe-name>.global" name="replaceInPlace/replace all chars" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="replaceInPlace/replace no chars" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="replaceInPlace/escape '" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="resolution" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="run_for_at_least, chronometer" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="run_for_at_least, int" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="send a single char to INFO" time="{duration}">
|
||||
<failure message="false" type="REQUIRE">
|
||||
FAILED:
|
||||
@ -1684,6 +1702,7 @@ Exception.tests.cpp:<line number>
|
||||
<testcase classname="<exe-name>.global" name="tuple<0,int,const char *>" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="tuple<string,string>" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="tuple<tuple<int>,tuple<>,float>" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="uniform samples" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="vec<vec<string,alloc>> -> toString" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="vector<bool> -> toString" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="vector<int,allocator> -> toString" time="{duration}"/>
|
||||
@ -1695,6 +1714,8 @@ Exception.tests.cpp:<line number>
|
||||
<testcase classname="<exe-name>.global" name="vectors can be sized and resized/resizing smaller changes size but not capacity/We can use the 'swap trick' to reset the capacity" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="vectors can be sized and resized/reserving bigger changes capacity but not size" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="vectors can be sized and resized/reserving smaller does not change size or capacity" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="warmup" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="weighted_average_quantile" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="xmlentitycheck/embedded xml: <test>it should be possible to embed xml characters, such as <, " or &, or even whole <xml>documents</xml> within an attribute</test>" time="{duration}"/>
|
||||
<testcase classname="<exe-name>.global" name="xmlentitycheck/encoded chars: these should all be encoded: &&&"""<<<&"<<&"" time="{duration}"/>
|
||||
<system-out>
|
||||
|
@ -90,6 +90,29 @@
|
||||
<testCase name="Generators internals/Range/Negative manual step/Integer/Slightly over end" duration="{duration}"/>
|
||||
<testCase name="Generators internals/Range/Negative manual step/Integer/Slightly under end" duration="{duration}"/>
|
||||
</file>
|
||||
<file path="tests/<exe-name>/IntrospectiveTests/InternalBenchmark.tests.cpp">
|
||||
<testCase name="analyse no analysis" duration="{duration}"/>
|
||||
<testCase name="benchmark function call/without chronometer" duration="{duration}"/>
|
||||
<testCase name="benchmark function call/with chronometer" duration="{duration}"/>
|
||||
<testCase name="classify_outliers/none" duration="{duration}"/>
|
||||
<testCase name="classify_outliers/low severe" duration="{duration}"/>
|
||||
<testCase name="classify_outliers/low mild" duration="{duration}"/>
|
||||
<testCase name="classify_outliers/high mild" duration="{duration}"/>
|
||||
<testCase name="classify_outliers/high severe" duration="{duration}"/>
|
||||
<testCase name="classify_outliers/mixed" duration="{duration}"/>
|
||||
<testCase name="erfc_inv" duration="{duration}"/>
|
||||
<testCase name="estimate_clock_resolution" duration="{duration}"/>
|
||||
<testCase name="mean" duration="{duration}"/>
|
||||
<testCase name="measure" duration="{duration}"/>
|
||||
<testCase name="normal_cdf" duration="{duration}"/>
|
||||
<testCase name="normal_quantile" duration="{duration}"/>
|
||||
<testCase name="resolution" duration="{duration}"/>
|
||||
<testCase name="run_for_at_least, chronometer" duration="{duration}"/>
|
||||
<testCase name="run_for_at_least, int" duration="{duration}"/>
|
||||
<testCase name="uniform samples" duration="{duration}"/>
|
||||
<testCase name="warmup" duration="{duration}"/>
|
||||
<testCase name="weighted_average_quantile" duration="{duration}"/>
|
||||
</file>
|
||||
<file path="tests/<exe-name>/IntrospectiveTests/PartTracker.tests.cpp">
|
||||
<testCase name="Tracker" duration="{duration}"/>
|
||||
<testCase name="Tracker/successfully close one section" duration="{duration}"/>
|
||||
|
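(The 23 lines added above are the new <file> block for InternalBenchmark.tests.cpp in the SonarQube-style report: 21 <testCase> entries plus the enclosing <file> tags. The 15 new TEST_CASEs expand to 21 leaf entries here because "benchmark function call" reports 2 sections and "classify_outliers" reports 6, i.e. 15 - 2 + 8 = 21.)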
@ -2959,6 +2959,32 @@ ok {test-number} - encode( stringWithQuotes, Catch::XmlEncode::ForAttributes ) =
|
||||
ok {test-number} - encode( "[\x01]" ) == "[\\x01]" for: "[\x01]" == "[\x01]"
|
||||
# XmlEncode
|
||||
ok {test-number} - encode( "[\x7F]" ) == "[\\x7F]" for: "[\x7F]" == "[\x7F]"
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.mean.point.count() == 23 for: 23.0 == 23
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.mean.lower_bound.count() == 23 for: 23.0 == 23
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.mean.upper_bound.count() == 23 for: 23.0 == 23
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.standard_deviation.point.count() == 0 for: 0.0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.standard_deviation.lower_bound.count() == 0 for: 0.0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.standard_deviation.upper_bound.count() == 0 for: 0.0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.outliers.total() == 0 for: 0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.outliers.low_mild == 0 for: 0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.outliers.low_severe == 0 for: 0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.outliers.high_mild == 0 for: 0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.outliers.high_severe == 0 for: 0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.outliers.samples_seen == 0 for: 0 == 0
|
||||
# analyse no analysis
|
||||
ok {test-number} - analysis.outlier_variance == 0 for: 0.0 == 0
|
||||
# array<int, N> -> toString
|
||||
ok {test-number} - Catch::Detail::stringify( empty ) == "{ }" for: "{ }" == "{ }"
|
||||
# array<int, N> -> toString
|
||||
@ -2967,6 +2993,26 @@ ok {test-number} - Catch::Detail::stringify( oneValue ) == "{ 42 }" for: "{ 42 }
|
||||
ok {test-number} - Catch::Detail::stringify( twoValues ) == "{ 42, 250 }" for: "{ 42, 250 }" == "{ 42, 250 }"
|
||||
# atomic if
|
||||
ok {test-number} - x == 0 for: 0 == 0
|
||||
# benchmark function call
|
||||
ok {test-number} - model.started == 1 for: 1 == 1
|
||||
# benchmark function call
|
||||
ok {test-number} - model.finished == 0 for: 0 == 0
|
||||
# benchmark function call
|
||||
ok {test-number} - model.started == 1 for: 1 == 1
|
||||
# benchmark function call
|
||||
ok {test-number} - model.finished == 1 for: 1 == 1
|
||||
# benchmark function call
|
||||
ok {test-number} - called == 1 for: 1 == 1
|
||||
# benchmark function call
|
||||
ok {test-number} - model.started == 0 for: 0 == 0
|
||||
# benchmark function call
|
||||
ok {test-number} - model.finished == 0 for: 0 == 0
|
||||
# benchmark function call
|
||||
ok {test-number} - model.started == 0 for: 0 == 0
|
||||
# benchmark function call
|
||||
ok {test-number} - model.finished == 0 for: 0 == 0
|
||||
# benchmark function call
|
||||
ok {test-number} - called == 1 for: 1 == 1
|
||||
# boolean member
|
||||
ok {test-number} - obj.prop != 0 for: 0x<hex digits> != 0
|
||||
# checkedElse
|
||||
@ -2985,6 +3031,78 @@ ok {test-number} - testCheckedIf( true ) for: true
|
||||
not ok {test-number} - flag for: false
|
||||
# checkedIf, failing
|
||||
not ok {test-number} - testCheckedIf( false ) for: false
|
||||
# classify_outliers
|
||||
ok {test-number} - o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_severe == los for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_mild == lom for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_mild == him for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_severe == his for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.total() == los + lom + him + his for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_severe == los for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_mild == lom for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_mild == him for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_severe == his for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.total() == los + lom + him + his for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_severe == los for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_mild == lom for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_mild == him for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_severe == his for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.total() == los + lom + him + his for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_severe == los for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_mild == lom for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_mild == him for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_severe == his for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.total() == los + lom + him + his for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_severe == los for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_mild == lom for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_mild == him for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_severe == his for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.total() == los + lom + him + his for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.samples_seen == static_cast<int>(x.size()) for: 6 == 6
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_severe == los for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.low_mild == lom for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_mild == him for: 1 == 1
|
||||
# classify_outliers
|
||||
ok {test-number} - o.high_severe == his for: 0 == 0
|
||||
# classify_outliers
|
||||
ok {test-number} - o.total() == los + lom + him + his for: 2 == 2
|
||||
# comparisons between const int variables
|
||||
ok {test-number} - unsigned_char_var == 1 for: 1 == 1
|
||||
# comparisons between const int variables
|
||||
@ -3001,6 +3119,16 @@ ok {test-number} - long_var == unsigned_short_var for: 1 == 1
|
||||
ok {test-number} - long_var == unsigned_int_var for: 1 == 1
|
||||
# comparisons between int variables
|
||||
ok {test-number} - long_var == unsigned_long_var for: 1 == 1
|
||||
# erfc_inv
|
||||
ok {test-number} - erfc_inv(1.103560) == Approx(-0.09203687623843015) for: -0.0920368762 == Approx( -0.0920368762 )
|
||||
# erfc_inv
|
||||
ok {test-number} - erfc_inv(1.067400) == Approx(-0.05980291115763361) for: -0.0598029112 == Approx( -0.0598029112 )
|
||||
# erfc_inv
|
||||
ok {test-number} - erfc_inv(0.050000) == Approx(1.38590382434967796) for: 1.3859038243 == Approx( 1.3859038243 )
|
||||
# estimate_clock_resolution
|
||||
ok {test-number} - res.mean.count() == rate for: 2000.0 == 2000 (0x<hex digits>)
|
||||
# estimate_clock_resolution
|
||||
ok {test-number} - res.outliers.total() == 0 for: 0 == 0
|
||||
# even more nested SECTION tests
|
||||
ok {test-number} -
|
||||
# even more nested SECTION tests
|
||||
@ -3050,10 +3178,28 @@ ok {test-number} - ( fib[i] % 2 ) == 0 for: 0 == 0 with 1 message: 'Testing if f
|
||||
not ok {test-number} - ( fib[i] % 2 ) == 0 for: 1 == 0 with 1 message: 'Testing if fib[6] (13) is even'
|
||||
# looped tests
|
||||
not ok {test-number} - ( fib[i] % 2 ) == 0 for: 1 == 0 with 1 message: 'Testing if fib[7] (21) is even'
|
||||
# mean
|
||||
ok {test-number} - m == 19. for: 19.0 == 19.0
|
||||
# measure
|
||||
ok {test-number} - x == 17 for: 17 == 17
|
||||
# measure
|
||||
ok {test-number} - x == 23 for: 23 == 23
|
||||
# measure
|
||||
ok {test-number} - r.elapsed.count() == 42 for: 42 == 42
|
||||
# measure
|
||||
ok {test-number} - r.result == 23 for: 23 == 23
|
||||
# measure
|
||||
ok {test-number} - r.iterations == 1 for: 1 == 1
|
||||
# measure
|
||||
ok {test-number} - s.elapsed.count() == 69 for: 69 == 69
|
||||
# measure
|
||||
ok {test-number} - s.result == 17 for: 17 == 17
|
||||
# measure
|
||||
ok {test-number} - s.iterations == 1 for: 1 == 1
|
||||
# mix info, unscoped info and warning
|
||||
warning 1522 - 'info' with 2 messages: 'unscoped info' and 'and warn may mix'
|
||||
warning 1595 - 'info' with 2 messages: 'unscoped info' and 'and warn may mix'
|
||||
# mix info, unscoped info and warning
|
||||
warning 1523 - 'info' with 2 messages: 'unscoped info' and 'they are not cleared after warnings'
|
||||
warning 1596 - 'info' with 2 messages: 'unscoped info' and 'they are not cleared after warnings'
|
||||
# more nested SECTION tests
|
||||
not ok {test-number} - a == b for: 1 == 2
|
||||
# more nested SECTION tests
|
||||
@ -3070,6 +3216,22 @@ ok {test-number} - a != b for: 1 != 2
|
||||
ok {test-number} - s == "7" for: "7" == "7"
|
||||
# non-copyable objects
|
||||
ok {test-number} - ti == typeid(int) for: {?} == {?}
|
||||
# normal_cdf
|
||||
ok {test-number} - normal_cdf(0.000000) == Approx(0.50000000000000000) for: 0.5 == Approx( 0.5 )
|
||||
# normal_cdf
|
||||
ok {test-number} - normal_cdf(1.000000) == Approx(0.84134474606854293) for: 0.8413447461 == Approx( 0.8413447461 )
|
||||
# normal_cdf
|
||||
ok {test-number} - normal_cdf(-1.000000) == Approx(0.15865525393145705) for: 0.1586552539 == Approx( 0.1586552539 )
|
||||
# normal_cdf
|
||||
ok {test-number} - normal_cdf(2.809729) == Approx(0.99752083845315409) for: 0.9975208385 == Approx( 0.9975208385 )
|
||||
# normal_cdf
|
||||
ok {test-number} - normal_cdf(-1.352570) == Approx(0.08809652095066035) for: 0.088096521 == Approx( 0.088096521 )
|
||||
# normal_quantile
|
||||
ok {test-number} - normal_quantile(0.551780) == Approx(0.13015979861484198) for: 0.1301597986 == Approx( 0.1301597986 )
|
||||
# normal_quantile
|
||||
ok {test-number} - normal_quantile(0.533700) == Approx(0.08457408802851875) for: 0.084574088 == Approx( 0.084574088 )
|
||||
# normal_quantile
|
||||
ok {test-number} - normal_quantile(0.025000) == Approx(-1.95996398454005449) for: -1.9599639845 == Approx( -1.9599639845 )
|
||||
# not allowed
|
||||
ok {test-number} -
|
||||
# not prints unscoped info from previous failures
|
||||
@ -3148,6 +3310,70 @@ ok {test-number} - letters == letters for: "abcdefcg" == "abcdefcg"
|
||||
ok {test-number} - Catch::replaceInPlace(s, "'", "|'") for: true
|
||||
# replaceInPlace
|
||||
ok {test-number} - s == "didn|'t" for: "didn|'t" == "didn|'t"
|
||||
# resolution
|
||||
ok {test-number} - res.size() == count for: 10 == 10
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# resolution
|
||||
ok {test-number} - res[i] == rate for: 1000.0 == 1000 (0x<hex digits>)
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - meter.runs() >= old_runs for: 1 >= 1
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - meter.runs() >= old_runs for: 2 >= 1
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - meter.runs() >= old_runs for: 4 >= 2
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - meter.runs() >= old_runs for: 8 >= 4
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - meter.runs() >= old_runs for: 16 >= 8
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - meter.runs() >= old_runs for: 32 >= 16
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - meter.runs() >= old_runs for: 64 >= 32
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - meter.runs() >= old_runs for: 128 >= 64
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - Timing.elapsed >= time for: 128 ns >= 100 ns
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - Timing.result == Timing.iterations + 17 for: 145 == 145
|
||||
# run_for_at_least, chronometer
|
||||
ok {test-number} - Timing.iterations >= time.count() for: 128 >= 100
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - x >= old_x for: 1 >= 1
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - x >= old_x for: 2 >= 1
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - x >= old_x for: 4 >= 2
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - x >= old_x for: 8 >= 4
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - x >= old_x for: 16 >= 8
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - x >= old_x for: 32 >= 16
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - x >= old_x for: 64 >= 32
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - x >= old_x for: 128 >= 64
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - Timing.elapsed >= time for: 128 ns >= 100 ns
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - Timing.result == Timing.iterations + 17 for: 145 == 145
|
||||
# run_for_at_least, int
|
||||
ok {test-number} - Timing.iterations >= time.count() for: 128 >= 100
|
||||
# send a single char to INFO
|
||||
not ok {test-number} - false with 1 message: '3'
|
||||
# sends information to INFO
|
||||
@ -3268,6 +3494,14 @@ ok {test-number} - "{ 0, 42, \"Catch me\" }" == ::Catch::Detail::stringify(value
|
||||
ok {test-number} - "{ \"hello\", \"world\" }" == ::Catch::Detail::stringify(type{"hello","world"}) for: "{ "hello", "world" }" == "{ "hello", "world" }"
|
||||
# tuple<tuple<int>,tuple<>,float>
|
||||
ok {test-number} - "{ { 42 }, { }, 1.2f }" == ::Catch::Detail::stringify(value) for: "{ { 42 }, { }, 1.2f }" == "{ { 42 }, { }, 1.2f }"
|
||||
# uniform samples
|
||||
ok {test-number} - e.point == 23 for: 23.0 == 23
|
||||
# uniform samples
|
||||
ok {test-number} - e.upper_bound == 23 for: 23.0 == 23
|
||||
# uniform samples
|
||||
ok {test-number} - e.lower_bound == 23 for: 23.0 == 23
|
||||
# uniform samples
|
||||
ok {test-number} - e.confidence_interval == 0.95 for: 0.95 == 0.95
|
||||
# vec<vec<string,alloc>> -> toString
|
||||
ok {test-number} - ::Catch::Detail::stringify(v) == "{ }" for: "{ }" == "{ }"
|
||||
# vec<vec<string,alloc>> -> toString
|
||||
@ -3330,9 +3564,19 @@ ok {test-number} - v.capacity() >= 5 for: 5 >= 5
|
||||
ok {test-number} - v.size() == 5 for: 5 == 5
|
||||
# vectors can be sized and resized
|
||||
ok {test-number} - v.capacity() >= 5 for: 5 >= 5
|
||||
# warmup
|
||||
ok {test-number} - (iterations * rate) > Catch::Benchmark::Detail::warmup_time.count() for: 160000000 (0x<hex digits>) > 100
|
||||
# warmup
|
||||
ok {test-number} - (end - start) > Catch::Benchmark::Detail::warmup_time for: 310016000 ns > 100 ms
|
||||
# weighted_average_quantile
|
||||
ok {test-number} - q1 == 14.5 for: 14.5 == 14.5
|
||||
# weighted_average_quantile
|
||||
ok {test-number} - med == 18. for: 18.0 == 18.0
|
||||
# weighted_average_quantile
|
||||
ok {test-number} - q3 == 23. for: 23.0 == 23.0
|
||||
# xmlentitycheck
|
||||
ok {test-number} -
|
||||
# xmlentitycheck
|
||||
ok {test-number} -
|
||||
1..1663
|
||||
1..1785
|
||||
|
||||
|
@ -580,10 +580,14 @@ Exception.tests.cpp:<line number>|nunexpected exception with message:|n "unexpe
|
||||
##teamcity[testFinished name='X/level/1/b' duration="{duration}"]
|
||||
##teamcity[testStarted name='XmlEncode']
|
||||
##teamcity[testFinished name='XmlEncode' duration="{duration}"]
|
||||
##teamcity[testStarted name='analyse no analysis']
|
||||
##teamcity[testFinished name='analyse no analysis' duration="{duration}"]
|
||||
##teamcity[testStarted name='array<int, N> -> toString']
|
||||
##teamcity[testFinished name='array<int, N> -> toString' duration="{duration}"]
|
||||
##teamcity[testStarted name='atomic if']
|
||||
##teamcity[testFinished name='atomic if' duration="{duration}"]
|
||||
##teamcity[testStarted name='benchmark function call']
|
||||
##teamcity[testFinished name='benchmark function call' duration="{duration}"]
|
||||
##teamcity[testStarted name='boolean member']
|
||||
##teamcity[testFinished name='boolean member' duration="{duration}"]
|
||||
##teamcity[testStarted name='checkedElse']
|
||||
@ -598,10 +602,16 @@ Misc.tests.cpp:<line number>|nexpression failed|n REQUIRE( testCheckedElse( fal
|
||||
Misc.tests.cpp:<line number>|nexpression failed|n CHECKED_IF( flag )|nwith expansion:|n false|n']
|
||||
Misc.tests.cpp:<line number>|nexpression failed|n REQUIRE( testCheckedIf( false ) )|nwith expansion:|n false|n']
|
||||
##teamcity[testFinished name='checkedIf, failing' duration="{duration}"]
|
||||
##teamcity[testStarted name='classify_outliers']
|
||||
##teamcity[testFinished name='classify_outliers' duration="{duration}"]
|
||||
##teamcity[testStarted name='comparisons between const int variables']
|
||||
##teamcity[testFinished name='comparisons between const int variables' duration="{duration}"]
|
||||
##teamcity[testStarted name='comparisons between int variables']
|
||||
##teamcity[testFinished name='comparisons between int variables' duration="{duration}"]
|
||||
##teamcity[testStarted name='erfc_inv']
|
||||
##teamcity[testFinished name='erfc_inv' duration="{duration}"]
|
||||
##teamcity[testStarted name='estimate_clock_resolution']
|
||||
##teamcity[testFinished name='estimate_clock_resolution' duration="{duration}"]
|
||||
##teamcity[testStarted name='even more nested SECTION tests']
|
||||
##teamcity[testFinished name='even more nested SECTION tests' duration="{duration}"]
|
||||
##teamcity[testStarted name='first tag']
|
||||
@ -633,6 +643,10 @@ Misc.tests.cpp:<line number>|nexpression failed with message:|n "Testing if fib
|
||||
Misc.tests.cpp:<line number>|nexpression failed with message:|n "Testing if fib|[6|] (13) is even"|n CHECK( ( fib|[i|] % 2 ) == 0 )|nwith expansion:|n 1 == 0|n']
|
||||
Misc.tests.cpp:<line number>|nexpression failed with message:|n "Testing if fib|[7|] (21) is even"|n CHECK( ( fib|[i|] % 2 ) == 0 )|nwith expansion:|n 1 == 0|n']
|
||||
##teamcity[testFinished name='looped tests' duration="{duration}"]
|
||||
##teamcity[testStarted name='mean']
|
||||
##teamcity[testFinished name='mean' duration="{duration}"]
|
||||
##teamcity[testStarted name='measure']
|
||||
##teamcity[testFinished name='measure' duration="{duration}"]
|
||||
##teamcity[testStarted name='mix info, unscoped info and warning']
|
||||
##teamcity[testFinished name='mix info, unscoped info and warning' duration="{duration}"]
|
||||
##teamcity[testStarted name='more nested SECTION tests']
|
||||
@ -644,6 +658,10 @@ Misc.tests.cpp:<line number>|nexpression failed|n REQUIRE( a == b )|nwith expan
|
||||
##teamcity[testFinished name='non streamable - with conv. op' duration="{duration}"]
|
||||
##teamcity[testStarted name='non-copyable objects']
|
||||
##teamcity[testFinished name='non-copyable objects' duration="{duration}"]
|
||||
##teamcity[testStarted name='normal_cdf']
|
||||
##teamcity[testFinished name='normal_cdf' duration="{duration}"]
|
||||
##teamcity[testStarted name='normal_quantile']
|
||||
##teamcity[testFinished name='normal_quantile' duration="{duration}"]
|
||||
##teamcity[testStarted name='not allowed']
|
||||
##teamcity[testFinished name='not allowed' duration="{duration}"]
|
||||
##teamcity[testStarted name='not prints unscoped info from previous failures']
|
||||
@ -671,6 +689,12 @@ Message.tests.cpp:<line number>|nexpression failed with message:|n "this SHOULD
|
||||
##teamcity[testFinished name='random SECTION tests' duration="{duration}"]
|
||||
##teamcity[testStarted name='replaceInPlace']
|
||||
##teamcity[testFinished name='replaceInPlace' duration="{duration}"]
|
||||
##teamcity[testStarted name='resolution']
|
||||
##teamcity[testFinished name='resolution' duration="{duration}"]
|
||||
##teamcity[testStarted name='run_for_at_least, chronometer']
|
||||
##teamcity[testFinished name='run_for_at_least, chronometer' duration="{duration}"]
|
||||
##teamcity[testStarted name='run_for_at_least, int']
|
||||
##teamcity[testFinished name='run_for_at_least, int' duration="{duration}"]
|
||||
##teamcity[testStarted name='second tag']
|
||||
##teamcity[testFinished name='second tag' duration="{duration}"]
|
||||
##teamcity[testStarted name='send a single char to INFO']
|
||||
@ -753,6 +777,8 @@ Exception.tests.cpp:<line number>|nunexpected exception with message:|n "Why wo
|
||||
##teamcity[testFinished name='tuple<string,string>' duration="{duration}"]
|
||||
##teamcity[testStarted name='tuple<tuple<int>,tuple<>,float>']
|
||||
##teamcity[testFinished name='tuple<tuple<int>,tuple<>,float>' duration="{duration}"]
|
||||
##teamcity[testStarted name='uniform samples']
|
||||
##teamcity[testFinished name='uniform samples' duration="{duration}"]
|
||||
##teamcity[testStarted name='vec<vec<string,alloc>> -> toString']
|
||||
##teamcity[testFinished name='vec<vec<string,alloc>> -> toString' duration="{duration}"]
|
||||
##teamcity[testStarted name='vector<bool> -> toString']
|
||||
@ -765,6 +791,10 @@ Exception.tests.cpp:<line number>|nunexpected exception with message:|n "Why wo
|
||||
##teamcity[testFinished name='vector<string> -> toString' duration="{duration}"]
|
||||
##teamcity[testStarted name='vectors can be sized and resized']
|
||||
##teamcity[testFinished name='vectors can be sized and resized' duration="{duration}"]
|
||||
##teamcity[testStarted name='warmup']
|
||||
##teamcity[testFinished name='warmup' duration="{duration}"]
|
||||
##teamcity[testStarted name='weighted_average_quantile']
|
||||
##teamcity[testFinished name='weighted_average_quantile' duration="{duration}"]
|
||||
##teamcity[testStarted name='xmlentitycheck']
|
||||
##teamcity[testFinished name='xmlentitycheck' duration="{duration}"]
|
||||
##teamcity[testSuiteFinished name='<exe-name>']
|
||||
|
File diff suppressed because it is too large
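Before the test-source changes below, a toy illustration of the pattern behind the "run_for_at_least" approvals above, where the iteration count doubles (1, 2, 4, ..., 128) until the fake clock reports at least 100 ns. This is a self-contained sketch, not Catch2 code; the 1 ns-per-iteration cost is an assumption made purely so the numbers line up with the approved expansions.

#include <chrono>
#include <cstdio>

int main() {
    using std::chrono::nanoseconds;
    const nanoseconds target(100);   // the 100 ns minimum required in the tests
    long long iterations = 1;
    nanoseconds elapsed(0);

    while (true) {
        // Assume each iteration costs exactly 1 ns of (simulated) clock time.
        elapsed = nanoseconds(iterations);
        if (elapsed >= target) {
            break;                   // stops once 128 ns >= 100 ns
        }
        iterations *= 2;             // 1, 2, 4, ..., 128 -- the ">=" chain above
    }

    std::printf("%lld iterations, %lld ns\n",
                iterations, static_cast<long long>(elapsed.count()));
    return 0;
}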
@ -6,8 +6,15 @@
|
||||
* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
*/
|
||||
|
||||
#include <catch2/catch.hpp>
|
||||
#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <catch2/catch_approx.h>
|
||||
#include <catch2/catch_config.hpp>
|
||||
#include <catch2/benchmark/catch_benchmark.hpp>
|
||||
#include <catch2/benchmark/catch_chronometer.hpp>
|
||||
#include <catch2/benchmark/detail/catch_analyse.hpp>
|
||||
#include <catch2/benchmark/detail/catch_benchmark_function.hpp>
|
||||
#include <catch2/benchmark/detail/catch_estimate_clock.hpp>
|
||||
|
||||
namespace {
|
||||
struct manual_clock {
|
||||
public:
|
||||
@ -90,10 +97,10 @@ TEST_CASE("resolution", "[benchmark]") {
|
||||
}
|
||||
|
||||
TEST_CASE("estimate_clock_resolution", "[benchmark]") {
|
||||
auto rate = 1000;
|
||||
auto rate = 2'000;
|
||||
counting_clock::set_rate(rate);
|
||||
|
||||
int iters = 160000;
|
||||
int iters = 160'000;
|
||||
auto res = Catch::Benchmark::Detail::estimate_clock_resolution<counting_clock>(iters);
|
||||
|
||||
REQUIRE(res.mean.count() == rate);
|
||||
@ -154,6 +161,7 @@ TEST_CASE("uniform samples", "[benchmark]") {
|
||||
|
||||
TEST_CASE("normal_cdf", "[benchmark]") {
|
||||
using Catch::Benchmark::Detail::normal_cdf;
|
||||
using Catch::Approx;
|
||||
CHECK(normal_cdf(0.000000) == Approx(0.50000000000000000));
|
||||
CHECK(normal_cdf(1.000000) == Approx(0.84134474606854293));
|
||||
CHECK(normal_cdf(-1.000000) == Approx(0.15865525393145705));
|
||||
@ -163,6 +171,7 @@ TEST_CASE("normal_cdf", "[benchmark]") {
|
||||
|
||||
TEST_CASE("erfc_inv", "[benchmark]") {
|
||||
using Catch::Benchmark::Detail::erfc_inv;
|
||||
using Catch::Approx;
|
||||
CHECK(erfc_inv(1.103560) == Approx(-0.09203687623843015));
|
||||
CHECK(erfc_inv(1.067400) == Approx(-0.05980291115763361));
|
||||
CHECK(erfc_inv(0.050000) == Approx(1.38590382434967796));
|
||||
@ -170,6 +179,7 @@ TEST_CASE("erfc_inv", "[benchmark]") {
|
||||
|
||||
TEST_CASE("normal_quantile", "[benchmark]") {
|
||||
using Catch::Benchmark::Detail::normal_quantile;
|
||||
using Catch::Approx;
|
||||
CHECK(normal_quantile(0.551780) == Approx(0.13015979861484198));
|
||||
CHECK(normal_quantile(0.533700) == Approx(0.08457408802851875));
|
||||
CHECK(normal_quantile(0.025000) == Approx(-1.95996398454005449));
|
||||
@ -255,7 +265,7 @@ TEST_CASE("classify_outliers", "[benchmark]") {
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("analyse", "[benchmark]") {
|
||||
TEST_CASE("analyse", "[approvals][benchmark]") {
|
||||
Catch::ConfigData data{};
|
||||
data.benchmarkConfidenceInterval = 0.95;
|
||||
data.benchmarkNoAnalysis = false;
|
||||
@ -388,7 +398,7 @@ TEST_CASE("measure", "[benchmark]") {
|
||||
CHECK(s.iterations == 1);
|
||||
}
|
||||
|
||||
TEST_CASE("run benchmark", "[benchmark]") {
|
||||
TEST_CASE("run benchmark", "[benchmark][approvals]") {
|
||||
counting_clock::set_rate(1000);
|
||||
auto start = counting_clock::now();
|
||||
|
||||
@ -402,4 +412,3 @@ TEST_CASE("run benchmark", "[benchmark]") {
|
||||
|
||||
CHECK((end - start).count() == 2867251000);
|
||||
}
|
||||
#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
|
||||
|
@ -1,8 +1,9 @@
|
||||
#include <catch2/catch.hpp>
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <catch2/benchmark/catch_benchmark.hpp>
|
||||
#include <catch2/catch_generators_specific.hpp>
|
||||
|
||||
#include <map>
|
||||
|
||||
#if defined(CATCH_CONFIG_ENABLE_BENCHMARKING)
|
||||
namespace {
|
||||
std::uint64_t Fibonacci(std::uint64_t number) {
|
||||
return number < 2 ? 1 : Fibonacci(number - 1) + Fibonacci(number - 2);
|
||||
@ -127,4 +128,3 @@ TEST_CASE("Benchmark containers", "[!benchmark]") {
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif // CATCH_CONFIG_ENABLE_BENCHMARKING
|
||||
|
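With the #if guard and the CATCH_CONFIG_ENABLE_BENCHMARKING define gone, opting into benchmarking is just a matter of including the benchmarking header, as the updated includes above show. A minimal self-contained sketch along the lines of the Fibonacci benchmark in Benchmark.tests.cpp (the test name and the argument 20 are illustrative, not taken from the repository):

#include <catch2/catch_test_macros.hpp>
#include <catch2/benchmark/catch_benchmark.hpp>

#include <cstdint>

namespace {
    // Same shape as the helper used by the benchmark tests above.
    std::uint64_t Fibonacci(std::uint64_t number) {
        return number < 2 ? 1 : Fibonacci(number - 1) + Fibonacci(number - 2);
    }
}

// No compile-time toggle is needed any more; the benchmark is registered
// like any other test case and tagged the same way as in the diff above.
TEST_CASE("Fibonacci benchmark (sketch)", "[!benchmark]") {
    BENCHMARK("Fibonacci 20") {
        return Fibonacci(20);   // returning the result keeps the call observable
    };
}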