diff --git a/CMakeLists.txt b/CMakeLists.txt
index 48ae6945..016d14b1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,6 +7,7 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON)
 # define some folders
 set(CATCH_DIR ${CMAKE_CURRENT_SOURCE_DIR})
 set(SELF_TEST_DIR ${CATCH_DIR}/projects/SelfTest)
+set(BENCHMARK_DIR ${CATCH_DIR}/projects/Benchmark)
 set(HEADER_DIR ${CATCH_DIR}/include)
 
 if(USE_CPP11)
@@ -161,13 +162,20 @@ set(HEADERS
         ${HEADER_DIR}/reporters/catch_reporter_xml.hpp
         )
 
+set(BENCH_SOURCES
+        ${BENCHMARK_DIR}/BenchMain.cpp
+        ${BENCHMARK_DIR}/StringificationBench.cpp
+        )
+
 # Provide some groupings for IDEs
 SOURCE_GROUP("Tests" FILES ${TEST_SOURCES})
 SOURCE_GROUP("Surrogates" FILES ${IMPL_SOURCES})
+SOURCE_GROUP("Benchmarks" FILES ${BENCH_SOURCES})
 
 # configure the executable
 include_directories(${HEADER_DIR})
 add_executable(SelfTest ${TEST_SOURCES} ${IMPL_SOURCES} ${HEADERS})
+add_executable(Benchmark ${BENCH_SOURCES} ${HEADERS})
 
 # configure unit tests via CTest
 enable_testing()
diff --git a/projects/Benchmark/BenchMain.cpp b/projects/Benchmark/BenchMain.cpp
new file mode 100644
index 00000000..0c7c351f
--- /dev/null
+++ b/projects/Benchmark/BenchMain.cpp
@@ -0,0 +1,2 @@
+#define CATCH_CONFIG_MAIN
+#include "catch.hpp"
diff --git a/projects/Benchmark/StringificationBench.cpp b/projects/Benchmark/StringificationBench.cpp
new file mode 100644
index 00000000..67f36685
--- /dev/null
+++ b/projects/Benchmark/StringificationBench.cpp
@@ -0,0 +1,39 @@
+#include "catch.hpp"
+
+#include <vector>
+
+///////////////////////////////////////////////////////////////////////////////
+TEST_CASE("Successful tests -- REQUIRE", "[Success]") {
+    const size_t sz = 1 * 1024 * 1024;
+
+
+    std::vector<size_t> vec; vec.reserve(sz);
+    for (size_t i = 0; i < sz; ++i){
+        vec.push_back(i);
+        REQUIRE(vec.back() == i);
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+TEST_CASE("Successful tests -- CHECK", "[Success]") {
+    const size_t sz = 1 * 1024 * 1024;
+
+
+    std::vector<size_t> vec; vec.reserve(sz);
+    for (size_t i = 0; i < sz; ++i){
+        vec.push_back(i);
+        CHECK(vec.back() == i);
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+TEST_CASE("Unsuccessful tests -- CHECK", "[Failure]") {
+    const size_t sz = 128 * 1024;
+
+
+    std::vector<size_t> vec; vec.reserve(sz);
+    for (size_t i = 0; i < sz; ++i){
+        vec.push_back(i);
+        CHECK(vec.size() == i);
+    }
+}
diff --git a/projects/Benchmark/readme.txt b/projects/Benchmark/readme.txt
new file mode 100644
index 00000000..c4d2fabd
--- /dev/null
+++ b/projects/Benchmark/readme.txt
@@ -0,0 +1,4 @@
+This is very much a work in progress.
+The past results are all from a single developer's machine,
+the benchmarking script is basic and there are only 3 benchmarks,
+but this should get better in time. For now, at least there is something to go by.
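For reference, a minimal sketch of building the new `Benchmark` target added above, wrapped in Python to match the repository's scripting language. The out-of-source `build` directory name is an assumption, not part of the patch; the `Benchmark` target name comes from the `add_executable(Benchmark ...)` line in the CMake change.

```python
#!/usr/bin/env python3
# Hypothetical build helper for the Benchmark target defined in the
# CMakeLists.txt change above. The 'build' directory is an assumption.
import os
import subprocess

os.makedirs('build', exist_ok=True)
# Configure the project from the repository root into ./build ...
subprocess.run(['cmake', '..'], cwd='build', check=True)
# ... and build only the Benchmark executable.
subprocess.run(['cmake', '--build', '.', '--target', 'Benchmark'], cwd='build', check=True)
```

The resulting binary is what `scripts/runner.py` below expects as its single argument.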
diff --git a/projects/Benchmark/results/2017-01-14T21-53-49-e3659cdddd43ba4df9e4846630be6a6a7bd85a07.result b/projects/Benchmark/results/2017-01-14T21-53-49-e3659cdddd43ba4df9e4846630be6a6a7bd85a07.result
new file mode 100644
index 00000000..4b6fc659
--- /dev/null
+++ b/projects/Benchmark/results/2017-01-14T21-53-49-e3659cdddd43ba4df9e4846630be6a6a7bd85a07.result
@@ -0,0 +1,3 @@
+Successful tests -- CHECK: median: 3.38116 (s), stddev: 0.11567366292001534 (s)
+Successful tests -- REQUIRE: median: 3.479955 (s), stddev: 0.16295972890734556 (s)
+Unsuccessful tests -- CHECK: median: 1.966895 (s), stddev: 0.06323488524716572 (s)
diff --git a/scripts/runner.py b/scripts/runner.py
new file mode 100644
index 00000000..dc753cf0
--- /dev/null
+++ b/scripts/runner.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+
+import subprocess, os, sys
+import xml.etree.ElementTree as ET
+from collections import defaultdict
+from statistics import median, stdev
+from datetime import datetime
+
+def get_commit_hash():
+    res = subprocess.run('git rev-parse HEAD'.split(), check=True, stdout=subprocess.PIPE, universal_newlines=True)
+    return res.stdout.strip()
+
+if len(sys.argv) < 2:
+    print('Usage: {} benchmark-binary'.format(sys.argv[0]))
+    exit(1)
+
+
+num_runs = 10
+data = defaultdict(list)
+
+
+def parse_file(file):
+
+    def recursive_search(node):
+        if node.tag == 'TestCase':
+            results = node.find('OverallResult')
+            time = results.get('durationInSeconds')
+            data[node.get('name')].append(float(time))
+        elif node.tag in ('Group', 'Catch'):
+            for child in node:
+                recursive_search(child)
+
+    tree = ET.parse(file)
+    recursive_search(tree.getroot())
+
+def run_benchmarks(binary):
+    call = [binary] + '-d yes -r xml -o'.split()
+    for i in range(num_runs):
+        file = 'temp{}.xml'.format(i)
+        print('Run number {}'.format(i))
+        subprocess.run(call + [file])
+        parse_file(file)
+        # Remove file right after parsing, because benchmark output can be big
+        os.remove(file)
+
+
+# Run benchmarks
+run_benchmarks(sys.argv[1])
+
+result_file = '{:%Y-%m-%dT%H-%M-%S}-{}.result'.format(datetime.now(), get_commit_hash())
+
+
+print('Writing results to {}'.format(result_file))
+with open(result_file, 'w') as file:
+    for k in sorted(data):
+        file.write('{}: median: {} (s), stddev: {} (s)\n'.format(k, median(data[k]), stdev(data[k])))
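The `.result` files written by the runner are line-oriented (`<name>: median: <M> (s), stddev: <S> (s)`), so past runs can be compared mechanically. A minimal comparison sketch follows; the file paths are hypothetical, and the regex simply mirrors the `file.write` format string in runner.py above.

```python
#!/usr/bin/env python3
# Hypothetical comparison of two .result files produced by scripts/runner.py.
# Parses lines of the form '<name>: median: <M> (s), stddev: <S> (s)'.
import re
import sys

LINE = re.compile(r'^(.+): median: ([0-9.]+) \(s\), stddev: ([0-9.]+) \(s\)$')

def load(path):
    medians = {}
    with open(path) as f:
        for raw in f:
            m = LINE.match(raw.strip())
            if m:
                medians[m.group(1)] = float(m.group(2))
    return medians

# Usage: compare.py old.result new.result
old, new = load(sys.argv[1]), load(sys.argv[2])
for name in sorted(old.keys() & new.keys()):
    change = (new[name] - old[name]) / old[name] * 100
    print('{}: {:+.2f}% (median {} s -> {} s)'.format(name, change, old[name], new[name]))
```

Since the recorded numbers are machine-specific, as the readme notes, such comparisons are only meaningful between runs on the same machine.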