First commit of benchmarks for Catch runtime perf.

So far it's very much a WIP, with some already-known problems and tests that are not very representative.
Martin Hořeňovský 2017-01-14 21:55:37 +01:00
parent e3659cdddd
commit 3b7511e564
6 changed files with 112 additions and 0 deletions

CMakeLists.txt

@@ -7,6 +7,7 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON)
 # define some folders
 set(CATCH_DIR ${CMAKE_CURRENT_SOURCE_DIR})
 set(SELF_TEST_DIR ${CATCH_DIR}/projects/SelfTest)
+set(BENCHMARK_DIR ${CATCH_DIR}/projects/Benchmark)
 set(HEADER_DIR ${CATCH_DIR}/include)
 
 if(USE_CPP11)
@@ -161,13 +162,20 @@ set(HEADERS
        ${HEADER_DIR}/reporters/catch_reporter_xml.hpp
        )
 
+set(BENCH_SOURCES
+        ${BENCHMARK_DIR}/BenchMain.cpp
+        ${BENCHMARK_DIR}/StringificationBench.cpp
+        )
+
 # Provide some groupings for IDEs
 SOURCE_GROUP("Tests" FILES ${TEST_SOURCES})
 SOURCE_GROUP("Surrogates" FILES ${IMPL_SOURCES})
+SOURCE_GROUP("Benchmarks" FILES ${BENCH_SOURCES})
 
 # configure the executable
 include_directories(${HEADER_DIR})
 add_executable(SelfTest ${TEST_SOURCES} ${IMPL_SOURCES} ${HEADERS})
+add_executable(Benchmark ${BENCH_SOURCES} ${HEADERS})
 
 # configure unit tests via CTest
 enable_testing()

projects/Benchmark/BenchMain.cpp Normal file

@@ -0,0 +1,2 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"

projects/Benchmark/StringificationBench.cpp Normal file

@@ -0,0 +1,39 @@
#include "catch.hpp"

#include <vector>

///////////////////////////////////////////////////////////////////////////////
// Passing REQUIREs: measures per-assertion overhead on the happy path.
TEST_CASE("Successful tests -- REQUIRE", "[Success]") {
    const size_t sz = 1 * 1024 * 1024;

    std::vector<size_t> vec;
    vec.reserve(sz);
    for (size_t i = 0; i < sz; ++i) {
        vec.push_back(i);
        REQUIRE(vec.back() == i);
    }
}

///////////////////////////////////////////////////////////////////////////////
// Passing CHECKs: the same loop as above, using the non-aborting macro.
TEST_CASE("Successful tests -- CHECK", "[Success]") {
    const size_t sz = 1 * 1024 * 1024;

    std::vector<size_t> vec;
    vec.reserve(sz);
    for (size_t i = 0; i < sz; ++i) {
        vec.push_back(i);
        CHECK(vec.back() == i);
    }
}

///////////////////////////////////////////////////////////////////////////////
// Failing CHECKs: every assertion fails (size() is i + 1 after push_back),
// exercising Catch's failure-reporting path without aborting the test case.
TEST_CASE("Unsuccessful tests -- CHECK", "[Failure]") {
    const size_t sz = 128 * 1024;

    std::vector<size_t> vec;
    vec.reserve(sz);
    for (size_t i = 0; i < sz; ++i) {
        vec.push_back(i);
        CHECK(vec.size() == i);
    }
}


@@ -0,0 +1,4 @@
This is very much a work in progress.
The recorded results are baselined on a single developer's machine,
the benchmarking script is basic, and there are only 3 benchmarks,
but this should improve over time. For now, at least there is something to go by.
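As a rough illustration of how the suite would grow: a new benchmark is just another Catch TEST_CASE compiled into the Benchmark target, i.e. a new .cpp listed in BENCH_SOURCES and linked together with BenchMain.cpp. The sketch below follows the same loop-of-assertions pattern as StringificationBench.cpp; the file name, test name, tag and loop size are made up for illustration and are not part of this commit.

// Hypothetical StringAppendBench.cpp -- illustrative only, not part of this
// commit. Mirrors the existing benchmarks: one TEST_CASE running a large loop
// of passing assertions, so the measured duration is dominated by Catch's
// assertion overhead.
#include "catch.hpp"

#include <cstddef>
#include <string>

TEST_CASE("Successful tests -- string append", "[Success]") {
    const std::size_t sz = 1024 * 1024;

    std::string str;
    str.reserve(sz);
    for (std::size_t i = 0; i < sz; ++i) {
        str.push_back('x');
        REQUIRE(str.size() == i + 1);
    }
}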


@@ -0,0 +1,3 @@
Successful tests -- CHECK: median: 3.38116 (s), stddev: 0.11567366292001534 (s)
Successful tests -- REQUIRE: median: 3.479955 (s), stddev: 0.16295972890734556 (s)
Unsuccessful tests -- CHECK: median: 1.966895 (s), stddev: 0.06323488524716572 (s)

scripts/runner.py Normal file

@@ -0,0 +1,56 @@
#!/usr/bin/env python3
import subprocess, os, sys
import xml.etree.ElementTree as ET
from collections import defaultdict
from statistics import median, stdev
from datetime import datetime


def get_commit_hash():
    res = subprocess.run('git rev-parse HEAD'.split(), check=True, stdout=subprocess.PIPE, universal_newlines=True)
    return res.stdout.strip()


if len(sys.argv) < 2:
    print('Usage: {} benchmark-binary'.format(sys.argv[0]))
    exit(1)

num_runs = 10
# Maps test case name -> list of durations in seconds, one per run
data = defaultdict(list)


def parse_file(file):
    # Walk the Catch XML report and collect the duration of each test case
    def recursive_search(node):
        if node.tag == 'TestCase':
            results = node.find('OverallResult')
            time = results.get('durationInSeconds')
            data[node.get('name')].append(float(time))
        elif node.tag in ('Group', 'Catch'):
            for child in node:
                recursive_search(child)

    tree = ET.parse(file)
    recursive_search(tree.getroot())


def run_benchmarks(binary):
    # -d yes: report durations, -r xml: XML reporter, -o: write report to file
    call = [binary] + '-d yes -r xml -o'.split()
    for i in range(num_runs):
        file = 'temp{}.xml'.format(i)
        print('Run number {}'.format(i))
        subprocess.run(call + [file])
        parse_file(file)
        # Remove file right after parsing, because benchmark output can be big
        os.remove(file)


# Run benchmarks
run_benchmarks(sys.argv[1])

result_file = '{:%Y-%m-%dT%H-%M-%S}-{}.result'.format(datetime.now(), get_commit_hash())

print('Writing results to {}'.format(result_file))
with open(result_file, 'w') as file:
    for k in sorted(data):
        file.write('{}: median: {} (s), stddev: {} (s)\n'.format(k, median(data[k]), stdev(data[k])))
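For context, the script walks the XML report that the Benchmark binary emits when invoked with "-d yes -r xml -o <file>". Below is a minimal, self-contained sketch of that parsing step; the XML fragment is made up to mirror the shape implied by recursive_search (Catch > Group > TestCase > OverallResult with a durationInSeconds attribute), the test names and durations in it are illustrative only, and it uses ElementTree's iter() instead of the explicit recursion.

#!/usr/bin/env python3
# Illustrative sketch only: parse a made-up fragment shaped like the Catch XML
# report that runner.py consumes, and print the per-test durations.
import xml.etree.ElementTree as ET

sample_report = '''
<Catch name="Benchmark">
  <Group name="Benchmark">
    <TestCase name="Successful tests -- CHECK">
      <OverallResult success="true" durationInSeconds="3.4"/>
    </TestCase>
    <TestCase name="Unsuccessful tests -- CHECK">
      <OverallResult success="false" durationInSeconds="1.9"/>
    </TestCase>
  </Group>
</Catch>
'''

root = ET.fromstring(sample_report)
for test_case in root.iter('TestCase'):
    duration = float(test_case.find('OverallResult').get('durationInSeconds'))
    print('{}: {} (s)'.format(test_case.get('name'), duration))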