diff --git a/src/catch2/catch_timer.cpp b/src/catch2/catch_timer.cpp
index 659292dd..8084352e 100644
--- a/src/catch2/catch_timer.cpp
+++ b/src/catch2/catch_timer.cpp
@@ -17,42 +17,6 @@ namespace Catch {
         return std::chrono::duration_cast<std::chrono::nanoseconds>( std::chrono::high_resolution_clock::now().time_since_epoch() ).count();
     }
 
-    namespace {
-        auto estimateClockResolution() -> uint64_t {
-            uint64_t sum = 0;
-            static const uint64_t iterations = 1000000;
-
-            auto startTime = getCurrentNanosecondsSinceEpoch();
-
-            for( std::size_t i = 0; i < iterations; ++i ) {
-
-                uint64_t ticks;
-                uint64_t baseTicks = getCurrentNanosecondsSinceEpoch();
-                do {
-                    ticks = getCurrentNanosecondsSinceEpoch();
-                } while( ticks == baseTicks );
-
-                auto delta = ticks - baseTicks;
-                sum += delta;
-
-                // If we have been calibrating for over 3 seconds -- the clock
-                // is terrible and we should move on.
-                // TBD: How to signal that the measured resolution is probably wrong?
-                if (ticks > startTime + 3 * nanosecondsInSecond) {
-                    return sum / ( i + 1u );
-                }
-            }
-
-            // We're just taking the mean, here. To do better we could take the std. dev and exclude outliers
-            // - and potentially do more iterations if there's a high variance.
-            return sum/iterations;
-        }
-    }
-
-    auto getEstimatedClockResolution() -> uint64_t {
-        static auto s_resolution = estimateClockResolution();
-        return s_resolution;
-    }
-
     void Timer::start() {
         m_nanoseconds = getCurrentNanosecondsSinceEpoch();
     }
diff --git a/src/catch2/catch_timer.hpp b/src/catch2/catch_timer.hpp
index 03bbf497..66aab87b 100644
--- a/src/catch2/catch_timer.hpp
+++ b/src/catch2/catch_timer.hpp
@@ -13,7 +13,6 @@ namespace Catch {
 
     auto getCurrentNanosecondsSinceEpoch() -> uint64_t;
-    auto getEstimatedClockResolution() -> uint64_t;
 
     class Timer {
         uint64_t m_nanoseconds = 0;
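
For reference, here is a minimal standalone sketch of the technique the deleted code implemented: spin until the clock value changes, treat each delta as the smallest observable step, and average the deltas. The names nowNs and estimateResolutionNs and the iteration count are illustrative only, not Catch2 API:

    #include <chrono>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Current time in nanoseconds since epoch, mirroring the helper
    // that remains in catch_timer.cpp after this patch.
    static std::uint64_t nowNs() {
        return std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::high_resolution_clock::now().time_since_epoch() ).count();
    }

    static std::uint64_t estimateResolutionNs( std::size_t iterations ) {
        std::uint64_t sum = 0;
        for( std::size_t i = 0; i < iterations; ++i ) {
            const std::uint64_t baseTicks = nowNs();
            std::uint64_t ticks;
            do {
                ticks = nowNs();            // spin until the clock advances
            } while( ticks == baseTicks );
            sum += ticks - baseTicks;       // smallest step observed this round
        }
        return sum / iterations;            // plain mean of the observed steps
    }

    int main() {
        std::printf( "estimated clock resolution: ~%llu ns\n",
                     static_cast<unsigned long long>( estimateResolutionNs( 10000 ) ) );
    }

The removed implementation additionally bailed out after three seconds of calibration to cope with very coarse clocks, and its own comments noted that taking the standard deviation and excluding outliers would beat a plain mean; a sketch like the above inherits both limitations.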