#include <folly/Benchmark.h>
#include <algorithm>
#include <cmath>
#include <cstring>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include <folly/FileUtil.h>
#include <folly/MapUtil.h>
#include <folly/String.h>
#include <folly/detail/PerfScoped.h>
#include <folly/json/json.h>
#define NOMINMAX …
#include <boost/regex.hpp>
// Pull std into scope for this TU (the file below uses unqualified string,
// vector, pair, ...). NOTE(review): acceptable only because this is a .cpp,
// never do this in a header. The original line was whitespace-mangled
// ("usingnamespacestd;"), which is not valid C++.
using namespace std;
FOLLY_GFLAGS_DEFINE_bool(…);
FOLLY_GFLAGS_DEFINE_bool(…);
FOLLY_GFLAGS_DEFINE_bool(…);
#if FOLLY_PERF_IS_SUPPORTED
// Declares the --bm_perf_args flag (perf arguments for profiled benchmark
// runs). Fixes two typos in the user-visible help text caused by adjacent
// string-literal concatenation: "tool." ran directly into "Advantage"
// (missing separating space), and "perf " + " to be" produced a doubled
// space.
FOLLY_GFLAGS_DEFINE_string(
    bm_perf_args,
    "",
    "Run selected benchmarks while attaching `perf` profiling tool. "
    "Advantage over attaching perf externally is that this skips "
    "initialization. The first iteration of the benchmark is also "
    "skipped to allow for all statics to be set up. This requires perf "
    "to be available on the system. Example: --bm_perf_args=\"record -g\"");
#endif
FOLLY_GFLAGS_DEFINE_bool(…);
FOLLY_GFLAGS_DEFINE_int64(…);
FOLLY_GFLAGS_DEFINE_string(…);
FOLLY_GFLAGS_DEFINE_bool(…);
FOLLY_GFLAGS_DEFINE_string(…);
FOLLY_GFLAGS_DEFINE_string(…);
FOLLY_GFLAGS_DEFINE_int64(…);
FOLLY_GFLAGS_DEFINE_int32(…);
FOLLY_GFLAGS_DEFINE_int64(…);
FOLLY_GFLAGS_DEFINE_int32(…);
FOLLY_GFLAGS_DEFINE_uint32(…);
FOLLY_GFLAGS_DEFINE_uint32(…);
FOLLY_GFLAGS_DEFINE_bool(…);
namespace folly {
namespace detail {
BenchmarkingState<std::chrono::high_resolution_clock>& globalBenchmarkState() { … }
}
BenchmarkFun;
#define FB_FOLLY_GLOBAL_BENCHMARK_BASELINE …
#define FB_STRINGIZE_X2 …
constexpr const char kGlobalBenchmarkBaseline[] = …;
BENCHMARK(…) { …
#undef FB_STRINGIZE_X2
#undef FB_FOLLY_GLOBAL_BENCHMARK_BASELINE
static std::pair<double, UserCounters> runBenchmarkGetNSPerIteration(
const BenchmarkFun& fun, const double globalBaseline) { … }
static std::pair<double, UserCounters> runBenchmarkGetNSPerIterationEstimate(
const BenchmarkFun& fun, const double globalBaseline) { … }
static std::pair<double, UserCounters> runProfilingGetNSPerIteration(
const BenchmarkFun& fun, const double globalBaseline) { … }
struct ScaleInfo { … };
static const ScaleInfo kTimeSuffixes[]{ … };
static const ScaleInfo kMetricSuffixes[]{ … };
static string humanReadable(
double n, unsigned int decimals, const ScaleInfo* scales) { … }
static string readableTime(double n, unsigned int decimals) { … }
static string metricReadable(double n, unsigned int decimals) { … }
namespace {
constexpr std::string_view kUnitHeaders = …;
constexpr std::string_view kUnitHeadersPadding = …;
void printHeaderContents(std::string_view file) { … }
void printDefaultHeaderContents(std::string_view file, size_t columns) { … }
void printSeparator(char pad, unsigned int columns) { … }
class BenchmarkResultsPrinter { … };
}
static void printBenchmarkResultsAsJson(
const vector<detail::BenchmarkResult>& data) { … }
void benchmarkResultsToDynamic(
const vector<detail::BenchmarkResult>& data, dynamic& out) { … }
void benchmarkResultsFromDynamic(
const dynamic& d, vector<detail::BenchmarkResult>& results) { … }
static pair<StringPiece, StringPiece> resultKey(
const detail::BenchmarkResult& result) { … }
void printResultComparison(
const vector<detail::BenchmarkResult>& base,
const vector<detail::BenchmarkResult>& test) { … }
void checkRunMode() { … }
namespace {
struct BenchmarksToRun { … };
void addSeparator(BenchmarksToRun& res) { … }
BenchmarksToRun selectBenchmarksToRun(
const std::vector<detail::BenchmarkRegistration>& benchmarks) { … }
void maybeRunWarmUpIteration(const BenchmarksToRun& toRun) { … }
class ShouldDrawLineTracker { … };
std::pair<std::set<std::string>, std::vector<detail::BenchmarkResult>>
runBenchmarksWithPrinterImpl(
BenchmarkResultsPrinter* FOLLY_NULLABLE printer,
const BenchmarksToRun& toRun) { … }
std::vector<detail::BenchmarkResult> resultsFromFile(
const std::string& filename) { … }
bool writeResultsToFile(
const std::vector<detail::BenchmarkResult>& results,
const std::string& filename) { … }
}
namespace detail {
std::ostream& operator<<(std::ostream& os, const BenchmarkResult& x) { … }
bool operator==(const BenchmarkResult& x, const BenchmarkResult& y) { … }
// Out-of-line definition of the static data member declared in
// BenchmarkSuspenderBase; zero-initialized (duration{0}) at program start.
// NOTE(review): presumably accumulates the time spent while benchmarks are
// suspended so it can be excluded from measurements — the code that updates
// and reads it is elided here, confirm against the class declaration.
std::chrono::high_resolution_clock::duration BenchmarkSuspenderBase::timeSpent;
void BenchmarkingStateBase::addBenchmarkImpl(
const char* file, StringPiece name, BenchmarkFun fun, bool useCounter) { … }
bool BenchmarkingStateBase::useCounters() const { … }
std::vector<std::string> BenchmarkingStateBase::getBenchmarkList() { … }
folly::StringPiece BenchmarkingStateBase::getGlobalBaselineNameForTests() { … }
PerfScoped BenchmarkingStateBase::doSetUpPerfScoped(
const std::vector<std::string>& args) const { … }
PerfScoped BenchmarkingStateBase::setUpPerfScoped() const { … }
template <typename Printer>
std::pair<std::set<std::string>, std::vector<BenchmarkResult>>
BenchmarkingStateBase::runBenchmarksWithPrinter(Printer* printer) const { … }
std::vector<BenchmarkResult> BenchmarkingStateBase::runBenchmarksWithResults()
const { … }
std::vector<BenchmarkResult> runBenchmarksWithResults() { … }
}
void runBenchmarks() { … }
}