Commit 4242f2f1 by Eric Fiselier

move reporter internals in both headers and source

parent 279e502a
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_REPORTER_H_
#define BENCHMARK_REPORTER_H_
#include <string>
#include <utility>
#include <vector>
namespace benchmark {
// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However, an
// application can control the destination of the reports by calling
// RunSpecifiedBenchmarks and passing it a custom reporter object.
// The reporter object must implement the following interface; a sketch of
// a minimal implementation follows the class definition.
class BenchmarkReporter {
public:
struct Context {
int num_cpus;
double mhz_per_cpu;
bool cpu_scaling_enabled;
// The number of chars in the longest benchmark name.
size_t name_field_width;
};
struct Run {
Run() :
iterations(1),
real_accumulated_time(0),
cpu_accumulated_time(0),
bytes_per_second(0),
items_per_second(0),
max_heapbytes_used(0) {}
std::string benchmark_name;
std::string report_label; // Empty if not set by benchmark.
size_t iterations;
double real_accumulated_time;
double cpu_accumulated_time;
// Zero if not set by the benchmark (see the sketch after this struct).
double bytes_per_second;
double items_per_second;
// This is set to 0.0 if memory tracing is not enabled.
double max_heapbytes_used;
};
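// Illustrative sketch (not part of this commit): bytes_per_second and
// items_per_second are derived from values the benchmark reports through the
// State API. A benchmark that processes memory might report throughput like
// this (BM_memcpy and the buffer size are hypothetical):
//
//   static void BM_memcpy(benchmark::State& state) {
//     char src[1024] = {};  // source buffer, zero-initialized
//     char dst[1024];
//     while (state.KeepRunning())
//       memcpy(dst, src, sizeof(src));
//     // Total bytes touched across all iterations of this run.
//     state.SetBytesProcessed(int64_t(state.iterations()) * sizeof(src));
//   }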
// Called once for every suite of benchmarks run.
// The parameter "context" contains information that the
// reporter may wish to use when generating its report, for example the
// platform under which the benchmarks are running. The benchmark run is
// never started if this function returns false, allowing the reporter
// to skip runs based on the context information.
virtual bool ReportContext(const Context& context) const = 0;
// Called once for each group of benchmark runs; gives information about
// CPU time and heap memory usage during the benchmark run.
// Note that all the grouped benchmark runs should refer to the same
// benchmark, and thus have the same name.
virtual void ReportRuns(const std::vector<Run>& report) const = 0;
virtual ~BenchmarkReporter();
};
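// Example sketch of a minimal custom reporter (an assumption for
// illustration, not part of this commit). It prints one line per run to
// stderr and could be passed to RunSpecifiedBenchmarks; requires <cstdio>:
//
//   class StderrReporter : public benchmark::BenchmarkReporter {
//    public:
//     virtual bool ReportContext(const Context& context) const {
//       fprintf(stderr, "# %d CPU(s)\n", context.num_cpus);
//       return true;  // returning false would skip the benchmark runs
//     }
//     virtual void ReportRuns(const std::vector<Run>& reports) const {
//       for (const Run& run : reports) {
//         // Guard against iterations == 0 (used by aggregate rows).
//         double const iters = run.iterations ? run.iterations : 1;
//         fprintf(stderr, "%s %.0f ns\n", run.benchmark_name.c_str(),
//                 1e9 * run.real_accumulated_time / iters);
//       }
//     }
//   };
//
//   // StderrReporter reporter;
//   // benchmark::RunSpecifiedBenchmarks(&reporter);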
namespace internal {
// ------------------------------------------------------
// Internal implementation details follow; please ignore
// Simple reporter that outputs benchmark data to the console. This is the
// default reporter used by RunSpecifiedBenchmarks().
class ConsoleReporter : public BenchmarkReporter {
public:
virtual bool ReportContext(const Context& context) const;
virtual void ReportRuns(const std::vector<Run>& reports) const;
private:
virtual void PrintRunData(const Run& report) const;
// TODO(ericwf): Find a better way to share this information.
mutable size_t name_field_width_;
};
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_REPORTER_H_
@@ -3,7 +3,8 @@ include_directories(${PROJECT_SOURCE_DIR}/src)
 # Define the source files
 set(SOURCE_FILES "benchmark.cc" "colorprint.cc" "commandlineflags.cc" "log.cc"
-    "sleep.cc" "string_util.cc" "sysinfo.cc" "walltime.cc")
+    "reporter.cc" "sleep.cc" "string_util.cc" "sysinfo.cc"
+    "walltime.cc")
 # Determine the correct regular expression engine to use
 if(HAVE_STD_REGEX)
   set(RE_FILES "re_std.cc")
...
@@ -29,7 +29,6 @@
 #include "check.h"
 #include "commandlineflags.h"
-#include "colorprint.h"
 #include "log.h"
 #include "mutex.h"
 #include "re.h"
@@ -134,61 +133,6 @@ static bool CpuScalingEnabled() {
   return false;
 }
-void ComputeStats(const std::vector<BenchmarkReporter::Run>& reports,
-                  BenchmarkReporter::Run* mean_data,
-                  BenchmarkReporter::Run* stddev_data) {
-  CHECK(reports.size() >= 2) << "Cannot compute stats for less than 2 reports";
-  // Accumulators.
-  Stat1_d real_accumulated_time_stat;
-  Stat1_d cpu_accumulated_time_stat;
-  Stat1_d bytes_per_second_stat;
-  Stat1_d items_per_second_stat;
-  // All repetitions should be run with the same number of iterations so we
-  // can take this information from the first benchmark.
-  std::size_t const run_iterations = reports.front().iterations;
-  // Populate the accumulators.
-  for (BenchmarkReporter::Run const& run : reports) {
-    CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
-    CHECK_EQ(run_iterations, run.iterations);
-    real_accumulated_time_stat +=
-        Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
-    cpu_accumulated_time_stat +=
-        Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations);
-    items_per_second_stat += Stat1_d(run.items_per_second, run.iterations);
-    bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations);
-  }
-  // Get the data from the accumulator to BenchmarkReporter::Run's.
-  mean_data->benchmark_name = reports[0].benchmark_name + "_mean";
-  mean_data->iterations = run_iterations;
-  mean_data->real_accumulated_time = real_accumulated_time_stat.Mean() *
-                                     run_iterations;
-  mean_data->cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
-                                    run_iterations;
-  mean_data->bytes_per_second = bytes_per_second_stat.Mean();
-  mean_data->items_per_second = items_per_second_stat.Mean();
-  // Only add label to mean/stddev if it is same for all runs
-  mean_data->report_label = reports[0].report_label;
-  for (std::size_t i = 1; i < reports.size(); i++) {
-    if (reports[i].report_label != reports[0].report_label) {
-      mean_data->report_label = "";
-      break;
-    }
-  }
-  stddev_data->benchmark_name = reports[0].benchmark_name + "_stddev";
-  stddev_data->report_label = mean_data->report_label;
-  stddev_data->iterations = 0;
-  stddev_data->real_accumulated_time =
-      real_accumulated_time_stat.StdDev();
-  stddev_data->cpu_accumulated_time =
-      cpu_accumulated_time_stat.StdDev();
-  stddev_data->bytes_per_second = bytes_per_second_stat.StdDev();
-  stddev_data->items_per_second = items_per_second_stat.StdDev();
-}
 struct ThreadStats {
   ThreadStats() : bytes_processed(0), items_processed(0) {}
   int64_t bytes_processed;
@@ -816,108 +760,8 @@ void State::SetLabel(const char* label) {
   *GetReportLabel() = label;
 }
-BenchmarkReporter::~BenchmarkReporter() {}
 namespace internal {
-bool ConsoleReporter::ReportContext(const Context& context) const {
-  name_field_width_ = context.name_field_width;
-  fprintf(stdout,
-          "Run on (%d X %0.0f MHz CPU%s)\n",
-          context.num_cpus,
-          context.mhz_per_cpu,
-          (context.num_cpus > 1) ? "s" : "");
-  int remainder_us;
-  std::string walltime_str = walltime::Print(
-      walltime::Now(), "%Y/%m/%d-%H:%M:%S",
-      true,  // use local timezone
-      &remainder_us);
-  fprintf(stdout, "%s\n", walltime_str.c_str());
-  if (context.cpu_scaling_enabled) {
-    fprintf(stdout, "***WARNING*** CPU scaling is enabled, the benchmark "
-                    "timings may be noisy\n");
-  }
-#ifndef NDEBUG
-  fprintf(stdout, "Build Type: DEBUG\n");
-#endif
-  int output_width =
-      fprintf(stdout,
-              "%-*s %10s %10s %10s\n",
-              static_cast<int>(name_field_width_),
-              "Benchmark",
-              "Time(ns)", "CPU(ns)",
-              "Iterations");
-  fprintf(stdout, "%s\n", std::string(output_width - 1, '-').c_str());
-  return true;
-}
-void ConsoleReporter::ReportRuns(
-    const std::vector<Run>& reports) const {
-  if (reports.empty()) {
-    return;
-  }
-  for (Run const& run : reports) {
-    CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
-    PrintRunData(run);
-  }
-  if (reports.size() < 2) {
-    // We don't report aggregated data if there was a single run.
-    return;
-  }
-  Run mean_data;
-  Run stddev_data;
-  ComputeStats(reports, &mean_data, &stddev_data);
-  // Output using PrintRun.
-  PrintRunData(mean_data);
-  PrintRunData(stddev_data);
-  fprintf(stdout, "\n");
-}
-void ConsoleReporter::PrintRunData(const Run& result) const {
-  // Format bytes per second
-  std::string rate;
-  if (result.bytes_per_second > 0) {
-    rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
-  }
-  // Format items per second
-  std::string items;
-  if (result.items_per_second > 0) {
-    items = StrCat(" ", HumanReadableNumber(result.items_per_second),
-                   " items/s");
-  }
-  double const multiplier = 1e9; // nano second multiplier
-  ColorPrintf(COLOR_GREEN, "%-*s ",
-              name_field_width_, result.benchmark_name.c_str());
-  if (result.iterations == 0) {
-    ColorPrintf(COLOR_YELLOW, "%10.0f %10.0f ",
-                result.real_accumulated_time * multiplier,
-                result.cpu_accumulated_time * multiplier);
-  } else {
-    ColorPrintf(COLOR_YELLOW, "%10.0f %10.0f ",
-                (result.real_accumulated_time * multiplier) /
-                    (static_cast<double>(result.iterations)),
-                (result.cpu_accumulated_time * multiplier) /
-                    (static_cast<double>(result.iterations)));
-  }
-  ColorPrintf(COLOR_CYAN, "%10lld", result.iterations);
-  ColorPrintf(COLOR_DEFAULT, "%*s %*s %s\n",
-              13, rate.c_str(),
-              18, items.c_str(),
-              result.report_label.c_str());
-}
 void RunMatchingBenchmarks(const std::string& spec,
                            const BenchmarkReporter* reporter) {
   CHECK(reporter != nullptr);
...
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/reporter.h"
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include "check.h"
#include "colorprint.h"
#include "stat.h"
#include "string_util.h"
#include "walltime.h"
namespace benchmark {
namespace {
void ComputeStats(const std::vector<BenchmarkReporter::Run>& reports,
BenchmarkReporter::Run* mean_data,
BenchmarkReporter::Run* stddev_data) {
CHECK(reports.size() >= 2) << "Cannot compute stats for fewer than 2 reports";
// Accumulators.
Stat1_d real_accumulated_time_stat;
Stat1_d cpu_accumulated_time_stat;
Stat1_d bytes_per_second_stat;
Stat1_d items_per_second_stat;
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
std::size_t const run_iterations = reports.front().iterations;
// Populate the accumulators.
for (BenchmarkReporter::Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
CHECK_EQ(run_iterations, run.iterations);
real_accumulated_time_stat +=
Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
cpu_accumulated_time_stat +=
Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations);
items_per_second_stat += Stat1_d(run.items_per_second, run.iterations);
bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations);
}
// Copy the data from the accumulators into the BenchmarkReporter::Run objects.
mean_data->benchmark_name = reports[0].benchmark_name + "_mean";
mean_data->iterations = run_iterations;
mean_data->real_accumulated_time = real_accumulated_time_stat.Mean() *
run_iterations;
mean_data->cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
run_iterations;
mean_data->bytes_per_second = bytes_per_second_stat.Mean();
mean_data->items_per_second = items_per_second_stat.Mean();
// Only add the label to mean/stddev if it is the same for all runs.
mean_data->report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) {
if (reports[i].report_label != reports[0].report_label) {
mean_data->report_label = "";
break;
}
}
stddev_data->benchmark_name = reports[0].benchmark_name + "_stddev";
stddev_data->report_label = mean_data->report_label;
stddev_data->iterations = 0;
stddev_data->real_accumulated_time =
real_accumulated_time_stat.StdDev();
stddev_data->cpu_accumulated_time =
cpu_accumulated_time_stat.StdDev();
stddev_data->bytes_per_second = bytes_per_second_stat.StdDev();
stddev_data->items_per_second = items_per_second_stat.StdDev();
}
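// Worked example (illustrative numbers): given two repetitions of "BM_foo",
// each run for 1000 iterations, with real_accumulated_time of 1.0s and 2.0s,
// the per-iteration samples fed to the accumulator are 1.0e-3 and 2.0e-3.
// ComputeStats then produces:
//   BM_foo_mean:   real_accumulated_time = 1.5e-3 * 1000 = 1.5 (seconds)
//   BM_foo_stddev: real_accumulated_time = the stddev of the per-iteration
//                  samples, with iterations set to 0 so that PrintRunData
//                  prints the value directly instead of dividing again.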
} // end namespace
BenchmarkReporter::~BenchmarkReporter() {}
namespace internal {
bool ConsoleReporter::ReportContext(const Context& context) const {
name_field_width_ = context.name_field_width;
fprintf(stdout,
"Run on (%d X %0.0f MHz CPU%s)\n",
context.num_cpus,
context.mhz_per_cpu,
(context.num_cpus > 1) ? "s" : "");
int remainder_us;
std::string walltime_str = walltime::Print(
walltime::Now(), "%Y/%m/%d-%H:%M:%S",
true, // use local timezone
&remainder_us);
fprintf(stdout, "%s\n", walltime_str.c_str());
if (context.cpu_scaling_enabled) {
fprintf(stdout, "***WARNING*** CPU scaling is enabled, the benchmark "
"timings may be noisy\n");
}
#ifndef NDEBUG
fprintf(stdout, "Build Type: DEBUG\n");
#endif
int output_width =
fprintf(stdout,
"%-*s %10s %10s %10s\n",
static_cast<int>(name_field_width_),
"Benchmark",
"Time(ns)", "CPU(ns)",
"Iterations");
fprintf(stdout, "%s\n", std::string(output_width - 1, '-').c_str());
return true;
}
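// Sample console header (illustrative; actual values depend on the host):
//
//   Run on (8 X 2400 MHz CPUs)
//   2015/03/17-14:20:01
//   Benchmark       Time(ns)    CPU(ns) Iterations
//   ----------------------------------------------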
void ConsoleReporter::ReportRuns(
const std::vector<Run>& reports) const {
if (reports.empty()) {
return;
}
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
PrintRunData(run);
}
if (reports.size() < 2) {
// We don't report aggregated data if there was a single run.
return;
}
Run mean_data;
Run stddev_data;
ComputeStats(reports, &mean_data, &stddev_data);
// Output using PrintRunData.
PrintRunData(mean_data);
PrintRunData(stddev_data);
fprintf(stdout, "\n");
}
void ConsoleReporter::PrintRunData(const Run& result) const {
// Format bytes per second
std::string rate;
if (result.bytes_per_second > 0) {
rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
}
// Format items per second
std::string items;
if (result.items_per_second > 0) {
items = StrCat(" ", HumanReadableNumber(result.items_per_second),
" items/s");
}
double const multiplier = 1e9; // nanosecond multiplier
ColorPrintf(COLOR_GREEN, "%-*s ",
static_cast<int>(name_field_width_), result.benchmark_name.c_str());
if (result.iterations == 0) {
ColorPrintf(COLOR_YELLOW, "%10.0f %10.0f ",
result.real_accumulated_time * multiplier,
result.cpu_accumulated_time * multiplier);
} else {
ColorPrintf(COLOR_YELLOW, "%10.0f %10.0f ",
(result.real_accumulated_time * multiplier) /
(static_cast<double>(result.iterations)),
(result.cpu_accumulated_time * multiplier) /
(static_cast<double>(result.iterations)));
}
ColorPrintf(COLOR_CYAN, "%10lld", static_cast<long long>(result.iterations));
ColorPrintf(COLOR_DEFAULT, "%*s %*s %s\n",
13, rate.c_str(),
18, items.c_str(),
result.report_label.c_str());
}
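// Sample rows (illustrative): a benchmark that reported bytes processed
// might print as
//
//   BM_memcpy/1024            53         53   10000000   18.1234GB/s
//
// where Time(ns) and CPU(ns) are per-iteration values (accumulated time
// divided by iterations, scaled by 1e9), and the rate/items columns appear
// only when the benchmark set bytes or items processed.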
} // end namespace internal
} // end namespace benchmark