Commit 11e30435 by Ismael

checked format before pull request

parent 212cfe1c
...@@ -261,16 +261,16 @@ typedef double(BigOFunc)(size_t); ...@@ -261,16 +261,16 @@ typedef double(BigOFunc)(size_t);
class State { class State {
public: public:
State(size_t max_iters, bool has_x, int x, bool has_y, int y, State(size_t max_iters, bool has_x, int x, bool has_y, int y,
int thread_i, int n_threads); int thread_i, int n_threads);
// Returns true iff the benchmark should continue through another iteration. // Returns true if the benchmark should continue through another iteration.
// NOTE: A benchmark may not return from the test until KeepRunning() has // NOTE: A benchmark may not return from the test until KeepRunning() has
// returned false. // returned false.
bool KeepRunning() { bool KeepRunning() {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
assert(!finished_); assert(!finished_);
started_ = true; started_ = true;
ResumeTiming(); ResumeTiming();
} }
bool const res = total_iterations_++ < max_iterations; bool const res = total_iterations_++ < max_iterations;
if (BENCHMARK_BUILTIN_EXPECT(!res, false)) { if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
...@@ -365,7 +365,7 @@ public: ...@@ -365,7 +365,7 @@ public:
// represent the length of N. // represent the length of N.
BENCHMARK_ALWAYS_INLINE BENCHMARK_ALWAYS_INLINE
void SetComplexityN(size_t complexity_n) { void SetComplexityN(size_t complexity_n) {
complexity_n_ = complexity_n; complexity_n_ = complexity_n;
} }
BENCHMARK_ALWAYS_INLINE BENCHMARK_ALWAYS_INLINE
...@@ -539,11 +539,11 @@ public: ...@@ -539,11 +539,11 @@ public:
// to control how many iterations are run, and in the printing of items/second // to control how many iterations are run, and in the printing of items/second
// or MB/second values. // or MB/second values.
Benchmark* UseManualTime(); Benchmark* UseManualTime();
// Set the asymptotic computational complexity for the benchmark. If called // Set the asymptotic computational complexity for the benchmark. If called
// the asymptotic computational complexity will be shown on the output. // the asymptotic computational complexity will be shown on the output.
Benchmark* Complexity(BigO complexity = benchmark::oAuto); Benchmark* Complexity(BigO complexity = benchmark::oAuto);
// Set the asymptotic computational complexity for the benchmark. If called // Set the asymptotic computational complexity for the benchmark. If called
// the asymptotic computational complexity will be shown on the output. // the asymptotic computational complexity will be shown on the output.
Benchmark* Complexity(BigOFunc* complexity); Benchmark* Complexity(BigOFunc* complexity);
......
...@@ -83,12 +83,12 @@ class BenchmarkReporter { ...@@ -83,12 +83,12 @@ class BenchmarkReporter {
// This is set to 0.0 if memory tracing is not enabled. // This is set to 0.0 if memory tracing is not enabled.
double max_heapbytes_used; double max_heapbytes_used;
// Keep track of arguments to compute asymptotic complexity // Keep track of arguments to compute asymptotic complexity
BigO complexity; BigO complexity;
BigOFunc* complexity_lambda; BigOFunc* complexity_lambda;
size_t complexity_n; size_t complexity_n;
// Inform print function whether the current run is a complexity report // Inform print function whether the current run is a complexity report
bool report_big_o; bool report_big_o;
bool report_rms; bool report_rms;
...@@ -114,7 +114,7 @@ class BenchmarkReporter { ...@@ -114,7 +114,7 @@ class BenchmarkReporter {
// 'reports' contains additional entries representing the asymptotic // 'reports' contains additional entries representing the asymptotic
// complexity and RMS of that benchmark family. // complexity and RMS of that benchmark family.
virtual void ReportRuns(const std::vector<Run>& report) = 0; virtual void ReportRuns(const std::vector<Run>& report) = 0;
// Called once and only once after every group of benchmarks is run and // Called once and only once after every group of benchmarks is run and
// reported. // reported.
virtual void Finalize() {} virtual void Finalize() {}
...@@ -156,11 +156,11 @@ private: ...@@ -156,11 +156,11 @@ private:
// Simple reporter that outputs benchmark data to the console. This is the // Simple reporter that outputs benchmark data to the console. This is the
// default reporter used by RunSpecifiedBenchmarks(). // default reporter used by RunSpecifiedBenchmarks().
class ConsoleReporter : public BenchmarkReporter { class ConsoleReporter : public BenchmarkReporter {
public: public:
virtual bool ReportContext(const Context& context); virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports); virtual void ReportRuns(const std::vector<Run>& reports);
protected: protected:
virtual void PrintRunData(const Run& report); virtual void PrintRunData(const Run& report);
size_t name_field_width_; size_t name_field_width_;
......
...@@ -140,7 +140,7 @@ class TimerManager { ...@@ -140,7 +140,7 @@ class TimerManager {
manual_time_used_(0), manual_time_used_(0),
num_finalized_(0), num_finalized_(0),
phase_number_(0), phase_number_(0),
entered_(0) entered_(0)
{ {
} }
...@@ -277,7 +277,7 @@ class TimerManager { ...@@ -277,7 +277,7 @@ class TimerManager {
int phase_number_cp = phase_number_; int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() { auto cb = [this, phase_number_cp]() {
return this->phase_number_ > phase_number_cp || return this->phase_number_ > phase_number_cp ||
entered_ == running_threads_; // A thread has aborted in error entered_ == running_threads_; // A thread has aborted in error
}; };
phase_condition_.wait(ml.native_handle(), cb); phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp) if (phase_number_ > phase_number_cp)
...@@ -731,7 +731,7 @@ void FunctionBenchmark::Run(State& st) { ...@@ -731,7 +731,7 @@ void FunctionBenchmark::Run(State& st) {
} // end namespace internal } // end namespace internal
namespace { namespace {
// Execute one thread of benchmark b for the specified number of iterations. // Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total. // Adds the stats collected for the thread into *total.
void RunInThread(const benchmark::internal::Benchmark::Instance* b, void RunInThread(const benchmark::internal::Benchmark::Instance* b,
...@@ -745,15 +745,15 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b, ...@@ -745,15 +745,15 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b,
MutexLock l(GetBenchmarkLock()); MutexLock l(GetBenchmarkLock());
total->bytes_processed += st.bytes_processed(); total->bytes_processed += st.bytes_processed();
total->items_processed += st.items_processed(); total->items_processed += st.items_processed();
total->complexity_n += st.complexity_length_n(); total->complexity_n += st.complexity_length_n();
} }
timer_manager->Finalize(); timer_manager->Finalize();
} }
void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
BenchmarkReporter* br, BenchmarkReporter* br,
std::vector<BenchmarkReporter::Run>& complexity_reports) std::vector<BenchmarkReporter::Run>& complexity_reports)
EXCLUDES(GetBenchmarkLock()) { EXCLUDES(GetBenchmarkLock()) {
size_t iters = 1; size_t iters = 1;
...@@ -764,7 +764,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, ...@@ -764,7 +764,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
pool.resize(b.threads); pool.resize(b.threads);
const int repeats = b.repetitions != 0 ? b.repetitions const int repeats = b.repetitions != 0 ? b.repetitions
: FLAGS_benchmark_repetitions; : FLAGS_benchmark_repetitions;
for (int i = 0; i < repeats; i++) { for (int i = 0; i < repeats; i++) {
std::string mem; std::string mem;
for (;;) { for (;;) {
...@@ -844,28 +844,28 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, ...@@ -844,28 +844,28 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
report.time_unit = b.time_unit; report.time_unit = b.time_unit;
if (!report.error_occurred) { if (!report.error_occurred) {
double bytes_per_second = 0; double bytes_per_second = 0;
if (total.bytes_processed > 0 && seconds > 0.0) { if (total.bytes_processed > 0 && seconds > 0.0) {
bytes_per_second = (total.bytes_processed / seconds); bytes_per_second = (total.bytes_processed / seconds);
} }
double items_per_second = 0; double items_per_second = 0;
if (total.items_processed > 0 && seconds > 0.0) { if (total.items_processed > 0 && seconds > 0.0) {
items_per_second = (total.items_processed / seconds); items_per_second = (total.items_processed / seconds);
} }
if (b.use_manual_time) { if (b.use_manual_time) {
report.real_accumulated_time = manual_accumulated_time; report.real_accumulated_time = manual_accumulated_time;
} else { } else {
report.real_accumulated_time = real_accumulated_time; report.real_accumulated_time = real_accumulated_time;
} }
report.cpu_accumulated_time = cpu_accumulated_time; report.cpu_accumulated_time = cpu_accumulated_time;
report.bytes_per_second = bytes_per_second; report.bytes_per_second = bytes_per_second;
report.items_per_second = items_per_second; report.items_per_second = items_per_second;
report.complexity_n = total.complexity_n; report.complexity_n = total.complexity_n;
report.complexity = b.complexity; report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda; report.complexity_lambda = b.complexity_lambda;
if(report.complexity != oNone) if(report.complexity != oNone)
complexity_reports.push_back(report); complexity_reports.push_back(report);
} }
reports.push_back(report); reports.push_back(report);
...@@ -893,17 +893,17 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, ...@@ -893,17 +893,17 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
} }
std::vector<BenchmarkReporter::Run> additional_run_stats = ComputeStats(reports); std::vector<BenchmarkReporter::Run> additional_run_stats = ComputeStats(reports);
reports.insert(reports.end(), additional_run_stats.begin(), reports.insert(reports.end(), additional_run_stats.begin(),
additional_run_stats.end()); additional_run_stats.end());
if((b.complexity != oNone) && b.last_benchmark_instance) { if((b.complexity != oNone) && b.last_benchmark_instance) {
additional_run_stats = ComputeBigO(complexity_reports); additional_run_stats = ComputeBigO(complexity_reports);
reports.insert(reports.end(), additional_run_stats.begin(), reports.insert(reports.end(), additional_run_stats.begin(),
additional_run_stats.end()); additional_run_stats.end());
complexity_reports.clear(); complexity_reports.clear();
} }
br->ReportRuns(reports); br->ReportRuns(reports);
if (b.multithreaded) { if (b.multithreaded) {
for (std::thread& thread : pool) for (std::thread& thread : pool)
thread.join(); thread.join();
...@@ -964,56 +964,56 @@ void State::SetLabel(const char* label) { ...@@ -964,56 +964,56 @@ void State::SetLabel(const char* label) {
} }
namespace internal { namespace internal {
namespace { namespace {
void RunMatchingBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
BenchmarkReporter* reporter) {
CHECK(reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
bool has_repetitions = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
for (const Benchmark::Instance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.size());
has_repetitions |= benchmark.repetitions > 1;
}
if (has_repetitions)
name_field_width += std::strlen("_stddev");
// Print header here void RunMatchingBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
BenchmarkReporter::Context context; BenchmarkReporter* reporter) {
context.num_cpus = NumCPUs(); CHECK(reporter != nullptr);
context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;
// Determine the width of the name field using a minimum width of 10.
bool has_repetitions = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
for (const Benchmark::Instance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.size());
has_repetitions |= benchmark.repetitions > 1;
}
if (has_repetitions)
name_field_width += std::strlen("_stddev");
context.cpu_scaling_enabled = CpuScalingEnabled(); // Print header here
context.name_field_width = name_field_width; BenchmarkReporter::Context context;
context.num_cpus = NumCPUs();
context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;
// Keep track of running times of all instances of current benchmark context.cpu_scaling_enabled = CpuScalingEnabled();
std::vector<BenchmarkReporter::Run> complexity_reports; context.name_field_width = name_field_width;
if (reporter->ReportContext(context)) { // Keep track of running times of all instances of current benchmark
for (const auto& benchmark : benchmarks) { std::vector<BenchmarkReporter::Run> complexity_reports;
RunBenchmark(benchmark, reporter, complexity_reports);
}
}
}
std::unique_ptr<BenchmarkReporter> GetDefaultReporter() { if (reporter->ReportContext(context)) {
typedef std::unique_ptr<BenchmarkReporter> PtrType; for (const auto& benchmark : benchmarks) {
if (FLAGS_benchmark_format == "console") { RunBenchmark(benchmark, reporter, complexity_reports);
return PtrType(new ConsoleReporter);
} else if (FLAGS_benchmark_format == "json") {
return PtrType(new JSONReporter);
} else if (FLAGS_benchmark_format == "csv") {
return PtrType(new CSVReporter);
} else {
std::cerr << "Unexpected format: '" << FLAGS_benchmark_format << "'\n";
std::exit(1);
}
} }
}
}
} // end namespace std::unique_ptr<BenchmarkReporter> GetDefaultReporter() {
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (FLAGS_benchmark_format == "console") {
return PtrType(new ConsoleReporter);
} else if (FLAGS_benchmark_format == "json") {
return PtrType(new JSONReporter);
} else if (FLAGS_benchmark_format == "csv") {
return PtrType(new CSVReporter);
} else {
std::cerr << "Unexpected format: '" << FLAGS_benchmark_format << "'\n";
std::exit(1);
}
}
} // end namespace
} // end namespace internal } // end namespace internal
size_t RunSpecifiedBenchmarks() { size_t RunSpecifiedBenchmarks() {
......
...@@ -194,9 +194,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats( ...@@ -194,9 +194,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
mean_data.benchmark_name = reports[0].benchmark_name + "_mean"; mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
mean_data.iterations = run_iterations; mean_data.iterations = run_iterations;
mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() * mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() *
run_iterations; run_iterations;
mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() * mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
run_iterations; run_iterations;
mean_data.bytes_per_second = bytes_per_second_stat.Mean(); mean_data.bytes_per_second = bytes_per_second_stat.Mean();
mean_data.items_per_second = items_per_second_stat.Mean(); mean_data.items_per_second = items_per_second_stat.Mean();
......
...@@ -26,15 +26,15 @@ ...@@ -26,15 +26,15 @@
namespace benchmark { namespace benchmark {
// Return a vector containing the mean and standard deviation information for // Return a vector containing the mean and standard deviation information for
// the specified list of reports. If 'reports' contains less than two // the specified list of reports. If 'reports' contains less than two
// non-errored runs an empty vector is returned // non-errored runs an empty vector is returned
std::vector<BenchmarkReporter::Run> ComputeStats( std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports); const std::vector<BenchmarkReporter::Run>& reports);
// Return a vector containing the bigO and RMS information for the specified // Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned. // list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO( std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports); const std::vector<BenchmarkReporter::Run>& reports);
// This data structure will contain the result returned by MinimalLeastSq // This data structure will contain the result returned by MinimalLeastSq
......
...@@ -155,7 +155,7 @@ void JSONReporter::PrintRunData(Run const& run) { ...@@ -155,7 +155,7 @@ void JSONReporter::PrintRunData(Run const& run) {
} else if(run.report_rms) { } else if(run.report_rms) {
out << indent out << indent
<< FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100)) << FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100))
<< "%"; << '%';
} }
if (run.bytes_per_second > 0.0) { if (run.bytes_per_second > 0.0) {
out << ",\n" << indent out << ",\n" << indent
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment