Commit 51c23a91 by Dominic Hamon

Merge branch 'added_lambdas' of git://github.com/ismaelJimenez/benchmark into ismaelJimenez-added_lambdas

Merge branch 'added_lambdas' of git://github.com/ismaelJimenez/benchmark into ismaelJimenez-added_lambdas
parents 84cd50b8 2859ae93
......@@ -142,6 +142,14 @@ BENCHMARK(BM_StringCompare)
->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
```
The following code will specify asymptotic complexity with a lambda function
that can be used to customize the calculation of the high-order term.
```c++
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; });
```
### Templated benchmarks
Templated benchmarks work the same way: This example produces and consumes
messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
......
......@@ -247,9 +247,14 @@ enum BigO {
oNCubed,
oLogN,
oNLogN,
oAuto
oAuto,
oLambda
};
// BigOFunc is passed to a benchmark in order to specify the asymptotic
// computational complexity for the benchmark.
typedef double(BigOFunc)(int);
// State is passed to a running Benchmark and contains state for the
// benchmark to use.
class State {
......@@ -257,24 +262,24 @@ public:
State(size_t max_iters, bool has_x, int x, bool has_y, int y,
int thread_i, int n_threads);
// Returns true iff the benchmark should continue through another iteration.
// Returns true if the benchmark should continue through another iteration.
// NOTE: A benchmark may not return from the test until KeepRunning() has
// returned false.
bool KeepRunning() {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
assert(!finished_);
started_ = true;
ResumeTiming();
assert(!finished_);
started_ = true;
ResumeTiming();
}
bool const res = total_iterations_++ < max_iterations;
if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
assert(started_ && (!finished_ || error_occurred_));
if (!error_occurred_) {
PauseTiming();
}
// Total iterations now is one greater than max iterations. Fix this.
total_iterations_ = max_iterations;
finished_ = true;
assert(started_ && (!finished_ || error_occurred_));
if (!error_occurred_) {
PauseTiming();
}
// Total iterations now is one greater than max iterations. Fix this.
total_iterations_ = max_iterations;
finished_ = true;
}
return res;
}
......@@ -358,7 +363,7 @@ public:
// family benchmark, then current benchmark will be part of the computation and complexity_n will
// represent the length of N.
BENCHMARK_ALWAYS_INLINE
void SetComplexityN(size_t complexity_n) {
void SetComplexityN(int complexity_n) {
complexity_n_ = complexity_n;
}
......@@ -439,7 +444,7 @@ private:
size_t bytes_processed_;
size_t items_processed_;
size_t complexity_n_;
int complexity_n_;
public:
// FIXME: Make this private somehow.
......@@ -538,6 +543,10 @@ public:
// the asymptotic computational complexity will be shown on the output.
Benchmark* Complexity(BigO complexity = benchmark::oAuto);
// Set the asymptotic computational complexity for the benchmark. If called
// the asymptotic computational complexity will be shown on the output.
Benchmark* Complexity(BigOFunc* complexity);
// Support for running multiple copies of the same benchmark concurrently
// in multiple threads. This may be useful when measuring the scaling
// of some piece of code.
......
......@@ -20,7 +20,7 @@
#include <utility>
#include <vector>
#include "benchmark_api.h" // For forward declaration of BenchmarkReporter
#include "benchmark_api.h" // For forward declaration of BenchmarkReporter
namespace benchmark {
......@@ -85,7 +85,8 @@ class BenchmarkReporter {
double max_heapbytes_used;
// Keep track of arguments to compute asymptotic complexity
BigO complexity;
BigO complexity;
BigOFunc* complexity_lambda;
int complexity_n;
// Inform print function whether the current run is a complexity report
......@@ -147,7 +148,7 @@ class BenchmarkReporter {
// REQUIRES: 'out' is non-null.
static void PrintBasicContext(std::ostream* out, Context const& context);
private:
private:
std::ostream* output_stream_;
std::ostream* error_stream_;
};
......@@ -159,31 +160,31 @@ class ConsoleReporter : public BenchmarkReporter {
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
protected:
protected:
virtual void PrintRunData(const Run& report);
size_t name_field_width_;
};
class JSONReporter : public BenchmarkReporter {
public:
public:
JSONReporter() : first_report_(true) {}
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
virtual void Finalize();
private:
private:
void PrintRunData(const Run& report);
bool first_report_;
};
class CSVReporter : public BenchmarkReporter {
public:
public:
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
private:
private:
void PrintRunData(const Run& report);
};
......@@ -200,7 +201,7 @@ inline const char* GetTimeUnitString(TimeUnit unit) {
}
inline double GetTimeUnitMultiplier(TimeUnit unit) {
switch (unit) {
switch (unit) {
case kMillisecond:
return 1e3;
case kMicrosecond:
......@@ -211,5 +212,5 @@ inline double GetTimeUnitMultiplier(TimeUnit unit) {
}
}
} // end namespace benchmark
#endif // BENCHMARK_REPORTER_H_
} // end namespace benchmark
#endif // BENCHMARK_REPORTER_H_
......@@ -130,7 +130,7 @@ struct ThreadStats {
ThreadStats() : bytes_processed(0), items_processed(0), complexity_n(0) {}
int64_t bytes_processed;
int64_t items_processed;
int complexity_n;
size_t complexity_n;
};
// Timer management class
......@@ -287,7 +287,7 @@ class TimerManager {
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp)
return false;
return false;
// else (running_threads_ == entered_) and we are the last thread.
}
// Last thread has reached the barrier
......@@ -317,6 +317,7 @@ struct Benchmark::Instance {
bool use_real_time;
bool use_manual_time;
BigO complexity;
BigOFunc* complexity_lambda;
bool last_benchmark_instance;
int repetitions;
double min_time;
......@@ -362,6 +363,7 @@ public:
void UseRealTime();
void UseManualTime();
void Complexity(BigO complexity);
void ComplexityLambda(BigOFunc* complexity);
void Threads(int t);
void ThreadRange(int min_threads, int max_threads);
void ThreadPerCpu();
......@@ -382,6 +384,7 @@ private:
bool use_real_time_;
bool use_manual_time_;
BigO complexity_;
BigOFunc* complexity_lambda_;
std::vector<int> thread_counts_;
BenchmarkImp& operator=(BenchmarkImp const&);
......@@ -446,6 +449,7 @@ bool BenchmarkFamilies::FindBenchmarks(
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
instance.complexity_lambda = family->complexity_lambda_;
instance.threads = num_threads;
instance.multithreaded = !(family->thread_counts_.empty());
......@@ -573,6 +577,10 @@ void BenchmarkImp::Complexity(BigO complexity){
complexity_ = complexity;
}
void BenchmarkImp::ComplexityLambda(BigOFunc* complexity) {
complexity_lambda_ = complexity;
}
void BenchmarkImp::Threads(int t) {
CHECK_GT(t, 0);
thread_counts_.push_back(t);
......@@ -697,6 +705,12 @@ Benchmark* Benchmark::Complexity(BigO complexity) {
return this;
}
Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
imp_->Complexity(oLambda);
imp_->ComplexityLambda(complexity);
return this;
}
Benchmark* Benchmark::Threads(int t) {
imp_->Threads(t);
return this;
......@@ -855,6 +869,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
report.items_per_second = items_per_second;
report.complexity_n = total.complexity_n;
report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda;
if(report.complexity != oNone)
complexity_reports.push_back(report);
}
......@@ -884,7 +899,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
}
std::vector<BenchmarkReporter::Run> additional_run_stats = ComputeStats(reports);
reports.insert(reports.end(), additional_run_stats.begin(),
additional_run_stats.end());
additional_run_stats.end());
if((b.complexity != oNone) && b.last_benchmark_instance) {
additional_run_stats = ComputeBigO(complexity_reports);
......
......@@ -60,11 +60,5 @@ struct LeastSq {
// Function to return an string for the calculated complexity
std::string GetBigOString(BigO complexity);
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error.
LeastSq MinimalLeastSq(const std::vector<int>& n,
const std::vector<double>& time,
const BigO complexity = oAuto);
} // end namespace benchmark
#endif // COMPLEXITY_H_
......@@ -15,9 +15,9 @@
#include "benchmark/reporter.h"
#include "complexity.h"
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <algorithm>
#include <iostream>
#include <string>
#include <tuple>
......@@ -62,8 +62,8 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
void ConsoleReporter::PrintRunData(const Run& result) {
auto& Out = GetOutputStream();
auto name_color = (result.report_big_o || result.report_rms)
? COLOR_BLUE : COLOR_GREEN;
auto name_color =
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
ColorPrintf(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name.c_str());
......@@ -84,25 +84,25 @@ void ConsoleReporter::PrintRunData(const Run& result) {
if (result.items_per_second > 0) {
items = StrCat(" ", HumanReadableNumber(result.items_per_second),
" items/s");
}
}
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
if(result.report_big_o) {
std::string big_o = result.report_big_o ? GetBigOString(result.complexity) : "";
ColorPrintf(Out, COLOR_YELLOW, "%10.4f %s %10.4f %s ",
real_time, big_o.c_str(), cpu_time, big_o.c_str());
} else if(result.report_rms) {
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ",
real_time * 100, cpu_time * 100);
if (result.report_big_o) {
std::string big_o = GetBigOString(result.complexity);
ColorPrintf(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time,
big_o.c_str(), cpu_time, big_o.c_str());
} else if (result.report_rms) {
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
cpu_time * 100);
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ",
real_time, timeLabel, cpu_time, timeLabel);
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
cpu_time, timeLabel);
}
if(!result.report_big_o && !result.report_rms) {
if (!result.report_big_o && !result.report_rms) {
ColorPrintf(Out, COLOR_CYAN, "%10lld", result.iterations);
}
......
......@@ -13,9 +13,10 @@
// limitations under the License.
#include "benchmark/reporter.h"
#include "complexity.h"
#include <cstdint>
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
......@@ -79,7 +80,7 @@ void CSVReporter::PrintRunData(const Run & run) {
}
// Do not print iteration on bigO and RMS report
if(!run.report_big_o && !run.report_rms) {
if (!run.report_big_o && !run.report_rms) {
Out << run.iterations;
}
Out << ",";
......@@ -87,8 +88,10 @@ void CSVReporter::PrintRunData(const Run & run) {
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
// Do not print timeLabel on RMS report
if(!run.report_rms) {
// Do not print timeLabel on bigO and RMS report
if (run.report_big_o) {
Out << GetBigOString(run.complexity);
} else if (!run.report_rms) {
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
......@@ -108,7 +111,7 @@ void CSVReporter::PrintRunData(const Run & run) {
ReplaceAll(&label, "\"", "\"\"");
Out << "\"" << label << "\"";
}
Out << ",,"; // for error_occurred and error_message
Out << ",,"; // for error_occurred and error_message
Out << '\n';
}
......
......@@ -13,9 +13,10 @@
// limitations under the License.
#include "benchmark/reporter.h"
#include "complexity.h"
#include <cstdint>
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
......@@ -99,24 +100,24 @@ void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
first_report_ = false;
for (auto it = reports.begin(); it != reports.end(); ++it) {
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports.end()) {
out << ",\n";
}
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports.end()) {
out << ",\n";
}
}
}
void JSONReporter::Finalize() {
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const& run) {
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
out << indent
<< FormatKV("name", run.benchmark_name)
<< ",\n";
......@@ -128,33 +129,50 @@ void JSONReporter::PrintRunData(Run const& run) {
<< FormatKV("error_message", run.error_message)
<< ",\n";
}
if(!run.report_big_o && !run.report_rms) {
if (!run.report_big_o && !run.report_rms) {
out << indent
<< FormatKV("iterations", run.iterations)
<< ",\n";
}
out << indent
<< FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
<< ",\n";
out << indent
<< FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
out << ",\n" << indent
<< FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_big_o) {
out << indent
<< FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
<< FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
<< ",\n";
out << indent
<< FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
if(!run.report_rms) {
out << ",\n" << indent
<< FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
<< ",\n";
out << indent
<< FormatKV("big_o", GetBigOString(run.complexity))
<< ",\n";
out << indent
<< FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
if (run.bytes_per_second > 0.0) {
out << ",\n" << indent
<< FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
}
if (run.items_per_second > 0.0) {
out << ",\n" << indent
<< FormatKV("items_per_second", RoundDouble(run.items_per_second));
}
if (!run.report_label.empty()) {
out << ",\n" << indent
<< FormatKV("label", run.report_label);
}
out << '\n';
} else if(run.report_rms) {
out << indent
<< FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100))
<< '%';
}
if (run.bytes_per_second > 0.0) {
out << ",\n"
<< indent
<< FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
}
if (run.items_per_second > 0.0) {
out << ",\n"
<< indent
<< FormatKV("items_per_second", RoundDouble(run.items_per_second));
}
if (!run.report_label.empty()) {
out << ",\n"
<< indent
<< FormatKV("label", run.report_label);
}
out << '\n';
}
} // end namespace benchmark
} // end namespace benchmark
......@@ -189,7 +189,7 @@ void BM_Complexity_O1(benchmark::State& state) {
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(benchmark::o1);
std::string bigOStr = "[0-9]+\\.[0-9]+ \\* [0-9]+";
std::string bigOStr = "[0-9]+\\.[0-9]+ \\([0-9]+\\)";
ADD_CASES(&ConsoleOutputTests, {
{join("^BM_Complexity_O1_BigO", bigOStr, bigOStr) + "[ ]*$"},
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment