Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
B
benchmark
Project
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Chen Yisong
benchmark
Commits
867f9145
Commit
867f9145
authored
Jun 01, 2016
by
Ismael
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
added lambdas to complexity report
parent
74a278e2
Hide whitespace changes
Inline
Side-by-side
Showing
9 changed files
with
494 additions
and
263 deletions
+494
-263
benchmark_api.h
include/benchmark/benchmark_api.h
+21
-11
reporter.h
include/benchmark/reporter.h
+22
-21
benchmark.cc
src/benchmark.cc
+91
-76
complexity.cc
src/complexity.cc
+55
-64
complexity.h
src/complexity.h
+7
-13
console_reporter.cc
src/console_reporter.cc
+2
-2
csv_reporter.cc
src/csv_reporter.cc
+5
-2
json_reporter.cc
src/json_reporter.cc
+22
-7
complexity_test.cc
test/complexity_test.cc
+269
-67
No files found.
include/benchmark/benchmark_api.h
View file @
867f9145
...
@@ -152,6 +152,7 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
...
@@ -152,6 +152,7 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#include <assert.h>
#include <assert.h>
#include <stddef.h>
#include <stddef.h>
#include <stdint.h>
#include <stdint.h>
#include <functional>
#include "macros.h"
#include "macros.h"
...
@@ -247,15 +248,20 @@ enum BigO {
...
@@ -247,15 +248,20 @@ enum BigO {
oNCubed
,
oNCubed
,
oLogN
,
oLogN
,
oNLogN
,
oNLogN
,
oAuto
oAuto
,
oLambda
};
};
// BigOFunc is passed to a benchmark in order to specify the asymptotic
// computational complexity for the benchmark.
typedef
double
(
BigOFunc
)(
size_t
);
// State is passed to a running Benchmark and contains state for the
// State is passed to a running Benchmark and contains state for the
// benchmark to use.
// benchmark to use.
class
State
{
class
State
{
public
:
public
:
State
(
size_t
max_iters
,
bool
has_x
,
int
x
,
bool
has_y
,
int
y
,
State
(
size_t
max_iters
,
bool
has_x
,
int
x
,
bool
has_y
,
int
y
,
int
thread_i
,
int
n_threads
);
int
thread_i
,
int
n_threads
);
// Returns true iff the benchmark should continue through another iteration.
// Returns true iff the benchmark should continue through another iteration.
// NOTE: A benchmark may not return from the test until KeepRunning() has
// NOTE: A benchmark may not return from the test until KeepRunning() has
...
@@ -268,13 +274,13 @@ public:
...
@@ -268,13 +274,13 @@ public:
}
}
bool
const
res
=
total_iterations_
++
<
max_iterations
;
bool
const
res
=
total_iterations_
++
<
max_iterations
;
if
(
BENCHMARK_BUILTIN_EXPECT
(
!
res
,
false
))
{
if
(
BENCHMARK_BUILTIN_EXPECT
(
!
res
,
false
))
{
assert
(
started_
&&
(
!
finished_
||
error_occurred_
));
assert
(
started_
&&
(
!
finished_
||
error_occurred_
));
if
(
!
error_occurred_
)
{
if
(
!
error_occurred_
)
{
PauseTiming
();
PauseTiming
();
}
}
// Total iterations now is one greater than max iterations. Fix this.
// Total iterations now is one greater than max iterations. Fix this.
total_iterations_
=
max_iterations
;
total_iterations_
=
max_iterations
;
finished_
=
true
;
finished_
=
true
;
}
}
return
res
;
return
res
;
}
}
...
@@ -359,7 +365,7 @@ public:
...
@@ -359,7 +365,7 @@ public:
// represent the length of N.
// represent the length of N.
BENCHMARK_ALWAYS_INLINE
BENCHMARK_ALWAYS_INLINE
void
SetComplexityN
(
size_t
complexity_n
)
{
void
SetComplexityN
(
size_t
complexity_n
)
{
complexity_n_
=
complexity_n
;
complexity_n_
=
complexity_n
;
}
}
BENCHMARK_ALWAYS_INLINE
BENCHMARK_ALWAYS_INLINE
...
@@ -533,10 +539,14 @@ public:
...
@@ -533,10 +539,14 @@ public:
// to control how many iterations are run, and in the printing of items/second
// to control how many iterations are run, and in the printing of items/second
// or MB/second values.
// or MB/second values.
Benchmark
*
UseManualTime
();
Benchmark
*
UseManualTime
();
// Set the asymptotic computational complexity for the benchmark. If called
// Set the asymptotic computational complexity for the benchmark. If called
// the asymptotic computational complexity will be shown on the output.
// the asymptotic computational complexity will be shown on the output.
Benchmark
*
Complexity
(
BigO
complexity
=
benchmark
::
oAuto
);
Benchmark
*
Complexity
(
BigO
complexity
=
benchmark
::
oAuto
);
// Set the asymptotic computational complexity for the benchmark. If called
// the asymptotic computational complexity will be shown on the output.
Benchmark
*
Complexity
(
BigOFunc
*
complexity
);
// Support for running multiple copies of the same benchmark concurrently
// Support for running multiple copies of the same benchmark concurrently
// in multiple threads. This may be useful when measuring the scaling
// in multiple threads. This may be useful when measuring the scaling
...
...
include/benchmark/reporter.h
View file @
867f9145
...
@@ -83,11 +83,12 @@ class BenchmarkReporter {
...
@@ -83,11 +83,12 @@ class BenchmarkReporter {
// This is set to 0.0 if memory tracing is not enabled.
// This is set to 0.0 if memory tracing is not enabled.
double
max_heapbytes_used
;
double
max_heapbytes_used
;
// Keep track of arguments to compute asymptotic complexity
// Keep track of arguments to compute asymptotic complexity
BigO
complexity
;
BigO
complexity
;
int
complexity_n
;
BigOFunc
*
complexity_lambda
;
size_t
complexity_n
;
// Inform print function whether the current run is a complexity report
// Inform print function whether the current run is a complexity report
bool
report_big_o
;
bool
report_big_o
;
bool
report_rms
;
bool
report_rms
;
...
@@ -113,7 +114,7 @@ class BenchmarkReporter {
...
@@ -113,7 +114,7 @@ class BenchmarkReporter {
// 'reports' contains additional entries representing the asymptotic
// 'reports' contains additional entries representing the asymptotic
// complexity and RMS of that benchmark family.
// complexity and RMS of that benchmark family.
virtual
void
ReportRuns
(
const
std
::
vector
<
Run
>&
report
)
=
0
;
virtual
void
ReportRuns
(
const
std
::
vector
<
Run
>&
report
)
=
0
;
// Called once and only once after ever group of benchmarks is run and
// Called once and only once after ever group of benchmarks is run and
// reported.
// reported.
virtual
void
Finalize
()
{}
virtual
void
Finalize
()
{}
...
@@ -159,7 +160,7 @@ class ConsoleReporter : public BenchmarkReporter {
...
@@ -159,7 +160,7 @@ class ConsoleReporter : public BenchmarkReporter {
virtual
bool
ReportContext
(
const
Context
&
context
);
virtual
bool
ReportContext
(
const
Context
&
context
);
virtual
void
ReportRuns
(
const
std
::
vector
<
Run
>&
reports
);
virtual
void
ReportRuns
(
const
std
::
vector
<
Run
>&
reports
);
protected
:
protected
:
virtual
void
PrintRunData
(
const
Run
&
report
);
virtual
void
PrintRunData
(
const
Run
&
report
);
size_t
name_field_width_
;
size_t
name_field_width_
;
...
@@ -189,25 +190,25 @@ private:
...
@@ -189,25 +190,25 @@ private:
inline
const
char
*
GetTimeUnitString
(
TimeUnit
unit
)
{
inline
const
char
*
GetTimeUnitString
(
TimeUnit
unit
)
{
switch
(
unit
)
{
switch
(
unit
)
{
case
kMillisecond
:
case
kMillisecond
:
return
"ms"
;
return
"ms"
;
case
kMicrosecond
:
case
kMicrosecond
:
return
"us"
;
return
"us"
;
case
kNanosecond
:
case
kNanosecond
:
default:
default:
return
"ns"
;
return
"ns"
;
}
}
}
}
inline
double
GetTimeUnitMultiplier
(
TimeUnit
unit
)
{
inline
double
GetTimeUnitMultiplier
(
TimeUnit
unit
)
{
switch
(
unit
)
{
switch
(
unit
)
{
case
kMillisecond
:
case
kMillisecond
:
return
1e3
;
return
1e3
;
case
kMicrosecond
:
case
kMicrosecond
:
return
1e6
;
return
1e6
;
case
kNanosecond
:
case
kNanosecond
:
default:
default:
return
1e9
;
return
1e9
;
}
}
}
}
...
...
src/benchmark.cc
View file @
867f9145
...
@@ -124,7 +124,7 @@ struct ThreadStats {
...
@@ -124,7 +124,7 @@ struct ThreadStats {
ThreadStats
()
:
bytes_processed
(
0
),
items_processed
(
0
),
complexity_n
(
0
)
{}
ThreadStats
()
:
bytes_processed
(
0
),
items_processed
(
0
),
complexity_n
(
0
)
{}
int64_t
bytes_processed
;
int64_t
bytes_processed
;
int64_t
items_processed
;
int64_t
items_processed
;
int
complexity_n
;
size_t
complexity_n
;
};
};
// Timer management class
// Timer management class
...
@@ -140,7 +140,7 @@ class TimerManager {
...
@@ -140,7 +140,7 @@ class TimerManager {
manual_time_used_
(
0
),
manual_time_used_
(
0
),
num_finalized_
(
0
),
num_finalized_
(
0
),
phase_number_
(
0
),
phase_number_
(
0
),
entered_
(
0
)
entered_
(
0
)
{
{
}
}
...
@@ -277,11 +277,11 @@ class TimerManager {
...
@@ -277,11 +277,11 @@ class TimerManager {
int
phase_number_cp
=
phase_number_
;
int
phase_number_cp
=
phase_number_
;
auto
cb
=
[
this
,
phase_number_cp
]()
{
auto
cb
=
[
this
,
phase_number_cp
]()
{
return
this
->
phase_number_
>
phase_number_cp
||
return
this
->
phase_number_
>
phase_number_cp
||
entered_
==
running_threads_
;
// A thread has aborted in error
entered_
==
running_threads_
;
// A thread has aborted in error
};
};
phase_condition_
.
wait
(
ml
.
native_handle
(),
cb
);
phase_condition_
.
wait
(
ml
.
native_handle
(),
cb
);
if
(
phase_number_
>
phase_number_cp
)
if
(
phase_number_
>
phase_number_cp
)
return
false
;
return
false
;
// else (running_threads_ == entered_) and we are the last thread.
// else (running_threads_ == entered_) and we are the last thread.
}
}
// Last thread has reached the barrier
// Last thread has reached the barrier
...
@@ -311,6 +311,7 @@ struct Benchmark::Instance {
...
@@ -311,6 +311,7 @@ struct Benchmark::Instance {
bool
use_real_time
;
bool
use_real_time
;
bool
use_manual_time
;
bool
use_manual_time
;
BigO
complexity
;
BigO
complexity
;
BigOFunc
*
complexity_lambda
;
bool
last_benchmark_instance
;
bool
last_benchmark_instance
;
int
repetitions
;
int
repetitions
;
double
min_time
;
double
min_time
;
...
@@ -356,6 +357,7 @@ public:
...
@@ -356,6 +357,7 @@ public:
void
UseRealTime
();
void
UseRealTime
();
void
UseManualTime
();
void
UseManualTime
();
void
Complexity
(
BigO
complexity
);
void
Complexity
(
BigO
complexity
);
void
ComplexityLambda
(
BigOFunc
*
complexity
);
void
Threads
(
int
t
);
void
Threads
(
int
t
);
void
ThreadRange
(
int
min_threads
,
int
max_threads
);
void
ThreadRange
(
int
min_threads
,
int
max_threads
);
void
ThreadPerCpu
();
void
ThreadPerCpu
();
...
@@ -376,6 +378,7 @@ private:
...
@@ -376,6 +378,7 @@ private:
bool
use_real_time_
;
bool
use_real_time_
;
bool
use_manual_time_
;
bool
use_manual_time_
;
BigO
complexity_
;
BigO
complexity_
;
BigOFunc
*
complexity_lambda_
;
std
::
vector
<
int
>
thread_counts_
;
std
::
vector
<
int
>
thread_counts_
;
BenchmarkImp
&
operator
=
(
BenchmarkImp
const
&
);
BenchmarkImp
&
operator
=
(
BenchmarkImp
const
&
);
...
@@ -440,6 +443,7 @@ bool BenchmarkFamilies::FindBenchmarks(
...
@@ -440,6 +443,7 @@ bool BenchmarkFamilies::FindBenchmarks(
instance
.
use_real_time
=
family
->
use_real_time_
;
instance
.
use_real_time
=
family
->
use_real_time_
;
instance
.
use_manual_time
=
family
->
use_manual_time_
;
instance
.
use_manual_time
=
family
->
use_manual_time_
;
instance
.
complexity
=
family
->
complexity_
;
instance
.
complexity
=
family
->
complexity_
;
instance
.
complexity_lambda
=
family
->
complexity_lambda_
;
instance
.
threads
=
num_threads
;
instance
.
threads
=
num_threads
;
instance
.
multithreaded
=
!
(
family
->
thread_counts_
.
empty
());
instance
.
multithreaded
=
!
(
family
->
thread_counts_
.
empty
());
...
@@ -567,6 +571,10 @@ void BenchmarkImp::Complexity(BigO complexity){
...
@@ -567,6 +571,10 @@ void BenchmarkImp::Complexity(BigO complexity){
complexity_
=
complexity
;
complexity_
=
complexity
;
}
}
void
BenchmarkImp
::
ComplexityLambda
(
BigOFunc
*
complexity
)
{
complexity_lambda_
=
complexity
;
}
void
BenchmarkImp
::
Threads
(
int
t
)
{
void
BenchmarkImp
::
Threads
(
int
t
)
{
CHECK_GT
(
t
,
0
);
CHECK_GT
(
t
,
0
);
thread_counts_
.
push_back
(
t
);
thread_counts_
.
push_back
(
t
);
...
@@ -691,6 +699,12 @@ Benchmark* Benchmark::Complexity(BigO complexity) {
...
@@ -691,6 +699,12 @@ Benchmark* Benchmark::Complexity(BigO complexity) {
return
this
;
return
this
;
}
}
Benchmark
*
Benchmark
::
Complexity
(
BigOFunc
*
complexity
)
{
imp_
->
Complexity
(
oLambda
);
imp_
->
ComplexityLambda
(
complexity
);
return
this
;
}
Benchmark
*
Benchmark
::
Threads
(
int
t
)
{
Benchmark
*
Benchmark
::
Threads
(
int
t
)
{
imp_
->
Threads
(
t
);
imp_
->
Threads
(
t
);
return
this
;
return
this
;
...
@@ -717,7 +731,7 @@ void FunctionBenchmark::Run(State& st) {
...
@@ -717,7 +731,7 @@ void FunctionBenchmark::Run(State& st) {
}
// end namespace internal
}
// end namespace internal
namespace
{
namespace
{
// Execute one thread of benchmark b for the specified number of iterations.
// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total.
// Adds the stats collected for the thread into *total.
void
RunInThread
(
const
benchmark
::
internal
::
Benchmark
::
Instance
*
b
,
void
RunInThread
(
const
benchmark
::
internal
::
Benchmark
::
Instance
*
b
,
...
@@ -731,15 +745,15 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b,
...
@@ -731,15 +745,15 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b,
MutexLock
l
(
GetBenchmarkLock
());
MutexLock
l
(
GetBenchmarkLock
());
total
->
bytes_processed
+=
st
.
bytes_processed
();
total
->
bytes_processed
+=
st
.
bytes_processed
();
total
->
items_processed
+=
st
.
items_processed
();
total
->
items_processed
+=
st
.
items_processed
();
total
->
complexity_n
+=
st
.
complexity_length_n
();
total
->
complexity_n
+=
st
.
complexity_length_n
();
}
}
timer_manager
->
Finalize
();
timer_manager
->
Finalize
();
}
}
void
RunBenchmark
(
const
benchmark
::
internal
::
Benchmark
::
Instance
&
b
,
void
RunBenchmark
(
const
benchmark
::
internal
::
Benchmark
::
Instance
&
b
,
BenchmarkReporter
*
br
,
BenchmarkReporter
*
br
,
std
::
vector
<
BenchmarkReporter
::
Run
>&
complexity_reports
)
std
::
vector
<
BenchmarkReporter
::
Run
>&
complexity_reports
)
EXCLUDES
(
GetBenchmarkLock
())
{
EXCLUDES
(
GetBenchmarkLock
())
{
size_t
iters
=
1
;
size_t
iters
=
1
;
...
@@ -750,7 +764,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
...
@@ -750,7 +764,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
pool
.
resize
(
b
.
threads
);
pool
.
resize
(
b
.
threads
);
const
int
repeats
=
b
.
repetitions
!=
0
?
b
.
repetitions
const
int
repeats
=
b
.
repetitions
!=
0
?
b
.
repetitions
:
FLAGS_benchmark_repetitions
;
:
FLAGS_benchmark_repetitions
;
for
(
int
i
=
0
;
i
<
repeats
;
i
++
)
{
for
(
int
i
=
0
;
i
<
repeats
;
i
++
)
{
std
::
string
mem
;
std
::
string
mem
;
for
(;;)
{
for
(;;)
{
...
@@ -830,27 +844,28 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
...
@@ -830,27 +844,28 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
report
.
time_unit
=
b
.
time_unit
;
report
.
time_unit
=
b
.
time_unit
;
if
(
!
report
.
error_occurred
)
{
if
(
!
report
.
error_occurred
)
{
double
bytes_per_second
=
0
;
double
bytes_per_second
=
0
;
if
(
total
.
bytes_processed
>
0
&&
seconds
>
0.0
)
{
if
(
total
.
bytes_processed
>
0
&&
seconds
>
0.0
)
{
bytes_per_second
=
(
total
.
bytes_processed
/
seconds
);
bytes_per_second
=
(
total
.
bytes_processed
/
seconds
);
}
}
double
items_per_second
=
0
;
double
items_per_second
=
0
;
if
(
total
.
items_processed
>
0
&&
seconds
>
0.0
)
{
if
(
total
.
items_processed
>
0
&&
seconds
>
0.0
)
{
items_per_second
=
(
total
.
items_processed
/
seconds
);
items_per_second
=
(
total
.
items_processed
/
seconds
);
}
}
if
(
b
.
use_manual_time
)
{
if
(
b
.
use_manual_time
)
{
report
.
real_accumulated_time
=
manual_accumulated_time
;
report
.
real_accumulated_time
=
manual_accumulated_time
;
}
else
{
}
else
{
report
.
real_accumulated_time
=
real_accumulated_time
;
report
.
real_accumulated_time
=
real_accumulated_time
;
}
}
report
.
cpu_accumulated_time
=
cpu_accumulated_time
;
report
.
cpu_accumulated_time
=
cpu_accumulated_time
;
report
.
bytes_per_second
=
bytes_per_second
;
report
.
bytes_per_second
=
bytes_per_second
;
report
.
items_per_second
=
items_per_second
;
report
.
items_per_second
=
items_per_second
;
report
.
complexity_n
=
total
.
complexity_n
;
report
.
complexity_n
=
total
.
complexity_n
;
report
.
complexity
=
b
.
complexity
;
report
.
complexity
=
b
.
complexity
;
if
(
report
.
complexity
!=
oNone
)
report
.
complexity_lambda
=
b
.
complexity_lambda
;
complexity_reports
.
push_back
(
report
);
if
(
report
.
complexity
!=
oNone
)
complexity_reports
.
push_back
(
report
);
}
}
reports
.
push_back
(
report
);
reports
.
push_back
(
report
);
...
@@ -878,17 +893,17 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
...
@@ -878,17 +893,17 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
}
}
std
::
vector
<
BenchmarkReporter
::
Run
>
additional_run_stats
=
ComputeStats
(
reports
);
std
::
vector
<
BenchmarkReporter
::
Run
>
additional_run_stats
=
ComputeStats
(
reports
);
reports
.
insert
(
reports
.
end
(),
additional_run_stats
.
begin
(),
reports
.
insert
(
reports
.
end
(),
additional_run_stats
.
begin
(),
additional_run_stats
.
end
());
additional_run_stats
.
end
());
if
((
b
.
complexity
!=
oNone
)
&&
b
.
last_benchmark_instance
)
{
if
((
b
.
complexity
!=
oNone
)
&&
b
.
last_benchmark_instance
)
{
additional_run_stats
=
ComputeBigO
(
complexity_reports
);
additional_run_stats
=
ComputeBigO
(
complexity_reports
);
reports
.
insert
(
reports
.
end
(),
additional_run_stats
.
begin
(),
reports
.
insert
(
reports
.
end
(),
additional_run_stats
.
begin
(),
additional_run_stats
.
end
());
additional_run_stats
.
end
());
complexity_reports
.
clear
();
complexity_reports
.
clear
();
}
}
br
->
ReportRuns
(
reports
);
br
->
ReportRuns
(
reports
);
if
(
b
.
multithreaded
)
{
if
(
b
.
multithreaded
)
{
for
(
std
::
thread
&
thread
:
pool
)
for
(
std
::
thread
&
thread
:
pool
)
thread
.
join
();
thread
.
join
();
...
@@ -949,56 +964,56 @@ void State::SetLabel(const char* label) {
...
@@ -949,56 +964,56 @@ void State::SetLabel(const char* label) {
}
}
namespace
internal
{
namespace
internal
{
namespace
{
namespace
{
void
RunMatchingBenchmarks
(
const
std
::
vector
<
Benchmark
::
Instance
>&
benchmarks
,
void
RunMatchingBenchmarks
(
const
std
::
vector
<
Benchmark
::
Instance
>&
benchmarks
,
BenchmarkReporter
*
reporter
)
{
BenchmarkReporter
*
reporter
)
{
CHECK
(
reporter
!=
nullptr
);
CHECK
(
reporter
!=
nullptr
);
// Determine the width of the name field using a minimum width of 10.
// Determine the width of the name field using a minimum width of 10.
bool
has_repetitions
=
FLAGS_benchmark_repetitions
>
1
;
bool
has_repetitions
=
FLAGS_benchmark_repetitions
>
1
;
size_t
name_field_width
=
10
;
size_t
name_field_width
=
10
;
for
(
const
Benchmark
::
Instance
&
benchmark
:
benchmarks
)
{
for
(
const
Benchmark
::
Instance
&
benchmark
:
benchmarks
)
{
name_field_width
=
name_field_width
=
std
::
max
<
size_t
>
(
name_field_width
,
benchmark
.
name
.
size
());
std
::
max
<
size_t
>
(
name_field_width
,
benchmark
.
name
.
size
());
has_repetitions
|=
benchmark
.
repetitions
>
1
;
has_repetitions
|=
benchmark
.
repetitions
>
1
;
}
}
if
(
has_repetitions
)
if
(
has_repetitions
)
name_field_width
+=
std
::
strlen
(
"_stddev"
);
name_field_width
+=
std
::
strlen
(
"_stddev"
);
// Print header here
// Print header here
BenchmarkReporter
::
Context
context
;
BenchmarkReporter
::
Context
context
;
context
.
num_cpus
=
NumCPUs
();
context
.
num_cpus
=
NumCPUs
();
context
.
mhz_per_cpu
=
CyclesPerSecond
()
/
1000000.0
f
;
context
.
mhz_per_cpu
=
CyclesPerSecond
()
/
1000000.0
f
;
context
.
cpu_scaling_enabled
=
CpuScalingEnabled
();
context
.
cpu_scaling_enabled
=
CpuScalingEnabled
();
context
.
name_field_width
=
name_field_width
;
context
.
name_field_width
=
name_field_width
;
// Keep track of runing times of all instances of current benchmark
// Keep track of runing times of all instances of current benchmark
std
::
vector
<
BenchmarkReporter
::
Run
>
complexity_reports
;
std
::
vector
<
BenchmarkReporter
::
Run
>
complexity_reports
;
if
(
reporter
->
ReportContext
(
context
))
{
if
(
reporter
->
ReportContext
(
context
))
{
for
(
const
auto
&
benchmark
:
benchmarks
)
{
for
(
const
auto
&
benchmark
:
benchmarks
)
{
RunBenchmark
(
benchmark
,
reporter
,
complexity_reports
);
RunBenchmark
(
benchmark
,
reporter
,
complexity_reports
);
}
}
}
}
}
}
std
::
unique_ptr
<
BenchmarkReporter
>
GetDefaultReporter
()
{
std
::
unique_ptr
<
BenchmarkReporter
>
GetDefaultReporter
()
{
typedef
std
::
unique_ptr
<
BenchmarkReporter
>
PtrType
;
typedef
std
::
unique_ptr
<
BenchmarkReporter
>
PtrType
;
if
(
FLAGS_benchmark_format
==
"console"
)
{
if
(
FLAGS_benchmark_format
==
"console"
)
{
return
PtrType
(
new
ConsoleReporter
);
return
PtrType
(
new
ConsoleReporter
);
}
else
if
(
FLAGS_benchmark_format
==
"json"
)
{
}
else
if
(
FLAGS_benchmark_format
==
"json"
)
{
return
PtrType
(
new
JSONReporter
);
return
PtrType
(
new
JSONReporter
);
}
else
if
(
FLAGS_benchmark_format
==
"csv"
)
{
}
else
if
(
FLAGS_benchmark_format
==
"csv"
)
{
return
PtrType
(
new
CSVReporter
);
return
PtrType
(
new
CSVReporter
);
}
else
{
}
else
{
std
::
cerr
<<
"Unexpected format: '"
<<
FLAGS_benchmark_format
<<
"'
\n
"
;
std
::
cerr
<<
"Unexpected format: '"
<<
FLAGS_benchmark_format
<<
"'
\n
"
;
std
::
exit
(
1
);
std
::
exit
(
1
);
}
}
}
}
}
// end namespace
}
// end namespace
}
// end namespace internal
}
// end namespace internal
size_t
RunSpecifiedBenchmarks
()
{
size_t
RunSpecifiedBenchmarks
()
{
...
...
src/complexity.cc
View file @
867f9145
...
@@ -25,43 +25,43 @@
...
@@ -25,43 +25,43 @@
#include <functional>
#include <functional>
namespace
benchmark
{
namespace
benchmark
{
// Internal function to calculate the different scalability forms
// Internal function to calculate the different scalability forms
std
::
function
<
double
(
int
)
>
FittingCurve
(
BigO
complexity
)
{
BigOFunc
*
FittingCurve
(
BigO
complexity
)
{
switch
(
complexity
)
{
switch
(
complexity
)
{
case
oN
:
case
oN
:
return
[](
int
n
)
{
return
n
;
};
return
[](
size_t
n
)
->
double
{
return
n
;
};
case
oNSquared
:
case
oNSquared
:
return
[](
int
n
)
{
return
n
*
n
;
};
return
[](
size_t
n
)
->
double
{
return
n
*
n
;
};
case
oNCubed
:
case
oNCubed
:
return
[](
int
n
)
{
return
n
*
n
*
n
;
};
return
[](
size_t
n
)
->
double
{
return
n
*
n
*
n
;
};
case
oLogN
:
case
oLogN
:
return
[](
in
t
n
)
{
return
log2
(
n
);
};
return
[](
size_
t
n
)
{
return
log2
(
n
);
};
case
oNLogN
:
case
oNLogN
:
return
[](
in
t
n
)
{
return
n
*
log2
(
n
);
};
return
[](
size_
t
n
)
{
return
n
*
log2
(
n
);
};
case
o1
:
case
o1
:
default
:
default
:
return
[](
int
)
{
return
1
;
};
return
[](
size_t
)
{
return
1.0
;
};
}
}
}
}
// Function to return an string for the calculated complexity
// Function to return an string for the calculated complexity
std
::
string
GetBigOString
(
BigO
complexity
)
{
std
::
string
GetBigOString
(
BigO
complexity
)
{
switch
(
complexity
)
{
switch
(
complexity
)
{
case
oN
:
case
oN
:
return
"*
N"
;
return
"
N"
;
case
oNSquared
:
case
oNSquared
:
return
"* N**
2"
;
return
"N^
2"
;
case
oNCubed
:
case
oNCubed
:
return
"* N**
3"
;
return
"N^
3"
;
case
oLogN
:
case
oLogN
:
return
"*
lgN"
;
return
"
lgN"
;
case
oNLogN
:
case
oNLogN
:
return
"*
NlgN"
;
return
"
NlgN"
;
case
o1
:
case
o1
:
return
"* 1
"
;
return
"(1)
"
;
default
:
default
:
return
"
"
;
return
"f(N)
"
;
}
}
}
}
...
@@ -75,21 +75,9 @@ std::string GetBigOString(BigO complexity) {
...
@@ -75,21 +75,9 @@ std::string GetBigOString(BigO complexity) {
// For a deeper explanation on the algorithm logic, look the README file at
// For a deeper explanation on the algorithm logic, look the README file at
// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
// This interface is currently not used from the oustide, but it has been
LeastSq
MinimalLeastSq
(
const
std
::
vector
<
int
>&
n
,
// provided for future upgrades. If in the future it is not needed to support
const
std
::
vector
<
double
>&
time
,
// Cxx03, then all the calculations could be upgraded to use lambdas because
BigOFunc
*
fitting_curve
)
{
// they are more powerful and provide a cleaner inferface than enumerators,
// but complete implementation with lambdas will not work for Cxx03
// (e.g. lack of std::function).
// In case lambdas are implemented, the interface would be like :
// -> Complexity([](int n) {return n;};)
// and any arbitrary and valid equation would be allowed, but the option to
// calculate the best fit to the most common scalability curves will still
// be kept.
LeastSq
CalculateLeastSq
(
const
std
::
vector
<
int
>&
n
,
const
std
::
vector
<
double
>&
time
,
std
::
function
<
double
(
int
)
>
fitting_curve
)
{
double
sigma_gn
=
0.0
;
double
sigma_gn
=
0.0
;
double
sigma_gn_squared
=
0.0
;
double
sigma_gn_squared
=
0.0
;
double
sigma_time
=
0.0
;
double
sigma_time
=
0.0
;
...
@@ -105,6 +93,7 @@ LeastSq CalculateLeastSq(const std::vector<int>& n,
...
@@ -105,6 +93,7 @@ LeastSq CalculateLeastSq(const std::vector<int>& n,
}
}
LeastSq
result
;
LeastSq
result
;
result
.
complexity
=
oLambda
;
// Calculate complexity.
// Calculate complexity.
result
.
coef
=
sigma_time_gn
/
sigma_gn_squared
;
result
.
coef
=
sigma_time_gn
/
sigma_gn_squared
;
...
@@ -144,19 +133,19 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
...
@@ -144,19 +133,19 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
oLogN
,
oN
,
oNLogN
,
oNSquared
,
oNCubed
};
oLogN
,
oN
,
oNLogN
,
oNSquared
,
oNCubed
};
// Take o1 as default best fitting curve
// Take o1 as default best fitting curve
best_fit
=
Calculate
LeastSq
(
n
,
time
,
FittingCurve
(
o1
));
best_fit
=
Minimal
LeastSq
(
n
,
time
,
FittingCurve
(
o1
));
best_fit
.
complexity
=
o1
;
best_fit
.
complexity
=
o1
;
// Compute all possible fitting curves and stick to the best one
// Compute all possible fitting curves and stick to the best one
for
(
const
auto
&
fit
:
fit_curves
)
{
for
(
const
auto
&
fit
:
fit_curves
)
{
LeastSq
current_fit
=
Calculate
LeastSq
(
n
,
time
,
FittingCurve
(
fit
));
LeastSq
current_fit
=
Minimal
LeastSq
(
n
,
time
,
FittingCurve
(
fit
));
if
(
current_fit
.
rms
<
best_fit
.
rms
)
{
if
(
current_fit
.
rms
<
best_fit
.
rms
)
{
best_fit
=
current_fit
;
best_fit
=
current_fit
;
best_fit
.
complexity
=
fit
;
best_fit
.
complexity
=
fit
;
}
}
}
}
}
else
{
}
else
{
best_fit
=
Calculate
LeastSq
(
n
,
time
,
FittingCurve
(
complexity
));
best_fit
=
Minimal
LeastSq
(
n
,
time
,
FittingCurve
(
complexity
));
best_fit
.
complexity
=
complexity
;
best_fit
.
complexity
=
complexity
;
}
}
...
@@ -164,14 +153,14 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
...
@@ -164,14 +153,14 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
}
}
std
::
vector
<
BenchmarkReporter
::
Run
>
ComputeStats
(
std
::
vector
<
BenchmarkReporter
::
Run
>
ComputeStats
(
const
std
::
vector
<
BenchmarkReporter
::
Run
>&
reports
)
const
std
::
vector
<
BenchmarkReporter
::
Run
>&
reports
)
{
{
typedef
BenchmarkReporter
::
Run
Run
;
typedef
BenchmarkReporter
::
Run
Run
;
std
::
vector
<
Run
>
results
;
std
::
vector
<
Run
>
results
;
auto
error_count
=
std
::
count_if
(
auto
error_count
=
std
::
count_if
(
reports
.
begin
(),
reports
.
end
(),
reports
.
begin
(),
reports
.
end
(),
[](
Run
const
&
run
)
{
return
run
.
error_occurred
;});
[](
Run
const
&
run
)
{
return
run
.
error_occurred
;});
if
(
reports
.
size
()
-
error_count
<
2
)
{
if
(
reports
.
size
()
-
error_count
<
2
)
{
// We don't report aggregated data if there was a single run.
// We don't report aggregated data if there was a single run.
...
@@ -193,9 +182,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
...
@@ -193,9 +182,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
if
(
run
.
error_occurred
)
if
(
run
.
error_occurred
)
continue
;
continue
;
real_accumulated_time_stat
+=
real_accumulated_time_stat
+=
Stat1_d
(
run
.
real_accumulated_time
/
run
.
iterations
,
run
.
iterations
);
Stat1_d
(
run
.
real_accumulated_time
/
run
.
iterations
,
run
.
iterations
);
cpu_accumulated_time_stat
+=
cpu_accumulated_time_stat
+=
Stat1_d
(
run
.
cpu_accumulated_time
/
run
.
iterations
,
run
.
iterations
);
Stat1_d
(
run
.
cpu_accumulated_time
/
run
.
iterations
,
run
.
iterations
);
items_per_second_stat
+=
Stat1_d
(
run
.
items_per_second
,
run
.
iterations
);
items_per_second_stat
+=
Stat1_d
(
run
.
items_per_second
,
run
.
iterations
);
bytes_per_second_stat
+=
Stat1_d
(
run
.
bytes_per_second
,
run
.
iterations
);
bytes_per_second_stat
+=
Stat1_d
(
run
.
bytes_per_second
,
run
.
iterations
);
}
}
...
@@ -205,9 +194,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
...
@@ -205,9 +194,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
mean_data
.
benchmark_name
=
reports
[
0
].
benchmark_name
+
"_mean"
;
mean_data
.
benchmark_name
=
reports
[
0
].
benchmark_name
+
"_mean"
;
mean_data
.
iterations
=
run_iterations
;
mean_data
.
iterations
=
run_iterations
;
mean_data
.
real_accumulated_time
=
real_accumulated_time_stat
.
Mean
()
*
mean_data
.
real_accumulated_time
=
real_accumulated_time_stat
.
Mean
()
*
run_iterations
;
run_iterations
;
mean_data
.
cpu_accumulated_time
=
cpu_accumulated_time_stat
.
Mean
()
*
mean_data
.
cpu_accumulated_time
=
cpu_accumulated_time_stat
.
Mean
()
*
run_iterations
;
run_iterations
;
mean_data
.
bytes_per_second
=
bytes_per_second_stat
.
Mean
();
mean_data
.
bytes_per_second
=
bytes_per_second_stat
.
Mean
();
mean_data
.
items_per_second
=
items_per_second_stat
.
Mean
();
mean_data
.
items_per_second
=
items_per_second_stat
.
Mean
();
...
@@ -225,9 +214,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
...
@@ -225,9 +214,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
stddev_data
.
report_label
=
mean_data
.
report_label
;
stddev_data
.
report_label
=
mean_data
.
report_label
;
stddev_data
.
iterations
=
0
;
stddev_data
.
iterations
=
0
;
stddev_data
.
real_accumulated_time
=
stddev_data
.
real_accumulated_time
=
real_accumulated_time_stat
.
StdDev
();
real_accumulated_time_stat
.
StdDev
();
stddev_data
.
cpu_accumulated_time
=
stddev_data
.
cpu_accumulated_time
=
cpu_accumulated_time_stat
.
StdDev
();
cpu_accumulated_time_stat
.
StdDev
();
stddev_data
.
bytes_per_second
=
bytes_per_second_stat
.
StdDev
();
stddev_data
.
bytes_per_second
=
bytes_per_second_stat
.
StdDev
();
stddev_data
.
items_per_second
=
items_per_second_stat
.
StdDev
();
stddev_data
.
items_per_second
=
items_per_second_stat
.
StdDev
();
...
@@ -237,7 +226,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
...
@@ -237,7 +226,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
}
}
std
::
vector
<
BenchmarkReporter
::
Run
>
ComputeBigO
(
std
::
vector
<
BenchmarkReporter
::
Run
>
ComputeBigO
(
const
std
::
vector
<
BenchmarkReporter
::
Run
>&
reports
)
const
std
::
vector
<
BenchmarkReporter
::
Run
>&
reports
)
{
{
typedef
BenchmarkReporter
::
Run
Run
;
typedef
BenchmarkReporter
::
Run
Run
;
std
::
vector
<
Run
>
results
;
std
::
vector
<
Run
>
results
;
...
@@ -256,14 +245,16 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
...
@@ -256,14 +245,16 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
cpu_time
.
push_back
(
run
.
cpu_accumulated_time
/
run
.
iterations
);
cpu_time
.
push_back
(
run
.
cpu_accumulated_time
/
run
.
iterations
);
}
}
LeastSq
result_cpu
=
MinimalLeastSq
(
n
,
cpu_time
,
reports
[
0
].
complexity
);
LeastSq
result_cpu
;
LeastSq
result_real
;
// result_cpu.complexity is passed as parameter to result_real because in case
// reports[0].complexity is oAuto, the noise on the measured data could make
// the best fit function of Cpu and Real differ. In order to solve this, we
// take the best fitting function for the Cpu, and apply it to Real data.
LeastSq
result_real
=
MinimalLeastSq
(
n
,
real_time
,
result_cpu
.
complexity
);
if
(
reports
[
0
].
complexity
!=
oLambda
)
{
result_cpu
=
MinimalLeastSq
(
n
,
cpu_time
,
reports
[
0
].
complexity
);
result_real
=
MinimalLeastSq
(
n
,
real_time
,
result_cpu
.
complexity
);
}
else
{
result_cpu
=
MinimalLeastSq
(
n
,
cpu_time
,
reports
[
0
].
complexity_lambda
);
result_real
=
MinimalLeastSq
(
n
,
real_time
,
reports
[
0
].
complexity_lambda
);
}
std
::
string
benchmark_name
=
reports
[
0
].
benchmark_name
.
substr
(
0
,
reports
[
0
].
benchmark_name
.
find
(
'/'
));
std
::
string
benchmark_name
=
reports
[
0
].
benchmark_name
.
substr
(
0
,
reports
[
0
].
benchmark_name
.
find
(
'/'
));
// Get the data from the accumulator to BenchmarkReporter::Run's.
// Get the data from the accumulator to BenchmarkReporter::Run's.
...
...
src/complexity.h
View file @
867f9145
...
@@ -26,15 +26,15 @@
...
@@ -26,15 +26,15 @@
namespace
benchmark
{
namespace
benchmark
{
// Return a vector containing the mean and standard devation information for
// Return a vector containing the mean and standard devation information for
// the specified list of reports. If 'reports' contains less than two
// the specified list of reports. If 'reports' contains less than two
// non-errored runs an empty vector is returned
// non-errored runs an empty vector is returned
std
::
vector
<
BenchmarkReporter
::
Run
>
ComputeStats
(
std
::
vector
<
BenchmarkReporter
::
Run
>
ComputeStats
(
const
std
::
vector
<
BenchmarkReporter
::
Run
>&
reports
);
const
std
::
vector
<
BenchmarkReporter
::
Run
>&
reports
);
// Return a vector containing the bigO and RMS information for the specified
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std
::
vector
<
BenchmarkReporter
::
Run
>
ComputeBigO
(
std
::
vector
<
BenchmarkReporter
::
Run
>
ComputeBigO
(
const
std
::
vector
<
BenchmarkReporter
::
Run
>&
reports
);
const
std
::
vector
<
BenchmarkReporter
::
Run
>&
reports
);
// This data structure will contain the result returned by MinimalLeastSq
// This data structure will contain the result returned by MinimalLeastSq
...
@@ -60,11 +60,5 @@ struct LeastSq {
...
@@ -60,11 +60,5 @@ struct LeastSq {
// Function to return an string for the calculated complexity
// Function to return an string for the calculated complexity
std
::
string
GetBigOString
(
BigO
complexity
);
std
::
string
GetBigOString
(
BigO
complexity
);
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error.
LeastSq
MinimalLeastSq
(
const
std
::
vector
<
int
>&
n
,
const
std
::
vector
<
double
>&
time
,
const
BigO
complexity
=
oAuto
);
}
// end namespace benchmark
}
// end namespace benchmark
#endif // COMPLEXITY_H_
#endif // COMPLEXITY_H_
src/console_reporter.cc
View file @
867f9145
...
@@ -90,8 +90,8 @@ void ConsoleReporter::PrintRunData(const Run& result) {
...
@@ -90,8 +90,8 @@ void ConsoleReporter::PrintRunData(const Run& result) {
const
double
cpu_time
=
result
.
GetAdjustedCPUTime
();
const
double
cpu_time
=
result
.
GetAdjustedCPUTime
();
if
(
result
.
report_big_o
)
{
if
(
result
.
report_big_o
)
{
std
::
string
big_o
=
result
.
report_big_o
?
GetBigOString
(
result
.
complexity
)
:
""
;
std
::
string
big_o
=
GetBigOString
(
result
.
complexity
)
;
ColorPrintf
(
Out
,
COLOR_YELLOW
,
"%10.
4f %s %10.4
f %s "
,
ColorPrintf
(
Out
,
COLOR_YELLOW
,
"%10.
2f %s %10.2
f %s "
,
real_time
,
big_o
.
c_str
(),
cpu_time
,
big_o
.
c_str
());
real_time
,
big_o
.
c_str
(),
cpu_time
,
big_o
.
c_str
());
}
else
if
(
result
.
report_rms
)
{
}
else
if
(
result
.
report_rms
)
{
ColorPrintf
(
Out
,
COLOR_YELLOW
,
"%10.0f %% %10.0f %% "
,
ColorPrintf
(
Out
,
COLOR_YELLOW
,
"%10.0f %% %10.0f %% "
,
...
...
src/csv_reporter.cc
View file @
867f9145
...
@@ -13,6 +13,7 @@
...
@@ -13,6 +13,7 @@
// limitations under the License.
// limitations under the License.
#include "benchmark/reporter.h"
#include "benchmark/reporter.h"
#include "complexity.h"
#include <cstdint>
#include <cstdint>
#include <algorithm>
#include <algorithm>
...
@@ -87,8 +88,10 @@ void CSVReporter::PrintRunData(const Run & run) {
...
@@ -87,8 +88,10 @@ void CSVReporter::PrintRunData(const Run & run) {
Out
<<
run
.
GetAdjustedRealTime
()
<<
","
;
Out
<<
run
.
GetAdjustedRealTime
()
<<
","
;
Out
<<
run
.
GetAdjustedCPUTime
()
<<
","
;
Out
<<
run
.
GetAdjustedCPUTime
()
<<
","
;
// Do not print timeLabel on RMS report
// Do not print timeLabel on bigO and RMS report
if
(
!
run
.
report_rms
)
{
if
(
run
.
report_big_o
)
{
Out
<<
GetBigOString
(
run
.
complexity
);
}
else
if
(
!
run
.
report_rms
){
Out
<<
GetTimeUnitString
(
run
.
time_unit
);
Out
<<
GetTimeUnitString
(
run
.
time_unit
);
}
}
Out
<<
","
;
Out
<<
","
;
...
...
src/json_reporter.cc
View file @
867f9145
...
@@ -13,6 +13,7 @@
...
@@ -13,6 +13,7 @@
// limitations under the License.
// limitations under the License.
#include "benchmark/reporter.h"
#include "benchmark/reporter.h"
#include "complexity.h"
#include <cstdint>
#include <cstdint>
#include <algorithm>
#include <algorithm>
...
@@ -132,15 +133,29 @@ void JSONReporter::PrintRunData(Run const& run) {
...
@@ -132,15 +133,29 @@ void JSONReporter::PrintRunData(Run const& run) {
out
<<
indent
out
<<
indent
<<
FormatKV
(
"iterations"
,
run
.
iterations
)
<<
FormatKV
(
"iterations"
,
run
.
iterations
)
<<
",
\n
"
;
<<
",
\n
"
;
}
out
<<
indent
out
<<
indent
<<
FormatKV
(
"real_time"
,
RoundDouble
(
run
.
GetAdjustedRealTime
()))
<<
FormatKV
(
"real_time"
,
RoundDouble
(
run
.
GetAdjustedRealTime
()))
<<
",
\n
"
;
<<
",
\n
"
;
out
<<
indent
out
<<
indent
<<
FormatKV
(
"cpu_time"
,
RoundDouble
(
run
.
GetAdjustedCPUTime
()));
<<
FormatKV
(
"cpu_time"
,
RoundDouble
(
run
.
GetAdjustedCPUTime
()));
if
(
!
run
.
report_rms
)
{
out
<<
",
\n
"
<<
indent
out
<<
",
\n
"
<<
indent
<<
FormatKV
(
"time_unit"
,
GetTimeUnitString
(
run
.
time_unit
));
<<
FormatKV
(
"time_unit"
,
GetTimeUnitString
(
run
.
time_unit
));
}
else
if
(
run
.
report_big_o
)
{
out
<<
indent
<<
FormatKV
(
"cpu_coefficient"
,
RoundDouble
(
run
.
GetAdjustedCPUTime
()))
<<
",
\n
"
;
out
<<
indent
<<
FormatKV
(
"real_coefficient"
,
RoundDouble
(
run
.
GetAdjustedRealTime
()))
<<
",
\n
"
;
out
<<
indent
<<
FormatKV
(
"big_o"
,
GetBigOString
(
run
.
complexity
))
<<
",
\n
"
;
out
<<
indent
<<
FormatKV
(
"time_unit"
,
GetTimeUnitString
(
run
.
time_unit
));
}
else
if
(
run
.
report_rms
)
{
out
<<
indent
<<
FormatKV
(
"rms"
,
RoundDouble
(
run
.
GetAdjustedCPUTime
()
*
100
))
<<
"%"
;
}
}
if
(
run
.
bytes_per_second
>
0.0
)
{
if
(
run
.
bytes_per_second
>
0.0
)
{
out
<<
",
\n
"
<<
indent
out
<<
",
\n
"
<<
indent
...
...
test/complexity_test.cc
View file @
867f9145
#include "benchmark/benchmark_api.h"
#undef NDEBUG
#include "benchmark/benchmark.h"
#include <cstdlib>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include <string>
#include "../src/re.h" // NOTE: re.h is for internal use only
#include <cassert>
#include <cstring>
#include <iostream>
#include <sstream>
#include <vector>
#include <vector>
#include <
map
>
#include <
utility
>
#include <algorithm>
#include <algorithm>
std
::
vector
<
int
>
ConstructRandomVector
(
int
size
)
{
namespace
{
std
::
vector
<
int
>
v
;
v
.
reserve
(
size
);
// ========================================================================= //
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
// -------------------------- Testing Case --------------------------------- //
v
.
push_back
(
rand
()
%
size
);
// ========================================================================= //
enum
MatchRules
{
MR_Default
,
// Skip non-matching lines until a match is found.
MR_Next
// Match must occur on the next line.
};
struct
TestCase
{
std
::
string
regex
;
int
match_rule
;
TestCase
(
std
::
string
re
,
int
rule
=
MR_Default
)
:
regex
(
re
),
match_rule
(
rule
)
{}
void
Check
(
std
::
stringstream
&
remaining_output
)
const
{
benchmark
::
Regex
r
;
std
::
string
err_str
;
r
.
Init
(
regex
,
&
err_str
);
CHECK
(
err_str
.
empty
())
<<
"Could not construct regex
\"
"
<<
regex
<<
"
\"
"
<<
" got Error: "
<<
err_str
;
std
::
string
line
;
while
(
remaining_output
.
eof
()
==
false
)
{
CHECK
(
remaining_output
.
good
());
std
::
getline
(
remaining_output
,
line
);
if
(
r
.
Match
(
line
))
return
;
CHECK
(
match_rule
!=
MR_Next
)
<<
"Expected line
\"
"
<<
line
<<
"
\"
to match regex
\"
"
<<
regex
<<
"
\"
"
;
}
CHECK
(
remaining_output
.
eof
()
==
false
)
<<
"End of output reached before match for regex
\"
"
<<
regex
<<
"
\"
was found"
;
}
}
return
v
;
};
}
std
::
map
<
int
,
int
>
ConstructRandomMap
(
int
size
)
{
std
::
vector
<
TestCase
>
ConsoleOutputTests
;
std
::
map
<
int
,
int
>
m
;
std
::
vector
<
TestCase
>
JSONOutputTests
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
std
::
vector
<
TestCase
>
CSVOutputTests
;
m
.
insert
(
std
::
make_pair
(
rand
()
%
size
,
rand
()
%
size
));
// ========================================================================= //
// -------------------------- Test Helpers --------------------------------- //
// ========================================================================= //
class
TestReporter
:
public
benchmark
::
BenchmarkReporter
{
public
:
TestReporter
(
std
::
vector
<
benchmark
::
BenchmarkReporter
*>
reps
)
:
reporters_
(
reps
)
{}
virtual
bool
ReportContext
(
const
Context
&
context
)
{
bool
last_ret
=
false
;
bool
first
=
true
;
for
(
auto
rep
:
reporters_
)
{
bool
new_ret
=
rep
->
ReportContext
(
context
);
CHECK
(
first
||
new_ret
==
last_ret
)
<<
"Reports return different values for ReportContext"
;
first
=
false
;
last_ret
=
new_ret
;
}
return
last_ret
;
}
}
return
m
;
virtual
void
ReportRuns
(
const
std
::
vector
<
Run
>&
report
)
{
for
(
auto
rep
:
reporters_
)
rep
->
ReportRuns
(
report
);
}
virtual
void
Finalize
()
{
for
(
auto
rep
:
reporters_
)
rep
->
Finalize
();
}
private
:
std
::
vector
<
benchmark
::
BenchmarkReporter
*>
reporters_
;
};
#define CONCAT2(x, y) x##y
#define CONCAT(x, y) CONCAT2(x, y)
#define ADD_CASES(...) \
int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
int
AddCases
(
std
::
vector
<
TestCase
>*
out
,
std
::
initializer_list
<
TestCase
>
const
&
v
)
{
for
(
auto
const
&
TC
:
v
)
out
->
push_back
(
TC
);
return
0
;
}
// Base case of the variadic join: a single piece joins to itself.
template <class First>
std::string join(First f) {
  return f;
}

// Concatenates all arguments into one regex string, inserting the
// pattern "[ ]+" (one or more spaces) between consecutive pieces,
// e.g. join("a", "b") == "a[ ]+b".
template <class First, class... Args>
std::string join(First f, Args&&... args) {
  std::string joined(std::move(f));
  joined += "[ ]+";
  joined += join(std::forward<Args>(args)...);
  return joined;
}
}
std
::
string
dec_re
=
"[0-9]+
\\
.[0-9]+"
;
#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
int
AddComplexityTest
(
std
::
vector
<
TestCase
>*
console_out
,
std
::
vector
<
TestCase
>*
json_out
,
std
::
vector
<
TestCase
>*
csv_out
,
std
::
string
big_o_test_name
,
std
::
string
rms_test_name
,
std
::
string
big_o
)
{
std
::
string
big_o_str
=
dec_re
+
" "
+
big_o
;
AddCases
(
console_out
,
{
{
join
(
"^"
+
big_o_test_name
+
""
,
big_o_str
,
big_o_str
)
+
"[ ]*$"
},
{
join
(
"^"
+
rms_test_name
+
""
,
"[0-9]+ %"
,
"[0-9]+ %"
)
+
"[ ]*$"
}
});
AddCases
(
json_out
,
{
{
"
\"
name
\"
:
\"
"
+
big_o_test_name
+
"
\"
,$"
},
{
"
\"
cpu_coefficient
\"
: [0-9]+,$"
,
MR_Next
},
{
"
\"
real_coefficient
\"
: [0-9]{1,5},$"
,
MR_Next
},
{
"
\"
big_o
\"
:
\"
"
+
big_o
+
"
\"
,$"
,
MR_Next
},
{
"
\"
time_unit
\"
:
\"
ns
\"
$"
,
MR_Next
},
{
"}"
,
MR_Next
},
{
"
\"
name
\"
:
\"
"
+
rms_test_name
+
"
\"
,$"
},
{
"
\"
rms
\"
: [0-9]+%$"
,
MR_Next
},
{
"}"
,
MR_Next
}
});
AddCases
(
csv_out
,
{
{
"^
\"
"
+
big_o_test_name
+
"
\"
,,"
+
dec_re
+
","
+
dec_re
+
","
+
big_o
+
",,,,,$"
},
{
"^
\"
"
+
rms_test_name
+
"
\"
,,"
+
dec_re
+
","
+
dec_re
+
",,,,,,$"
}
});
return
0
;
}
}
// end namespace
// ========================================================================= //
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //
void
BM_Complexity_O1
(
benchmark
::
State
&
state
)
{
void
BM_Complexity_O1
(
benchmark
::
State
&
state
)
{
while
(
state
.
KeepRunning
())
{
while
(
state
.
KeepRunning
())
{
}
}
state
.
SetComplexityN
(
state
.
range_x
());
state
.
SetComplexityN
(
state
.
range_x
());
}
}
BENCHMARK
(
BM_Complexity_O1
)
->
Range
(
1
,
1
<<
18
)
->
Complexity
(
benchmark
::
o1
);
BENCHMARK
(
BM_Complexity_O1
)
->
Range
(
1
,
1
<<
18
)
->
Complexity
();
BENCHMARK
(
BM_Complexity_O1
)
->
Range
(
1
,
1
<<
18
)
->
Complexity
(
benchmark
::
o1
);
BENCHMARK
(
BM_Complexity_O1
)
->
Range
(
1
,
1
<<
18
)
->
Complexity
([](
size_t
){
return
1.0
;
});
std
::
string
big_o_1_test_name
=
"BM_Complexity_O1_BigO"
;
std
::
string
rms_o_1_test_name
=
"BM_Complexity_O1_RMS"
;
std
::
string
enum_auto_big_o_1
=
"
\\
([0-9]+
\\
)"
;
std
::
string
lambda_big_o_1
=
"f
\\
(N
\\
)"
;
// Add automatic tests
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
big_o_1_test_name
,
rms_o_1_test_name
,
enum_auto_big_o_1
);
// Add enum tests
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
big_o_1_test_name
,
rms_o_1_test_name
,
enum_auto_big_o_1
);
// Add lambda tests
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
big_o_1_test_name
,
rms_o_1_test_name
,
lambda_big_o_1
);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
// Builds a vector of |size| pseudo-random ints, each drawn from
// [0, size) via rand(). Gives the search benchmarks an input whose
// size scales with range_x().
std::vector<int> ConstructRandomVector(int size) {
  std::vector<int> values;
  // Reserve up front: a single allocation keeps timing noise out of
  // the benchmark body that consumes this vector.
  values.reserve(size);
  for (int i = 0; i < size; ++i) {
    values.push_back(rand() % size);
  }
  return values;
}
static
void
BM_Complexity_O_N
(
benchmark
::
State
&
state
)
{
void
BM_Complexity_O_N
(
benchmark
::
State
&
state
)
{
auto
v
=
ConstructRandomVector
(
state
.
range_x
());
auto
v
=
ConstructRandomVector
(
state
.
range_x
());
const
int
item_not_in_vector
=
state
.
range_x
()
*
2
;
// Test worst case scenario (item not in vector)
const
int
item_not_in_vector
=
state
.
range_x
()
*
2
;
// Test worst case scenario (item not in vector)
while
(
state
.
KeepRunning
())
{
while
(
state
.
KeepRunning
())
{
...
@@ -39,51 +195,30 @@ static void BM_Complexity_O_N(benchmark::State& state) {
...
@@ -39,51 +195,30 @@ static void BM_Complexity_O_N(benchmark::State& state) {
}
}
state
.
SetComplexityN
(
state
.
range_x
());
state
.
SetComplexityN
(
state
.
range_x
());
}
}
BENCHMARK
(
BM_Complexity_O_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
(
benchmark
::
oN
);
BENCHMARK
(
BM_Complexity_O_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
();
BENCHMARK
(
BM_Complexity_O_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
();
BENCHMARK
(
BM_Complexity_O_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
(
benchmark
::
oN
);
BENCHMARK
(
BM_Complexity_O_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
([](
size_t
n
)
->
double
{
return
n
;
});
static
void
BM_Complexity_O_N_Squared
(
benchmark
::
State
&
state
)
{
std
::
string
big_o_n_test_name
=
"BM_Complexity_O_N_BigO"
;
std
::
string
s1
(
state
.
range_x
(),
'-'
);
std
::
string
rms_o_n_test_name
=
"BM_Complexity_O_N_RMS"
;
std
::
string
s2
(
state
.
range_x
(),
'-'
);
std
::
string
enum_auto_big_o_n
=
"N"
;
state
.
SetComplexityN
(
state
.
range_x
());
std
::
string
lambda_big_o_n
=
"f
\\
(N
\\
)"
;
while
(
state
.
KeepRunning
())
for
(
char
&
c1
:
s1
)
{
for
(
char
&
c2
:
s2
)
{
benchmark
::
DoNotOptimize
(
c1
=
'a'
);
benchmark
::
DoNotOptimize
(
c2
=
'b'
);
}
}
}
BENCHMARK
(
BM_Complexity_O_N_Squared
)
->
Range
(
1
,
1
<<
8
)
->
Complexity
(
benchmark
::
oNSquared
);
static
void
BM_Complexity_O_N_Cubed
(
benchmark
::
State
&
state
)
{
// Add automatic tests
std
::
string
s1
(
state
.
range_x
(),
'-'
);
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
std
::
string
s2
(
state
.
range_x
(),
'-'
);
big_o_n_test_name
,
rms_o_n_test_name
,
enum_auto_big_o_n
);
std
::
string
s3
(
state
.
range_x
(),
'-'
);
state
.
SetComplexityN
(
state
.
range_x
());
while
(
state
.
KeepRunning
())
for
(
char
&
c1
:
s1
)
{
for
(
char
&
c2
:
s2
)
{
for
(
char
&
c3
:
s3
)
{
benchmark
::
DoNotOptimize
(
c1
=
'a'
);
benchmark
::
DoNotOptimize
(
c2
=
'b'
);
benchmark
::
DoNotOptimize
(
c3
=
'c'
);
}
}
}
}
BENCHMARK
(
BM_Complexity_O_N_Cubed
)
->
DenseRange
(
1
,
8
)
->
Complexity
(
benchmark
::
oNCubed
);
static
void
BM_Complexity_O_log_N
(
benchmark
::
State
&
state
)
{
// Add enum tests
auto
m
=
ConstructRandomMap
(
state
.
range_x
());
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
const
int
item_not_in_vector
=
state
.
range_x
()
*
2
;
// Test worst case scenario (item not in vector)
big_o_n_test_name
,
rms_o_n_test_name
,
enum_auto_big_o_n
);
while
(
state
.
KeepRunning
())
{
benchmark
::
DoNotOptimize
(
m
.
find
(
item_not_in_vector
));
// Add lambda tests
}
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
state
.
SetComplexityN
(
state
.
range_x
());
big_o_n_test_name
,
rms_o_n_test_name
,
lambda_big_o_n
);
}
BENCHMARK
(
BM_Complexity_O_log_N
)
// ========================================================================= //
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
(
benchmark
::
oLogN
);
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //
static
void
BM_Complexity_O_N_log_N
(
benchmark
::
State
&
state
)
{
static
void
BM_Complexity_O_N_log_N
(
benchmark
::
State
&
state
)
{
auto
v
=
ConstructRandomVector
(
state
.
range_x
());
auto
v
=
ConstructRandomVector
(
state
.
range_x
());
...
@@ -92,15 +227,82 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) {
...
@@ -92,15 +227,82 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) {
}
}
state
.
SetComplexityN
(
state
.
range_x
());
state
.
SetComplexityN
(
state
.
range_x
());
}
}
BENCHMARK
(
BM_Complexity_O_N_log_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
(
benchmark
::
oNLogN
);
BENCHMARK
(
BM_Complexity_O_N_log_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
();
BENCHMARK
(
BM_Complexity_O_N_log_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
();
BENCHMARK
(
BM_Complexity_O_N_log_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
(
benchmark
::
oNLogN
);
BENCHMARK
(
BM_Complexity_O_N_log_N
)
->
RangeMultiplier
(
2
)
->
Range
(
1
<<
10
,
1
<<
16
)
->
Complexity
([](
size_t
n
)
{
return
n
*
log2
(
n
);
});
// Test benchmark with no range and check no complexity is calculated.
std
::
string
big_o_n_lg_n_test_name
=
"BM_Complexity_O_N_log_N_BigO"
;
void
BM_Extreme_Cases
(
benchmark
::
State
&
state
)
{
std
::
string
rms_o_n_lg_n_test_name
=
"BM_Complexity_O_N_log_N_RMS"
;
while
(
state
.
KeepRunning
())
{
std
::
string
enum_auto_big_o_n_lg_n
=
"NlgN"
;
std
::
string
lambda_big_o_n_lg_n
=
"f
\\
(N
\\
)"
;
// Add automatic tests
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
big_o_n_lg_n_test_name
,
rms_o_n_lg_n_test_name
,
enum_auto_big_o_n_lg_n
);
// Add enum tests
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
big_o_n_lg_n_test_name
,
rms_o_n_lg_n_test_name
,
enum_auto_big_o_n_lg_n
);
// Add lambda tests
ADD_COMPLEXITY_CASES
(
&
ConsoleOutputTests
,
&
JSONOutputTests
,
&
CSVOutputTests
,
big_o_n_lg_n_test_name
,
rms_o_n_lg_n_test_name
,
lambda_big_o_n_lg_n
);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int
main
(
int
argc
,
char
*
argv
[])
{
// Add --color_print=false to argv since we don't want to match color codes.
char
new_arg
[
64
];
char
*
new_argv
[
64
];
std
::
copy
(
argv
,
argv
+
argc
,
new_argv
);
new_argv
[
argc
++
]
=
std
::
strcpy
(
new_arg
,
"--color_print=false"
);
benchmark
::
Initialize
(
&
argc
,
new_argv
);
benchmark
::
ConsoleReporter
CR
;
benchmark
::
JSONReporter
JR
;
benchmark
::
CSVReporter
CSVR
;
struct
ReporterTest
{
const
char
*
name
;
std
::
vector
<
TestCase
>&
output_cases
;
benchmark
::
BenchmarkReporter
&
reporter
;
std
::
stringstream
out_stream
;
std
::
stringstream
err_stream
;
ReporterTest
(
const
char
*
n
,
std
::
vector
<
TestCase
>&
out_tc
,
benchmark
::
BenchmarkReporter
&
br
)
:
name
(
n
),
output_cases
(
out_tc
),
reporter
(
br
)
{
reporter
.
SetOutputStream
(
&
out_stream
);
reporter
.
SetErrorStream
(
&
err_stream
);
}
}
TestCases
[]
=
{
{
"ConsoleReporter"
,
ConsoleOutputTests
,
CR
},
{
"JSONReporter"
,
JSONOutputTests
,
JR
},
{
"CSVReporter"
,
CSVOutputTests
,
CSVR
}
};
// Create the test reporter and run the benchmarks.
std
::
cout
<<
"Running benchmarks...
\n
"
;
TestReporter
test_rep
({
&
CR
,
&
JR
,
&
CSVR
});
benchmark
::
RunSpecifiedBenchmarks
(
&
test_rep
);
for
(
auto
&
rep_test
:
TestCases
)
{
std
::
string
msg
=
std
::
string
(
"
\n
Testing "
)
+
rep_test
.
name
+
" Output
\n
"
;
std
::
string
banner
(
msg
.
size
()
-
1
,
'-'
);
std
::
cout
<<
banner
<<
msg
<<
banner
<<
"
\n
"
;
std
::
cerr
<<
rep_test
.
err_stream
.
str
();
std
::
cout
<<
rep_test
.
out_stream
.
str
();
for
(
const
auto
&
TC
:
rep_test
.
output_cases
)
TC
.
Check
(
rep_test
.
out_stream
);
std
::
cout
<<
"
\n
"
;
}
}
return
0
;
}
}
BENCHMARK
(
BM_Extreme_Cases
)
->
Complexity
(
benchmark
::
oNLogN
);
BENCHMARK
(
BM_Extreme_Cases
)
->
Arg
(
42
)
->
Complexity
();
BENCHMARK_MAIN
()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment