Chen Yisong / benchmark / Commits / eee8b05c

Commit eee8b05c authored Dec 07, 2018 by Jusufadis Bakamovic, committed by Roman Lebedev on Dec 07, 2018.
[tools] Run autopep8 and apply fixes found. (#739)
parent eafa34a5
Showing 3 changed files with 15 additions and 14 deletions (+15, -14).
tools/compare.py        +2   -4
tools/gbench/report.py  +3   -4
tools/gbench/util.py    +10  -6
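For context, autopep8 mechanically rewrites Python source until pycodestyle stops flagging it, which matches the character of every hunk below. The exact command the author ran is not recorded on this page, so the following is only a plausible sketch of the same kind of cleanup, done through autopep8's documented fix_code() API:

    # Sketch only: reproduces the style of fixes in this commit; the
    # author's actual autopep8 invocation and options are unknown.
    import autopep8

    source = (
        "IT_Invalid    = 0\n"
        "try:\n"
        "    pass\n"
        "except:\n"
        "    pass\n"
    )

    # Default options apply pycodestyle-driven fixes such as E221
    # (multiple spaces before operator) and E722 (bare 'except');
    # the precise fix set depends on the autopep8 version.
    print(autopep8.fix_code(source))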
tools/compare.py (view file @ eee8b05c)
 #!/usr/bin/env python

+import unittest
 """
 compare.py - versatile benchmark output compare tool
 """

@@ -244,9 +245,6 @@ def main():
         print(ln)


-import unittest
-
-
 class TestParser(unittest.TestCase):
     def setUp(self):
         self.parser = create_parser()

@@ -402,7 +400,7 @@ class TestParser(unittest.TestCase):


 if __name__ == '__main__':
-    #unittest.main()
+    # unittest.main()
     main()

 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
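One side effect of this file's layout: unittest.main() stays commented out, so running compare.py executes main(), and the embedded TestParser suite has to be invoked through the unittest machinery instead. A usage sketch, assuming tools/ is on sys.path so the file imports as module 'compare':

    import unittest

    # Load and run the TestParser suite embedded in compare.py without
    # triggering its command-line main(); the module name 'compare' is
    # an assumption about sys.path.
    unittest.main(module='compare', exit=False)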
tools/gbench/report.py (view file @ eee8b05c)
+import unittest
 """report.py - Utilities for reporting statistics about benchmark results
 """
 import os

@@ -270,9 +271,6 @@ def generate_difference_report(
 # Unit tests

-import unittest
-
-
 class TestGetUniqueBenchmarkNames(unittest.TestCase):
     def load_results(self):
         import json

@@ -290,7 +288,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
             'BM_One',
             'BM_Two',
             'short',  # These two are not sorted
-            'medium', # These two are not sorted
+            'medium',  # These two are not sorted
         ]
         json = self.load_results()
         output_lines = get_unique_benchmark_names(json)

@@ -300,6 +298,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
         for i in range(0, len(output_lines)):
             self.assertEqual(expect_lines[i], output_lines[i])

+
 class TestReportDifference(unittest.TestCase):
     def load_results(self):
         import json
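The repeated '# These two are not sorted' comments encode the behavior under test: get_unique_benchmark_names must deduplicate names while preserving first-seen order, not sort them. A sketch consistent with that expectation (the real implementation lives earlier in report.py and may differ in detail):

    def unique_names_in_order(results):
        # Keep the first occurrence of each benchmark name, in input
        # order, matching what TestGetUniqueBenchmarkNames expects.
        seen = []
        for benchmark in results['benchmarks']:
            if benchmark['name'] not in seen:
                seen.append(benchmark['name'])
        return seen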
tools/gbench/util.py (view file @ eee8b05c)
@@ -7,11 +7,13 @@ import subprocess
 import sys

 # Input file type enumeration
-IT_Invalid    = 0
-IT_JSON       = 1
+IT_Invalid = 0
+IT_JSON = 1
 IT_Executable = 2

 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4
+
+
 def is_executable_file(filename):
     """
     Return 'True' if 'filename' names a valid file which is likely

@@ -46,7 +48,7 @@ def is_json_file(filename):
         with open(filename, 'r') as f:
             json.load(f)
         return True
-    except:
+    except BaseException:
         pass
     return False
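The one fix here that touches more than whitespace is E722: the bare 'except:' becomes 'except BaseException:'. In Python a bare except already catches BaseException, so behavior is unchanged; only the linter is satisfied. Assembled from the hunk above (lines outside the hunk are approximated), the helper reads:

    import json

    def is_json_file(filename):
        # True only if the file opens and parses as JSON; any failure
        # (missing file, bad encoding, parse error) is swallowed.
        try:
            with open(filename, 'r') as f:
                json.load(f)
            return True
        except BaseException:
            pass
        return False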
@@ -84,6 +86,7 @@ def check_input_file(filename):
         sys.exit(1)
     return ftype

+
 def find_benchmark_flag(prefix, benchmark_flags):
     """
     Search the specified list of flags for a flag matching `<prefix><arg>` and

@@ -97,6 +100,7 @@ def find_benchmark_flag(prefix, benchmark_flags):
             result = f[len(prefix):]
     return result

+
 def remove_benchmark_flags(prefix, benchmark_flags):
     """
     Return a new list containing the specified benchmark_flags except those

@@ -105,6 +109,7 @@ def remove_benchmark_flags(prefix, benchmark_flags):
     assert prefix.startswith('--') and prefix.endswith('=')
     return [f for f in benchmark_flags if not f.startswith(prefix)]

+
 def load_benchmark_results(fname):
     """
     Read benchmark output from a file and return the JSON object.
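Both helpers shown in these hunks operate on plain '--name=value' flag strings. A usage sketch with made-up values:

    flags = ['--benchmark_filter=BM_Foo', '--benchmark_out=out.json']

    # Returns the <arg> portion of the matching flag.
    find_benchmark_flag('--benchmark_out=', flags)
    # -> 'out.json'

    # Returns a new list with every '--benchmark_out=...' flag dropped.
    remove_benchmark_flags('--benchmark_out=', flags)
    # -> ['--benchmark_filter=BM_Foo']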
@@ -129,7 +134,7 @@ def run_benchmark(exe_name, benchmark_flags):
     thandle, output_name = tempfile.mkstemp()
     os.close(thandle)
     benchmark_flags = list(benchmark_flags) + \
-                      ['--benchmark_out=%s' % output_name]
+        ['--benchmark_out=%s' % output_name]
     cmd = [exe_name] + benchmark_flags
     print("RUNNING: %s" % ' '.join(cmd))
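The mkstemp()/close() pair in this hunk is the standard trick for reserving a unique output path that a child process, not this script, will write to. Condensed from the code above:

    import os
    import tempfile

    # Reserve a unique path, then close our handle so the benchmark
    # binary can write the file itself via --benchmark_out.
    thandle, output_name = tempfile.mkstemp()
    os.close(thandle)
    extra_flags = ['--benchmark_out=%s' % output_name]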
@@ -156,4 +161,4 @@ def run_or_load_benchmark(filename, benchmark_flags):
     elif ftype == IT_Executable:
         return run_benchmark(filename, benchmark_flags)
     else:
-        assert False # This branch is unreachable
\ No newline at end of file
+        assert False  # This branch is unreachable