Commit d39f8930 by Olli Etuaho

Improve perf_test_runner script

1) Make it possible to run the script from any working directory. 2) Abort the run if angle_perftests is executing multiple test cases. 3) Print some information on which exe and test is running. 4) Print the test runner output in case there was an error. BUG=angleproject:596 Change-Id: If30c2455dc39b0a776df03e6c1dda2ced90d73ba Reviewed-on: https://chromium-review.googlesource.com/319090 Reviewed-by: Jamie Madill <jmadill@chromium.org> Tested-by: Olli Etuaho <oetuaho@nvidia.com>
parent cd089732
...@@ -13,9 +13,16 @@ ...@@ -13,9 +13,16 @@
import subprocess
import sys
import os
import re

# Root of the ANGLE checkout, derived from this script's own location so the
# script can be launched from any working directory.
base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))

# Candidate build-output directories, relative to base_path.
# You might have to re-order these to find the specific version you want.
perftests_paths = [
    os.path.join('out', 'Release'),
    os.path.join('build', 'Release_x64'),
    os.path.join('build', 'Release_Win32')
]

# Name of the metric scraped out of the test runner's stdout.
metric = 'score'
# Accumulates one parsed score per test run.
scores = []
...@@ -59,7 +66,7 @@ perftests_path = "" ...@@ -59,7 +66,7 @@ perftests_path = ""
# Locate the first existing angle_perftests binary among the candidate build
# directories, anchored at base_path so the CWD does not matter.
# TODO(jmadill): Linux binaries
for path in perftests_paths:
    perftests_path = os.path.join(base_path, path, 'angle_perftests.exe')
    if os.path.exists(perftests_path):
        # Keep the first match; earlier entries in perftests_paths win.
        break
...@@ -69,6 +76,9 @@ if not os.path.exists(perftests_path): ...@@ -69,6 +76,9 @@ if not os.path.exists(perftests_path):
# Default test case; a single command-line argument overrides it.
test_name = "DrawCallPerfBenchmark.Run/d3d11_null"

if len(sys.argv) >= 2:
    test_name = sys.argv[1]

# Print AFTER applying the argv override so the reported name is the test
# that will actually run (the committed order printed the stale default).
print('Using test executable: ' + perftests_path)
print('Test name: ' + test_name)
...@@ -78,16 +88,24 @@ while True: ...@@ -78,16 +88,24 @@ while True:
# Parse the score out of the runner's stdout.  The runner prints
# "<metric>= <value> ...", so the value starts len(metric) + 2 characters
# past the start of the match (past "= ") and ends at the next space.
start_index = output.find(metric + "=")
if start_index == -1:
    print("Did not find the score of the specified test in output:")
    print(output)
    sys.exit(1)
start_index += len(metric) + 2

end_index = output[start_index:].find(" ")
if end_index == -1:
    print("Error parsing output:")
    print(output)
    sys.exit(2)

# Abort if the filter matched more than one test case: a single aggregate
# score would be meaningless.  Raw string fixes the invalid '\d' escape in
# the committed non-raw literal.
m = re.search(r'Running (\d+) tests', output)
if m and int(m.group(1)) > 1:
    print("Found more than one test result in output:")
    print(output)
    sys.exit(3)

# end_index was relative to the slice; make it absolute before slicing.
end_index += start_index

score = int(output[start_index:end_index])
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment