Commit 163f6ac0 by Jamie Madill Committed by Angle LUCI CQ

Gold Tests: Add batching.

This should speed up test runs on Android, which are currently much slower than the other platforms. Bug: angleproject:5966 Change-Id: I57890522b64d1d195ea0be56b4f68b6d54192d64 Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2919888 Reviewed-by: Yuly Novikov <ynovikov@chromium.org> Commit-Queue: Jamie Madill <jmadill@chromium.org>
parent 565bba3d
...@@ -452,7 +452,7 @@ ...@@ -452,7 +452,7 @@
} }
], ],
"service_account": "chrome-gpu-gold@chops-service-accounts.iam.gserviceaccount.com", "service_account": "chrome-gpu-gold@chops-service-accounts.iam.gserviceaccount.com",
"shards": 10 "shards": 6
}, },
"test_id_prefix": "ninja://src/tests/restricted_traces:angle_restricted_trace_gold_tests/" "test_id_prefix": "ninja://src/tests/restricted_traces:angle_restricted_trace_gold_tests/"
} }
......
...@@ -697,7 +697,7 @@ ...@@ -697,7 +697,7 @@
'chrome-gpu-gold-service-account', 'chrome-gpu-gold-service-account',
], ],
'android_swarming': { 'android_swarming': {
'shards': 10, 'shards': 6,
}, },
} }
}, },
......
{ {
"infra/specs/angle.json": "infra/specs/angle.json":
"7aa834fc28218feab49fc9e493cd2df0", "328feb5843eb4c1f127616d894de5e6a",
"infra/specs/generate_test_spec_json.py": "infra/specs/generate_test_spec_json.py":
"e1c8a771f751adad715d7bea900fc8e1", "e1c8a771f751adad715d7bea900fc8e1",
"infra/specs/mixins.pyl": "infra/specs/mixins.pyl":
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
"infra/specs/test_suite_exceptions.pyl": "infra/specs/test_suite_exceptions.pyl":
"aad1a4aed801277cc531733deab221b5", "aad1a4aed801277cc531733deab221b5",
"infra/specs/test_suites.pyl": "infra/specs/test_suites.pyl":
"cbe697ca035dc9310f6821e9b628cc6a", "b6fdb5590094d3338aa2a654c3ec0646",
"infra/specs/variants.pyl": "infra/specs/variants.pyl":
"8cfcaa99fa07ad2a2d5d14f220fd5037", "8cfcaa99fa07ad2a2d5d14f220fd5037",
"infra/specs/waterfalls.pyl": "infra/specs/waterfalls.pyl":
......
#! /usr/bin/env python #! /usr/bin/env vpython
# #
# [VPYTHON:BEGIN] # [VPYTHON:BEGIN]
# wheel: < # wheel: <
...@@ -60,8 +60,9 @@ def IsWindows(): ...@@ -60,8 +60,9 @@ def IsWindows():
DEFAULT_TEST_SUITE = 'angle_perftests' DEFAULT_TEST_SUITE = 'angle_perftests'
DEFAULT_TEST_PREFIX = '--gtest_filter=TracePerfTest.Run/vulkan_' DEFAULT_TEST_PREFIX = 'TracePerfTest.Run/vulkan_'
DEFAULT_SCREENSHOT_PREFIX = 'angle_vulkan_' DEFAULT_SCREENSHOT_PREFIX = 'angle_vulkan_'
DEFAULT_BATCH_SIZE = 5
# Filters out stuff like: " I 72.572s run_tests_on_device(96071FFAZ00096) " # Filters out stuff like: " I 72.572s run_tests_on_device(96071FFAZ00096) "
ANDROID_LOGGING_PREFIX = r'I +\d+.\d+s \w+\(\w+\) ' ANDROID_LOGGING_PREFIX = r'I +\d+.\d+s \w+\(\w+\) '
...@@ -325,6 +326,16 @@ def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, go ...@@ -325,6 +326,16 @@ def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, go
return FAIL return FAIL
def _get_batches(traces, batch_size):
for i in range(0, len(traces), batch_size):
yield traces[i:i + batch_size]
def _get_gtest_filter_for_batch(batch):
expanded = ['%s%s' % (DEFAULT_TEST_PREFIX, trace) for trace in batch]
return '--gtest_filter=%s' % ':'.join(expanded)
def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results): def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
keys = get_skia_gold_keys(args) keys = get_skia_gold_keys(args)
...@@ -335,20 +346,27 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_resu ...@@ -335,20 +346,27 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_resu
gold_session = gold_session_manager.GetSkiaGoldSession(keys) gold_session = gold_session_manager.GetSkiaGoldSession(keys)
traces = [trace.split(' ')[0] for trace in tests] traces = [trace.split(' ')[0] for trace in tests]
for test in traces:
# Apply test filter if present. if args.isolated_script_test_filter:
if args.isolated_script_test_filter: filtered = []
full_name = 'angle_restricted_trace_gold_tests.%s' % test for trace in traces:
# Apply test filter if present.
full_name = 'angle_restricted_trace_gold_tests.%s' % trace
if not fnmatch.fnmatch(full_name, args.isolated_script_test_filter): if not fnmatch.fnmatch(full_name, args.isolated_script_test_filter):
logging.info('Skipping test %s because it does not match filter %s' % logging.info('Skipping test %s because it does not match filter %s' %
(full_name, args.isolated_script_test_filter)) (full_name, args.isolated_script_test_filter))
continue else:
filtered += [trace]
traces = filtered
batches = _get_batches(traces, args.batch_size)
for batch in batches:
with common.temporary_file() as tempfile_path: with common.temporary_file() as tempfile_path:
gtest_filter = _get_gtest_filter_for_batch(batch)
cmd = [ cmd = [
args.test_suite, args.test_suite,
DEFAULT_TEST_PREFIX + test, gtest_filter,
'--render-test-output-dir=%s' % screenshot_dir, '--render-test-output-dir=%s' % screenshot_dir,
'--one-frame-only', '--one-frame-only',
'--verbose-logging', '--verbose-logging',
...@@ -358,23 +376,24 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_resu ...@@ -358,23 +376,24 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_resu
for iteration in range(0, args.flaky_retries + 1): for iteration in range(0, args.flaky_retries + 1):
if result != PASS: if result != PASS:
if iteration > 0: if iteration > 0:
logging.info('Retrying flaky test: "%s"...' % test) logging.info('Test run failed, running retry #%d...' % (iteration + 1))
result = PASS if run_wrapper(args, cmd, env, tempfile_path) == 0 else FAIL result = PASS if run_wrapper(args, cmd, env, tempfile_path) == 0 else FAIL
artifacts = {} artifacts = {}
if result == PASS: for trace in batch:
result = upload_test_result_to_skia_gold(args, gold_session_manager, if result == PASS:
gold_session, gold_properties, result = upload_test_result_to_skia_gold(args, gold_session_manager,
screenshot_dir, test, artifacts) gold_session, gold_properties,
screenshot_dir, trace, artifacts)
expected_result = SKIP if result == SKIP else PASS expected_result = SKIP if result == SKIP else PASS
test_results[test] = {'expected': expected_result, 'actual': result} test_results[trace] = {'expected': expected_result, 'actual': result}
if result == FAIL: if result == FAIL:
test_results[test]['is_unexpected'] = True test_results[trace]['is_unexpected'] = True
if len(artifacts) > 0: if len(artifacts) > 0:
test_results[test]['artifacts'] = artifacts test_results[trace]['artifacts'] = artifacts
results['num_failures_by_type'][result] += 1 results['num_failures_by_type'][result] += 1
return results['num_failures_by_type'][FAIL] == 0 return results['num_failures_by_type'][FAIL] == 0
...@@ -403,6 +422,11 @@ def main(): ...@@ -403,6 +422,11 @@ def main():
help='Index of the current shard for test splitting. Default is 0.', help='Index of the current shard for test splitting. Default is 0.',
type=int, type=int,
default=0) default=0)
parser.add_argument(
'--batch-size',
help='Number of tests to run in a group. Default: %d' % DEFAULT_BATCH_SIZE,
type=int,
default=DEFAULT_BATCH_SIZE)
add_skia_gold_args(parser) add_skia_gold_args(parser)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment