Commit d99864c0 by Jamie Madill, committed by Angle LUCI CQ

Gold Tests: Fix flaky test handling.

Instead of just retrying the batch, retry individual tests. This should
correctly handle the Intel flake pattern.

Bug: angleproject:5415
Change-Id: I029c2514ff8e7a45184c5c105bf2d0350171da29
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2940049
Commit-Queue: Jamie Madill <jmadill@chromium.org>
Reviewed-by: Jonah Ryan-Davis <jonahr@google.com>
Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
parent f03f7bdb
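The heart of the change is the retry strategy: rather than re-running an entire batch whenever anything in it fails, the runner now carries only the failing traces into the next attempt. A minimal sketch of that pattern in isolation (run_with_retries and run_batch are illustrative stand-ins, not the actual runner code):

def run_with_retries(batch, flaky_retries, run_batch):
    # run_batch is a hypothetical callable mapping each trace in the batch
    # to True (pass) or False (fail); the real runner shells out to the
    # test suite and uploads results to Skia Gold instead.
    results = {}
    for iteration in range(flaky_retries + 1):
        if not batch:
            break  # every trace has passed; nothing left to retry
        if iteration > 0:
            print('Test run failed, running retry #%d...' % iteration)
        outcomes = run_batch(batch)
        for trace in batch:
            results[trace] = outcomes[trace]
        # Only the traces that failed this attempt are retried.
        batch = [trace for trace in batch if not outcomes[trace]]
    return results

Because the batch shrinks on each iteration, a single flaky trace no longer forces a re-run (and re-upload) of every other trace in its batch.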
infra/specs/angle.json
@@ -610,7 +610,8 @@
       "args": [
         "--test-machine-name",
         "${buildername}",
-        "--git-revision=${got_angle_revision}"
+        "--git-revision=${got_angle_revision}",
+        "--flaky-retries=1"
       ],
       "isolate_name": "angle_restricted_trace_gold_tests",
       "merge": {
@@ -1966,7 +1967,8 @@
       "args": [
         "--test-machine-name",
         "${buildername}",
-        "--git-revision=${got_angle_revision}"
+        "--git-revision=${got_angle_revision}",
+        "--flaky-retries=1"
       ],
       "isolate_name": "angle_restricted_trace_gold_tests",
       "merge": {
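Both builder entries now pass --flaky-retries=1 to the test harness. The flag itself would be declared by the runner's argument parser; a plausible sketch of that declaration (the help text and the default of 0 are assumptions, not copied from the source):

import argparse

parser = argparse.ArgumentParser()
# Assumed declaration of the flag the spec now passes; the real script's
# wording and default may differ.
parser.add_argument('--flaky-retries',
                    type=int,
                    default=0,
                    help='Number of times to retry failed tests.')

args = parser.parse_args(['--flaky-retries=1'])
assert args.flaky_retries == 1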
infra/specs/test_suite_exceptions.pyl
@@ -26,6 +26,20 @@
       },
     },
   },
+  'angle_restricted_trace_gold_tests': {
+    'modifications': {
+      'linux-intel': {
+        'args': [
+          '--flaky-retries=1',
+        ],
+      },
+      'win10-x64-intel': {
+        'args': [
+          '--flaky-retries=1',
+        ],
+      },
+    },
+  },
   'angle_white_box_tests': {
     'modifications': {
       # anglebug.com/5328 suspecting blue screen caused by multiprocess
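These per-builder exceptions are folded into the generated angle.json by the spec generator. A minimal sketch of what that merge step could look like (apply_exceptions and the data shapes here are hypothetical, for illustration only):

def apply_exceptions(test_name, builder, test_entry, exceptions):
    # Look up 'modifications' for this test on this builder and append any
    # extra args to whatever the base spec already set.
    mods = exceptions.get(test_name, {}).get('modifications', {}).get(builder)
    if mods:
        test_entry.setdefault('args', []).extend(mods.get('args', []))
    return test_entry

exceptions = {
    'angle_restricted_trace_gold_tests': {
        'modifications': {
            'linux-intel': {'args': ['--flaky-retries=1']},
        },
    },
}
entry = apply_exceptions('angle_restricted_trace_gold_tests', 'linux-intel',
                         {'args': ['--test-machine-name', '${buildername}']},
                         exceptions)
# entry['args'] now ends with '--flaky-retries=1'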
 {
   "infra/specs/angle.json":
-    "ead3cabcb0132402f7c722c072f1ae55",
+    "da36986392a628a408b2ce19318ac0e5",
   "infra/specs/generate_test_spec_json.py":
     "162566b21bca4ef0b815e411920c9f2d",
   "infra/specs/mixins.pyl":
     "937e107ab606846d61eec617d09e50d0",
   "infra/specs/test_suite_exceptions.pyl":
-    "aad1a4aed801277cc531733deab221b5",
+    "723460da84a90884a9668c07a0893390",
   "infra/specs/test_suites.pyl":
     "687b407a1fd7d83583817b9570ad983e",
   "infra/specs/variants.pyl":
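The updated md5 sums pin the current contents of each spec file, presumably so CI can detect when the generated angle.json is stale relative to its inputs. A sketch of such a freshness check (the file names come from the diff above; the checking logic itself is an assumption):

import hashlib
import json

def check_freshness(checksum_path):
    # Compare each tracked md5 against the file currently on disk; a
    # non-empty return value means the specs need regenerating.
    with open(checksum_path) as f:
        expected = json.load(f)
    stale = []
    for path, digest in expected.items():
        with open(path, 'rb') as f:
            if hashlib.md5(f.read()).hexdigest() != digest:
                stale.append(path)
    return stale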
@@ -358,7 +358,15 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
     batches = _get_batches(traces, args.batch_size)

     for batch in batches:
+        for iteration in range(0, args.flaky_retries + 1):
             with common.temporary_file() as tempfile_path:
+                # This is how we signal early exit
+                if not batch:
+                    logging.debug('All tests in batch completed.')
+                    break
+                if iteration > 0:
+                    logging.info('Test run failed, running retry #%d...' % iteration)
+
                 gtest_filter = _get_gtest_filter_for_batch(batch)
                 cmd = [
                     args.test_suite,
@@ -367,15 +375,10 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
                     '--one-frame-only',
                     '--verbose-logging',
                 ] + extra_flags
-                batch_result = None
-                for iteration in range(0, args.flaky_retries + 1):
-                    if batch_result != PASS:
-                        if iteration > 0:
-                            logging.info('Test run failed, running retry #%d...' % (iteration + 1))
                 batch_result = PASS if run_wrapper(args, cmd, env,
                                                    tempfile_path) == 0 else FAIL

+                next_batch = []
                 for trace in batch:
                     artifacts = {}
@@ -383,17 +386,25 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
                         logging.debug('upload test result: %s' % trace)
                         result = upload_test_result_to_skia_gold(args, gold_session_manager,
                                                                  gold_session, gold_properties,
-                                                                 screenshot_dir, trace, artifacts)
+                                                                 screenshot_dir, trace,
+                                                                 artifacts)
                     else:
                         result = batch_result

                     expected_result = SKIP if result == SKIP else PASS
                     test_results[trace] = {'expected': expected_result, 'actual': result}
-                    if result == FAIL:
-                        test_results[trace]['is_unexpected'] = True
                     if len(artifacts) > 0:
                         test_results[trace]['artifacts'] = artifacts
+                    if result == FAIL:
+                        next_batch.append(trace)
+                batch = next_batch
+
+    # These properties are recorded after iteration to ensure they only happen once.
+    for _, trace_results in test_results.items():
+        result = trace_results['actual']
         results['num_failures_by_type'][result] += 1
+        if result == FAIL:
+            trace_results['is_unexpected'] = True

     return results['num_failures_by_type'][FAIL] == 0
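A side effect of retrying inside the batch loop is that each trace may be visited more than once, which is why the tallies moved into a separate pass after all retries, as the new comment notes. A toy illustration of that design point (the trace names and dict shapes are stand-ins, not the real runner state): if num_failures_by_type were incremented inside the retry loop, a trace that fails twice before passing would be counted twice; tallying from the final per-trace results counts each trace exactly once.

test_results = {
    'trace_a': {'expected': 'PASS', 'actual': 'FAIL'},  # failed all retries
    'trace_b': {'expected': 'PASS', 'actual': 'PASS'},  # passed on a retry
}
num_failures_by_type = {'PASS': 0, 'FAIL': 0, 'SKIP': 0}
# One pass over the final results: each trace contributes exactly once,
# no matter how many retry iterations it went through.
for _, trace_results in test_results.items():
    result = trace_results['actual']
    num_failures_by_type[result] += 1
    if result == 'FAIL':
        trace_results['is_unexpected'] = True

print(num_failures_by_type)  # {'PASS': 1, 'FAIL': 1, 'SKIP': 0}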