Commit d99864c0 by Jamie Madill Committed by Angle LUCI CQ

Gold Tests: Fix flaky test handling.

Instead of just retrying the batch, retry individual tests. This should correctly handle the Intel flake pattern. Bug: angleproject:5415 Change-Id: I029c2514ff8e7a45184c5c105bf2d0350171da29 Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2940049 Commit-Queue: Jamie Madill <jmadill@chromium.org> Reviewed-by: Jonah Ryan-Davis <jonahr@google.com> Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
parent f03f7bdb
...@@ -610,7 +610,8 @@ ...@@ -610,7 +610,8 @@
"args": [ "args": [
"--test-machine-name", "--test-machine-name",
"${buildername}", "${buildername}",
"--git-revision=${got_angle_revision}" "--git-revision=${got_angle_revision}",
"--flaky-retries=1"
], ],
"isolate_name": "angle_restricted_trace_gold_tests", "isolate_name": "angle_restricted_trace_gold_tests",
"merge": { "merge": {
...@@ -1966,7 +1967,8 @@ ...@@ -1966,7 +1967,8 @@
"args": [ "args": [
"--test-machine-name", "--test-machine-name",
"${buildername}", "${buildername}",
"--git-revision=${got_angle_revision}" "--git-revision=${got_angle_revision}",
"--flaky-retries=1"
], ],
"isolate_name": "angle_restricted_trace_gold_tests", "isolate_name": "angle_restricted_trace_gold_tests",
"merge": { "merge": {
......
...@@ -26,6 +26,20 @@ ...@@ -26,6 +26,20 @@
}, },
}, },
}, },
'angle_restricted_trace_gold_tests': {
'modifications': {
'linux-intel': {
'args': [
'--flaky-retries=1',
],
},
'win10-x64-intel': {
'args': [
'--flaky-retries=1',
],
},
},
},
'angle_white_box_tests': { 'angle_white_box_tests': {
'modifications': { 'modifications': {
# anglebug.com/5328 suspecting blue screen caused by multiprocess # anglebug.com/5328 suspecting blue screen caused by multiprocess
......
{ {
"infra/specs/angle.json": "infra/specs/angle.json":
"ead3cabcb0132402f7c722c072f1ae55", "da36986392a628a408b2ce19318ac0e5",
"infra/specs/generate_test_spec_json.py": "infra/specs/generate_test_spec_json.py":
"162566b21bca4ef0b815e411920c9f2d", "162566b21bca4ef0b815e411920c9f2d",
"infra/specs/mixins.pyl": "infra/specs/mixins.pyl":
"937e107ab606846d61eec617d09e50d0", "937e107ab606846d61eec617d09e50d0",
"infra/specs/test_suite_exceptions.pyl": "infra/specs/test_suite_exceptions.pyl":
"aad1a4aed801277cc531733deab221b5", "723460da84a90884a9668c07a0893390",
"infra/specs/test_suites.pyl": "infra/specs/test_suites.pyl":
"687b407a1fd7d83583817b9570ad983e", "687b407a1fd7d83583817b9570ad983e",
"infra/specs/variants.pyl": "infra/specs/variants.pyl":
......
...@@ -358,42 +358,53 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_resu ...@@ -358,42 +358,53 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_resu
batches = _get_batches(traces, args.batch_size) batches = _get_batches(traces, args.batch_size)
for batch in batches: for batch in batches:
with common.temporary_file() as tempfile_path: for iteration in range(0, args.flaky_retries + 1):
gtest_filter = _get_gtest_filter_for_batch(batch) with common.temporary_file() as tempfile_path:
cmd = [ # This is how we signal early exit
args.test_suite, if not batch:
gtest_filter, logging.debug('All tests in batch completed.')
'--render-test-output-dir=%s' % screenshot_dir, break
'--one-frame-only', if iteration > 0:
'--verbose-logging', logging.info('Test run failed, running retry #%d...' % iteration)
] + extra_flags
gtest_filter = _get_gtest_filter_for_batch(batch)
batch_result = None cmd = [
for iteration in range(0, args.flaky_retries + 1): args.test_suite,
if batch_result != PASS: gtest_filter,
if iteration > 0: '--render-test-output-dir=%s' % screenshot_dir,
logging.info('Test run failed, running retry #%d...' % (iteration + 1)) '--one-frame-only',
batch_result = PASS if run_wrapper(args, cmd, env, '--verbose-logging',
tempfile_path) == 0 else FAIL ] + extra_flags
batch_result = PASS if run_wrapper(args, cmd, env,
for trace in batch: tempfile_path) == 0 else FAIL
artifacts = {}
next_batch = []
if batch_result == PASS: for trace in batch:
logging.debug('upload test result: %s' % trace) artifacts = {}
result = upload_test_result_to_skia_gold(args, gold_session_manager,
gold_session, gold_properties, if batch_result == PASS:
screenshot_dir, trace, artifacts) logging.debug('upload test result: %s' % trace)
else: result = upload_test_result_to_skia_gold(args, gold_session_manager,
result = batch_result gold_session, gold_properties,
screenshot_dir, trace,
expected_result = SKIP if result == SKIP else PASS artifacts)
test_results[trace] = {'expected': expected_result, 'actual': result} else:
if result == FAIL: result = batch_result
test_results[trace]['is_unexpected'] = True
if len(artifacts) > 0: expected_result = SKIP if result == SKIP else PASS
test_results[trace]['artifacts'] = artifacts test_results[trace] = {'expected': expected_result, 'actual': result}
results['num_failures_by_type'][result] += 1 if len(artifacts) > 0:
test_results[trace]['artifacts'] = artifacts
if result == FAIL:
next_batch.append(trace)
batch = next_batch
# These properties are recorded after iteration to ensure they only happen once.
for _, trace_results in test_results.items():
result = trace_results['actual']
results['num_failures_by_type'][result] += 1
if result == FAIL:
trace_results['is_unexpected'] = True
return results['num_failures_by_type'][FAIL] == 0 return results['num_failures_by_type'][FAIL] == 0
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment