Commit 3ff08c44 by Jamie Madill Committed by Commit Bot

Gold Tests: Implement flaky retries and sharding.

This CL implements two features: first, we can now split the tests into several shards which we can run independently on different machines. Second, it implements a simple retry mechanism that will retry a failed test a number of times and count any first passing test as a pass.

Bug: angleproject:5875
Bug: angleproject:5966
Change-Id: Ieef4ea0e4eebdb286a32de3b1e3fef4f2eda0600
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2897546
Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
Reviewed-by: Cody Northrop <cnorthrop@google.com>
Commit-Queue: Jamie Madill <jmadill@chromium.org>
parent 8328743a
...@@ -322,40 +322,7 @@ def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, go ...@@ -322,40 +322,7 @@ def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, go
return FAIL return FAIL
def main(): def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
parser = argparse.ArgumentParser()
parser.add_argument('--isolated-script-test-output', type=str)
parser.add_argument('--isolated-script-test-perf-output', type=str)
parser.add_argument('--isolated-script-test-filter', type=str)
parser.add_argument('--test-suite', help='Test suite to run.', default=DEFAULT_TEST_SUITE)
parser.add_argument('--render-test-output-dir', help='Directory to store screenshots')
parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
add_skia_gold_args(parser)
args, extra_flags = parser.parse_known_args()
env = os.environ.copy()
if 'GTEST_TOTAL_SHARDS' in env and int(env['GTEST_TOTAL_SHARDS']) != 1:
logging.error('Sharding not yet implemented.')
sys.exit(1)
results = {
'tests': {},
'interrupted': False,
'seconds_since_epoch': time.time(),
'path_delimiter': '.',
'version': 3,
'num_failures_by_type': {
FAIL: 0,
PASS: 0,
SKIP: 0,
},
}
result_tests = {}
def run_tests(args, tests, extra_flags, env, screenshot_dir):
keys = get_skia_gold_keys(args) keys = get_skia_gold_keys(args)
with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir: with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
...@@ -364,7 +331,7 @@ def main(): ...@@ -364,7 +331,7 @@ def main():
skia_gold_temp_dir, gold_properties) skia_gold_temp_dir, gold_properties)
gold_session = gold_session_manager.GetSkiaGoldSession(keys) gold_session = gold_session_manager.GetSkiaGoldSession(keys)
traces = [trace.split(' ')[0] for trace in tests['traces']] traces = [trace.split(' ')[0] for trace in tests]
for test in traces: for test in traces:
# Apply test filter if present. # Apply test filter if present.
...@@ -384,6 +351,11 @@ def main(): ...@@ -384,6 +351,11 @@ def main():
'--verbose-logging', '--verbose-logging',
] + extra_flags ] + extra_flags
result = None
for iteration in range(0, args.flaky_retries + 1):
if result != PASS:
if iteration > 0:
logging.info('Retrying flaky test: "%s"...' % test)
result = PASS if run_wrapper(args, cmd, env, tempfile_path) == 0 else FAIL result = PASS if run_wrapper(args, cmd, env, tempfile_path) == 0 else FAIL
artifacts = {} artifacts = {}
...@@ -394,15 +366,68 @@ def main(): ...@@ -394,15 +366,68 @@ def main():
screenshot_dir, test, artifacts) screenshot_dir, test, artifacts)
expected_result = SKIP if result == SKIP else PASS expected_result = SKIP if result == SKIP else PASS
result_tests[test] = {'expected': expected_result, 'actual': result} test_results[test] = {'expected': expected_result, 'actual': result}
if result == FAIL: if result == FAIL:
result_tests[test]['is_unexpected'] = True test_results[test]['is_unexpected'] = True
if len(artifacts) > 0: if len(artifacts) > 0:
result_tests[test]['artifacts'] = artifacts test_results[test]['artifacts'] = artifacts
results['num_failures_by_type'][result] += 1 results['num_failures_by_type'][result] += 1
return results['num_failures_by_type'][FAIL] == 0 return results['num_failures_by_type'][FAIL] == 0
def _shard_tests(tests, shard_count, shard_index):
return [tests[index] for index in range(shard_index, len(tests), shard_count)]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--isolated-script-test-output', type=str)
parser.add_argument('--isolated-script-test-perf-output', type=str)
parser.add_argument('--isolated-script-test-filter', type=str)
parser.add_argument('--test-suite', help='Test suite to run.', default=DEFAULT_TEST_SUITE)
parser.add_argument('--render-test-output-dir', help='Directory to store screenshots')
parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
parser.add_argument(
'--flaky-retries', help='Number of times to retry failed tests.', type=int, default=0)
parser.add_argument(
'--shard-count',
help='Number of shards for test splitting. Default is 1.',
type=int,
default=1)
parser.add_argument(
'--shard-index',
help='Index of the current shard for test splitting. Default is 0.',
type=int,
default=0)
add_skia_gold_args(parser)
args, extra_flags = parser.parse_known_args()
env = os.environ.copy()
if 'GTEST_TOTAL_SHARDS' in env and int(env['GTEST_TOTAL_SHARDS']) != 1:
if 'GTEST_SHARD_INDEX' not in env:
logging.error('Sharding params must be specified together.')
sys.exit(1)
args.shard_count = int(env['GTEST_TOTAL_SHARDS'])
args.shard_index = int(env['GTEST_SHARD_INDEX'])
results = {
'tests': {},
'interrupted': False,
'seconds_since_epoch': time.time(),
'path_delimiter': '.',
'version': 3,
'num_failures_by_type': {
FAIL: 0,
PASS: 0,
SKIP: 0,
},
}
test_results = {}
rc = 0 rc = 0
try: try:
...@@ -417,15 +442,21 @@ def main(): ...@@ -417,15 +442,21 @@ def main():
with open(json_name) as fp: with open(json_name) as fp:
tests = json.load(fp) tests = json.load(fp)
# Split tests according to sharding
sharded_tests = _shard_tests(tests['traces'], args.shard_count, args.shard_index)
if args.render_test_output_dir: if args.render_test_output_dir:
if not run_tests(args, tests, extra_flags, env, args.render_test_output_dir): if not _run_tests(args, sharded_tests, extra_flags, env, args.render_test_output_dir,
results, test_results):
rc = 1 rc = 1
elif 'ISOLATED_OUTDIR' in env: elif 'ISOLATED_OUTDIR' in env:
if not run_tests(args, tests, extra_flags, env, env['ISOLATED_OUTDIR']): if not _run_tests(args, sharded_tests, extra_flags, env, env['ISOLATED_OUTDIR'],
results, test_results):
rc = 1 rc = 1
else: else:
with temporary_dir('angle_trace_') as temp_dir: with temporary_dir('angle_trace_') as temp_dir:
if not run_tests(args, tests, extra_flags, env, temp_dir): if not _run_tests(args, sharded_tests, extra_flags, env, temp_dir, results,
test_results):
rc = 1 rc = 1
except Exception: except Exception:
...@@ -433,8 +464,8 @@ def main(): ...@@ -433,8 +464,8 @@ def main():
results['interrupted'] = True results['interrupted'] = True
rc = 1 rc = 1
if result_tests: if test_results:
results['tests']['angle_restricted_trace_gold_tests'] = result_tests results['tests']['angle_restricted_trace_gold_tests'] = test_results
if args.isolated_script_test_output: if args.isolated_script_test_output:
with open(args.isolated_script_test_output, 'w') as out_file: with open(args.isolated_script_test_output, 'w') as out_file:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment