Commit 5fd72bd3 by Jamie Madill

Use Chromium mirror of tools/perf.

This will keep ANGLE in sync with Chromium changes and reduce the maintenance burden.

Bug: angleproject:6037
Change-Id: Ibed4031d8cb3468e6be6501bda21404c42fb2be6
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2953043
Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
parent 2e772e04
......@@ -86,6 +86,7 @@
/tools/mb
/tools/md_browser
/tools/memory
/tools/perf
/tools/protoc_wrapper
/tools/python
/tools/skia_goldctl
......
......@@ -573,6 +573,11 @@ deps = {
'condition': 'not build_with_chromium',
},
'tools/perf': {
'url': '{chromium_git}/chromium/src/tools/perf@5c84710692637a3b3618e88b7d145b2189da71c9',
'condition': 'not build_with_chromium',
},
'tools/protoc_wrapper': {
'url': '{chromium_git}/chromium/src/tools/protoc_wrapper@57697a9873d45b2d19117eb76fbf327ba2288095',
'condition': 'not build_with_chromium',
......
......@@ -80,6 +80,7 @@ ANGLE_CHROMIUM_DEPS = [
'tools/mb',
'tools/md_browser',
'tools/memory',
'tools/perf',
'tools/protoc_wrapper',
'tools/python',
'tools/skia_goldctl/linux',
......
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import os
import sys
@contextlib.contextmanager
def SysPath(path, position=None):
    """Temporarily puts |path| on sys.path.

    The path is appended by default, or inserted at |position| when given, and
    removed again when the context exits.
    """
    insert_at = len(sys.path) if position is None else position
    sys.path.insert(insert_at, path)
    try:
        yield
    finally:
        # Cheap common case: the entry is still at the tail.
        if sys.path[-1] == path:
            sys.path.pop()
        else:
            sys.path.remove(path)
def GetChromiumSrcDir():
    """Absolute path of the Chromium src/ dir (three levels above this file)."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, os.pardir, os.pardir, os.pardir))
def GetTelemetryDir():
    """Path to third_party/catapult/telemetry in the Chromium checkout."""
    catapult = os.path.join(GetChromiumSrcDir(), 'third_party', 'catapult')
    return os.path.join(catapult, 'telemetry')
def GetTracingDir():
    """Path to third_party/catapult/tracing in the Chromium checkout."""
    catapult = os.path.join(GetChromiumSrcDir(), 'third_party', 'catapult')
    return os.path.join(catapult, 'tracing')
def GetPyUtilsDir():
    """Path to catapult's common/py_utils library in the Chromium checkout."""
    catapult = os.path.join(GetChromiumSrcDir(), 'third_party', 'catapult')
    return os.path.join(catapult, 'common', 'py_utils')
def GetPerfDir():
    """Path to tools/perf in the Chromium checkout."""
    src_dir = GetChromiumSrcDir()
    return os.path.join(src_dir, 'tools', 'perf')
def GetPerfStorySetsDir():
    """Path to the page_sets directory inside tools/perf."""
    perf_dir = GetPerfDir()
    return os.path.join(perf_dir, 'page_sets')
def GetOfficialBenchmarksDir():
    """Path to the benchmarks directory inside tools/perf."""
    perf_dir = GetPerfDir()
    return os.path.join(perf_dir, 'benchmarks')
def GetContribDir():
    """Path to the contrib directory inside tools/perf."""
    perf_dir = GetPerfDir()
    return os.path.join(perf_dir, 'contrib')
def GetAndroidPylibDir():
    """Path to build/android (the Android pylib) in the Chromium checkout."""
    src_dir = GetChromiumSrcDir()
    return os.path.join(src_dir, 'build', 'android')
def GetVariationsDir():
    """Path to tools/variations in the Chromium checkout."""
    src_dir = GetChromiumSrcDir()
    return os.path.join(src_dir, 'tools', 'variations')
def AddTelemetryToPath():
    """Makes the telemetry library importable (idempotent)."""
    path = GetTelemetryDir()
    if path not in sys.path:
        sys.path.insert(1, path)
def AddTracingToPath():
    """Makes catapult's tracing library importable (idempotent)."""
    path = GetTracingDir()
    if path not in sys.path:
        sys.path.insert(1, path)
def AddPyUtilsToPath():
    """Makes catapult's py_utils library importable (idempotent)."""
    path = GetPyUtilsDir()
    if path not in sys.path:
        sys.path.insert(1, path)
def AddAndroidPylibToPath():
    """Makes the Android pylib (build/android) importable (idempotent)."""
    path = GetAndroidPylibDir()
    if path not in sys.path:
        sys.path.insert(1, path)
def GetExpectationsPath():
    """Path to the test expectations config file inside tools/perf."""
    perf_dir = GetPerfDir()
    return os.path.join(perf_dir, 'expectations.config')
#!/usr/bin/env vpython
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for adding results to perf dashboard."""
# This file was copy-pasted over from:
# //build/scripts/slave/results_dashboard.py
# That file is now deprecated and this one is
# the new source of truth.
import calendar
import datetime
import httplib
import json
import os
import subprocess
import sys
import time
import traceback
import zlib
import logging
import six.moves.urllib.error # pylint: disable=import-error
import six.moves.urllib.parse # pylint: disable=import-error
import six.moves.urllib.request # pylint: disable=import-error
# TODO(crbug.com/996778): Figure out how to get httplib2 hermetically.
import httplib2 # pylint: disable=import-error
from core import path_util
# Configure root logging once at import time so every uploader module shares
# the same format (level, timestamp, pid, and call site).
logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
    ' %(module)s.%(funcName)s:%(lineno)d %(message)s')

# The paths in the results dashboard URLs for sending results.
SEND_RESULTS_PATH = '/add_point'
SEND_HISTOGRAMS_PATH = '/add_histograms'
class SendResultException(Exception):
    """Base class for errors raised while uploading results to the dashboard."""
    pass
class SendResultsRetryException(SendResultException):
    """A recoverable upload error; the caller may retry the upload."""
    pass
class SendResultsFatalException(SendResultException):
    """An unrecoverable upload error; the caller must not retry."""
    pass
def LuciAuthTokenGeneratorCallback():
    """Returns a fresh OAuth access token from `luci-auth token`.

    Returns:
        The raw stdout of the luci-auth invocation (the token).

    Raises:
        RuntimeError: if luci-auth exits non-zero.
    """
    args = ['luci-auth', 'token']
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Drain both pipes before checking the exit code: calling wait() first can
    # deadlock if the child fills an OS pipe buffer.
    stdout, stderr = p.communicate()
    if p.returncode == 0:
        return stdout
    raise RuntimeError('Error generating authentication token.\nStdout: %s\nStderr: %s' %
                       (stdout, stderr))
def SendResults(data,
                data_label,
                url,
                send_as_histograms=False,
                token_generator_callback=LuciAuthTokenGeneratorCallback,
                num_retries=4):
    """Sends results to the Chrome Performance Dashboard.

    This function tries to send the given data to the dashboard, retrying with
    exponential backoff on recoverable errors.

    Args:
        data: The data to try to send. Must be JSON-serializable.
        data_label: string name of the data to be uploaded. This is only used
            for logging purposes.
        url: Performance Dashboard URL (including schema).
        send_as_histograms: True if result is to be sent to /add_histograms.
        token_generator_callback: a callback for generating the authentication
            token to upload to the perf dashboard. Defaults to
            LuciAuthTokenGeneratorCallback.
        num_retries: Number of times to retry uploading to the perf dashboard
            upon recoverable error.

    Returns:
        True if the data was uploaded successfully, False otherwise.
    """
    start = time.time()
    all_data_uploaded = False
    data_type = ('histogram' if send_as_histograms else 'chartjson')
    dashboard_data_str = json.dumps(data)
    # When the perf dashboard is overloaded, it takes some time to spin up new
    # instances, so sleep before retrying. For more details, see
    # crbug.com/867379.
    wait_before_next_retry_in_seconds = 15
    # Use range, not the Python-2-only xrange: the rest of this module already
    # relies on six for py2/py3 portability, and xrange breaks under Python 3.
    for i in range(1, num_retries + 1):
        try:
            logging.info('Sending %s result of %s to dashboard (attempt %i out of %i).' %
                         (data_type, data_label, i, num_retries))
            if send_as_histograms:
                _SendHistogramJson(url, dashboard_data_str, token_generator_callback)
            else:
                # TODO(eakuefner): Remove this logic once all bots use histograms.
                _SendResultsJson(url, dashboard_data_str, token_generator_callback)
            all_data_uploaded = True
            break
        except SendResultsRetryException as e:
            logging.error('Error while uploading %s data: %s' % (data_type, str(e)))
            time.sleep(wait_before_next_retry_in_seconds)
            # Exponential backoff between attempts.
            wait_before_next_retry_in_seconds *= 2
        except SendResultsFatalException as e:
            logging.error('Fatal error while uploading %s data: %s' % (data_type, str(e)))
            break
        except Exception:
            logging.error('Unexpected error while uploading %s data: %s' %
                          (data_type, traceback.format_exc()))
            break
    logging.info('Time spent sending results to %s: %s' % (url, time.time() - start))
    return all_data_uploaded
def MakeHistogramSetWithDiagnostics(histograms_file,
                                    test_name,
                                    bot,
                                    buildername,
                                    buildnumber,
                                    project,
                                    buildbucket,
                                    revisions_dict,
                                    is_reference_build,
                                    perf_dashboard_machine_group,
                                    output_dir,
                                    max_bytes=0):
    """Merges Histograms, adds Diagnostics, and batches the results.

    Runs catapult's add_reserved_diagnostics script as a subprocess to attach
    reserved diagnostics (benchmark, bot, build, master, ...) to the histograms
    in |histograms_file|, writing its output into |output_dir|.

    Args:
        histograms_file: input filename
        test_name: benchmark name, passed as the --benchmarks diagnostic.
        bot: perf bot id, passed as the --bots diagnostic.
        buildername: builder name; used only for the build status URL.
        buildnumber: build number (--builds diagnostic and status URL).
        project: LUCI project for the build status URL.
        buildbucket: LUCI bucket for the build status URL.
        revisions_dict: maps add_reserved_diagnostics flag names to revision
            values; forwarded verbatim as flag/value pairs.
        is_reference_build: True for reference-build results.
        perf_dashboard_machine_group: dashboard "master" (--masters).
        output_dir: output directory
        max_bytes: If non-zero, tries to produce files no larger than max_bytes.
            (May generate a file that is larger than max_bytes if max_bytes is
            smaller than a single Histogram.)
    """
    add_diagnostics_args = []
    add_diagnostics_args.extend([
        '--benchmarks',
        test_name,
        '--bots',
        bot,
        '--builds',
        buildnumber,
        '--masters',
        perf_dashboard_machine_group,
        '--is_reference_build',
        # The script expects an empty string (not 'false') for non-ref builds.
        'true' if is_reference_build else '',
    ])
    if max_bytes:
        add_diagnostics_args.extend(['--max_bytes', max_bytes])
    build_status_url = _MakeBuildStatusUrl(project, buildbucket, buildername, buildnumber)
    if build_status_url:
        add_diagnostics_args.extend(['--build_urls_k', 'Build Status'])
        add_diagnostics_args.extend(['--build_urls_v', build_status_url])
    for k, v in revisions_dict.items():
        add_diagnostics_args.extend((k, v))
    add_diagnostics_args.append(histograms_file)
    # Subprocess only accepts string args
    add_diagnostics_args = [str(v) for v in add_diagnostics_args]
    add_reserved_diagnostics_path = os.path.join(path_util.GetChromiumSrcDir(), 'third_party',
                                                 'catapult', 'tracing', 'bin',
                                                 'add_reserved_diagnostics')
    # This script may write multiple files to output_dir.
    output_path = os.path.join(output_dir, test_name + '.json')
    cmd = ([sys.executable, add_reserved_diagnostics_path] + add_diagnostics_args +
           ['--output_path', output_path])
    logging.info(cmd)
    subprocess.check_call(cmd)
def MakeListOfPoints(charts,
                     bot,
                     test_name,
                     project,
                     buildbucket,
                     buildername,
                     buildnumber,
                     supplemental_columns,
                     perf_dashboard_machine_group,
                     revisions_dict=None):
    """Constructs a list of point dictionaries to send.

    The format output by this function is the original format for sending data
    to the perf dashboard.

    Args:
        charts: A dictionary of chart names to chart data, as generated by the
            log processor classes (see process_log_utils.GraphingLogProcessor).
        bot: A string which comes from perf_id, e.g. linux-release.
        test_name: A test suite name, e.g. sunspider.
        project: LUCI project name, used for the build status link.
        buildbucket: LUCI bucket name, used for the build status link.
        buildername: Builder name (for stdio links).
        buildnumber: Build number (for stdio links).
        supplemental_columns: A dictionary of extra data to send with a point.
        perf_dashboard_machine_group: Builder's perf machine group.
        revisions_dict: optional revision info; when None, revision data is
            taken from each chart's own data.

    Returns:
        A list of dictionaries in the format accepted by the perf dashboard.
        Each dictionary has the keys "master", "bot", "test", "value",
        "revision". The full details of this format are described at
        http://goo.gl/TcJliv.
    """
    results = []
    for chart_name, chart_data in sorted(charts.items()):
        point_id, revision_columns = _RevisionNumberColumns(
            revisions_dict if revisions_dict is not None else chart_data, prefix='r_')

        for trace_name, trace_values in sorted(chart_data['traces'].items()):
            is_important = trace_name in chart_data.get('important', [])
            test_path = _TestPath(test_name, chart_name, trace_name)
            result = {
                'master': perf_dashboard_machine_group,
                'bot': bot,
                'test': test_path,
                'revision': point_id,
                'supplemental_columns': {}
            }
            # Add the supplemental_columns values that were passed in after the
            # calculated revision column values so that these can be overwritten.
            result['supplemental_columns'].update(revision_columns)
            result['supplemental_columns'].update(
                _GetBuildStatusUriColumn(project, buildbucket, buildername, buildnumber))
            result['supplemental_columns'].update(supplemental_columns)

            # trace_values is a (value, error) pair.
            result['value'] = trace_values[0]
            result['error'] = trace_values[1]

            # Add other properties to this result dictionary if available.
            if chart_data.get('units'):
                result['units'] = chart_data['units']
            if is_important:
                result['important'] = True

            results.append(result)

    return results
def MakeDashboardJsonV1(chart_json, revision_dict, test_name, bot, project, buildbucket,
                        buildername, buildnumber, supplemental_dict, is_ref,
                        perf_dashboard_machine_group):
    """Generates Dashboard JSON in the new Telemetry format.

    See http://goo.gl/mDZHPl for more info on the format.

    Args:
        chart_json: A dict containing the telemetry output.
        revision_dict: Dictionary of revisions to include, include "rev",
            which determines the point ID.
        test_name: A test suite name, e.g. sunspider.
        bot: A string which comes from perf_id, e.g. linux-release.
        project: LUCI project name, used for the build status link.
        buildbucket: LUCI bucket name, used for the build status link.
        buildername: Builder name (for stdio links).
        buildnumber: Build number (for stdio links).
        supplemental_dict: A dictionary of extra data to send with a point;
            this includes revisions ('r_'-prefixed keys) and annotation data
            ('a_'-prefixed keys).
        is_ref: True if this is a reference build, False otherwise.
        perf_dashboard_machine_group: Builder's perf machine group.

    Returns:
        A dictionary in the format accepted by the perf dashboard.
    """
    if not chart_json:
        # '@@@STEP_FAILURE@@@' is a buildbot annotation that marks the step red.
        logging.error('Error: No json output from telemetry.')
        logging.error('@@@STEP_FAILURE@@@')
    point_id, versions = _RevisionNumberColumns(revision_dict, prefix='')

    supplemental = {}
    for key in supplemental_dict:
        # 'r_' keys carry revision info, 'a_' keys carry annotations.
        if key.startswith('r_'):
            versions[key.replace('r_', '', 1)] = supplemental_dict[key]
        if key.startswith('a_'):
            supplemental[key.replace('a_', '', 1)] = supplemental_dict[key]

    supplemental.update(_GetBuildStatusUriColumn(project, buildbucket, buildername, buildnumber))

    # TODO(sullivan): The android recipe sends "test_name.reference"
    # while the desktop one just sends "test_name" for ref builds. Need
    # to figure out why.
    # https://github.com/catapult-project/catapult/issues/2046
    test_name = test_name.replace('.reference', '')

    fields = {
        'master': perf_dashboard_machine_group,
        'bot': bot,
        'test_suite_name': test_name,
        'point_id': point_id,
        'supplemental': supplemental,
        'versions': versions,
        'chart_data': chart_json,
        'is_ref': is_ref,
    }
    return fields
def _MakeBuildStatusUrl(project, buildbucket, buildername, buildnumber):
    """Builds a ci.chromium.org URL for the given build, or None.

    Returns None unless both buildername and buildnumber are set. Missing
    project/buildbucket default to 'chrome'/'ci'.
    """
    if not (buildername and buildnumber):
        return None
    quote = six.moves.urllib.parse.quote
    return 'https://ci.chromium.org/ui/p/%s/builders/%s/%s/%s' % (
        quote(project or 'chrome'), quote(buildbucket or 'ci'), quote(buildername),
        quote(str(buildnumber)))
def _GetBuildStatusUriColumn(project, buildbucket, buildername, buildnumber):
    """Gets a supplemental column containing the build status link.

    Returns an empty dict when no URL can be built (e.g. missing builder info).
    """
    url = _MakeBuildStatusUrl(project, buildbucket, buildername, buildnumber)
    return _CreateLinkColumn('build_uri', 'Buildbot status page', url) if url else {}
def _CreateLinkColumn(name, label, url):
"""Returns a column containing markdown link to show on dashboard."""
return {'a_' + name: '[%s](%s)' % (label, url)}
def _GetTimestamp():
"""Get the Unix timestamp for the current time."""
return int(calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
def _RevisionNumberColumns(data, prefix):
"""Get the point id and revision-related columns from the given data.
Args:
data: A dict of information from one line of the log file.
master: The name of the buildbot master.
prefix: Prefix for revision type keys. 'r_' for non-telemetry JSON, '' for
telemetry JSON.
Returns:
A tuple with the point id (which must be an int), and a dict of
revision-related columns.
"""
revision_supplemental_columns = {}
# The dashboard requires points' x-values to be integers, and points are
# ordered by these x-values. If data['rev'] can't be parsed as an int, assume
# that it's a git commit hash and use timestamp as the x-value.
try:
revision = int(data['rev'])
if revision and 300000 < revision < 1000000:
# Assume that revision is the commit position number for the master
# branch in the chromium/src repo.
revision_supplemental_columns[prefix + 'commit_pos'] = revision
except ValueError:
# The dashboard requires ordered integer revision numbers. If the revision
# is not an integer, assume it's a git hash and send a timestamp.
revision = _GetTimestamp()
revision_supplemental_columns[prefix + 'chromium'] = data['rev']
# An explicit data['point_id'] overrides the default behavior.
if 'point_id' in data:
revision = int(data['point_id'])
# For other revision data, add it if it's present and not undefined:
for key in ['webrtc_git', 'v8_rev']:
if key in data and data[key] != 'undefined':
revision_supplemental_columns[prefix + key] = data[key]
# If possible, also send the git hash.
if 'git_revision' in data and data['git_revision'] != 'undefined':
revision_supplemental_columns[prefix + 'chromium'] = data['git_revision']
return revision, revision_supplemental_columns
def _TestPath(test_name, chart_name, trace_name):
"""Get the slash-separated test path to send.
Args:
test: Test name. Typically, this will be a top-level 'test suite' name.
chart_name: Name of a chart where multiple trace lines are grouped. If the
chart name is the same as the trace name, that signifies that this is
the main trace for the chart.
trace_name: The "trace name" is the name of an individual line on chart.
Returns:
A slash-separated list of names that corresponds to the hierarchy of test
data in the Chrome Performance Dashboard; doesn't include master or bot
name.
"""
# For tests run on reference builds by builds/scripts/slave/telemetry.py,
# "_ref" is appended to the trace name. On the dashboard, as long as the
# result is on the right chart, it can just be called "ref".
if trace_name == chart_name + '_ref':
trace_name = 'ref'
chart_name = chart_name.replace('_by_url', '')
# No slashes are allowed in the trace name.
trace_name = trace_name.replace('/', '_')
# The results for "test/chart" and "test/chart/*" will all be shown on the
# same chart by the dashboard. The result with path "test/path" is considered
# the main trace for the chart.
test_path = '%s/%s/%s' % (test_name, chart_name, trace_name)
if chart_name == trace_name:
test_path = '%s/%s' % (test_name, chart_name)
return test_path
def _SendResultsJson(url, results_json, token_generator_callback):
    """Make a HTTP POST with the given JSON to the Performance Dashboard.

    Args:
        url: URL of Performance Dashboard instance, e.g.
            "https://chromeperf.appspot.com".
        results_json: JSON string that contains the data to be sent.
        token_generator_callback: callable returning an OAuth token for the
            Authorization header.

    Raises:
        SendResultsFatalException: on HTTP 400 (malformed JSON; never retried).
        SendResultsRetryException: on any other HTTP/connection error.
    """
    # When data is provided to urllib2.Request, a POST is sent instead of GET.
    # The data must be in the application/x-www-form-urlencoded format.
    data = six.moves.urllib.parse.urlencode({'data': results_json})
    # NOTE(review): under Python 3 the Request body must be bytes; a str is
    # passed here, which presumably only works under the Python 2 vpython this
    # script runs on — confirm before migrating.
    req = six.moves.urllib.request.Request(url + SEND_RESULTS_PATH, data)
    try:
        oauth_token = token_generator_callback()
        req.headers['Authorization'] = 'Bearer %s' % oauth_token
        six.moves.urllib.request.urlopen(req, timeout=60 * 5)
    except (six.moves.urllib.error.HTTPError, six.moves.urllib.error.URLError,
            httplib.HTTPException):
        error = traceback.format_exc()
        if 'HTTPError: 400' in error:
            # If the remote app rejects the JSON, it's probably malformed,
            # so we don't want to retry it.
            raise SendResultsFatalException('Discarding JSON, error:\n%s' % error)
        raise SendResultsRetryException(error)
def _SendHistogramJson(url, histogramset_json, token_generator_callback):
    """POST a HistogramSet JSON to the Performance Dashboard.

    Args:
        url: URL of Performance Dashboard instance, e.g.
            "https://chromeperf.appspot.com".
        histogramset_json: JSON string that contains a serialized HistogramSet.
        token_generator_callback: see SendResults's documentation.

    Raises:
        SendResultsRetryException: on recoverable errors (HTTP 403/500 or
            connection problems).
        SendResultsFatalException: on any other non-200 response.
    """
    try:
        oauth_token = token_generator_callback()
        # NOTE(review): zlib.compress requires bytes; the JSON string is passed
        # directly, which presumably only works under Python 2 — confirm
        # before migrating to Python 3.
        data = zlib.compress(histogramset_json)
        headers = {'Authorization': 'Bearer %s' % oauth_token, 'User-Agent': 'perf-uploader/1.0'}
        http = httplib2.Http()
        response, _ = http.request(
            url + SEND_HISTOGRAMS_PATH, method='POST', body=data, headers=headers)
        # A 500 is presented on an exception on the dashboard side, timeout,
        # exception, etc. The dashboard can also send back 400 and 403, we could
        # recover from 403 (auth error), but 400 is generally malformed data.
        if response.status in (403, 500):
            raise SendResultsRetryException('HTTP Response %d: %s' %
                                            (response.status, response.reason))
        elif response.status != 200:
            raise SendResultsFatalException('HTTP Response %d: %s' %
                                            (response.status, response.reason))
    except httplib.ResponseNotReady:
        raise SendResultsRetryException(traceback.format_exc())
    except httplib2.HttpLib2Error:
        raise SendResultsRetryException(traceback.format_exc())
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file was copy-pasted over from:
# //build/scripts/slave/recipe_modules/swarming/resources/results_merger.py
# This file is responsbile for merging JSON test results in both the simplified
# JSON format and the Chromium JSON test results format version 3.
from __future__ import print_function
import copy
import json
import sys
# These fields must appear in the test result output
REQUIRED = {
    'interrupted',
    'num_failures_by_type',
    'seconds_since_epoch',
    'tests',
}

# These fields are optional, but must have the same value on all shards
OPTIONAL_MATCHING = (
    'builder_name',
    'build_number',
    'chromium_revision',
    'has_pretty_patch',
    'has_wdiff',
    'path_delimiter',
    'pixel_tests_enabled',
    'random_order_seed',
)

# Optional fields copied through as-is (last shard seen wins; not merged).
OPTIONAL_IGNORED = ('layout_tests_dir',)

# These fields are optional and will be summed together
OPTIONAL_COUNTS = (
    'fixable',
    'num_flaky',
    'num_passes',
    'num_regressions',
    'skipped',
    'skips',
)
class MergeException(Exception):
    """Raised when shard results cannot be merged."""
    pass
def merge_test_results(shard_results_list, test_cross_device=False):
    """Merge list of results.

    Args:
        shard_results_list: list of results to merge. All the results must
            have the same format: either the simplified JSON format or the
            Chromium JSON test results format version 3 (see
            https://www.chromium.org/developers/the-json-test-results-format).
        test_cross_device: If true, some tests run in multiple shards; this
            requires extra handling when merging the values under 'tests'.

    Returns:
        A dictionary representing the merged results, in the same format as
        the inputs.
    """
    # Drop empty/None shard entries up front.
    shards = [shard for shard in shard_results_list if shard]
    if not shards:
        return {}
    # 'seconds_since_epoch' is required by (and unique to) the version-3
    # format, so use it to pick the merge strategy.
    if 'seconds_since_epoch' in shards[0]:
        return _merge_json_test_result_format(shards, test_cross_device)
    return _merge_simplified_json_format(shards)
def _merge_simplified_json_format(shard_results_list):
# This code is specialized to the "simplified" JSON format that used to be
# the standard for recipes.
# These are the only keys we pay attention to in the output JSON.
merged_results = {
'successes': [],
'failures': [],
'valid': True,
}
for result_json in shard_results_list:
successes = result_json.get('successes', [])
failures = result_json.get('failures', [])
valid = result_json.get('valid', True)
if (not isinstance(successes, list) or not isinstance(failures, list) or
not isinstance(valid, bool)):
raise MergeException('Unexpected value type in %s' % result_json) # pragma: no cover
merged_results['successes'].extend(successes)
merged_results['failures'].extend(failures)
merged_results['valid'] = merged_results['valid'] and valid
return merged_results
def _merge_json_test_result_format(shard_results_list, test_cross_device=False):
    """Merges shards in the Chromium JSON test results format version 3.

    See https://www.chromium.org/developers/the-json-test-results-format.
    Every key consumed from a shard is popped, so any leftover keys at the end
    of a shard indicate unmergeable data and raise MergeException.
    """
    # These are required fields for the JSON test result format version 3.
    merged_results = {
        'tests': {},
        'interrupted': False,
        'version': 3,
        'seconds_since_epoch': float('inf'),
        'num_failures_by_type': {}
    }
    # To make sure that we don't mutate existing shard_results_list.
    shard_results_list = copy.deepcopy(shard_results_list)
    for result_json in shard_results_list:
        # TODO(tansell): check whether this deepcopy is actually necessary.
        result_json = copy.deepcopy(result_json)

        # Check the version first
        version = result_json.pop('version', -1)
        if version != 3:
            raise MergeException(  # pragma: no cover (covered by
                # results_merger_unittest).
                'Unsupported version %s. Only version 3 is supported' % version)

        # Check the results for each shard have the required keys
        missing = REQUIRED - set(result_json)
        if missing:
            raise MergeException(  # pragma: no cover (covered by
                # results_merger_unittest).
                'Invalid json test results (missing %s)' % missing)

        # Curry merge_values for this result_json.
        # pylint: disable=cell-var-from-loop
        merge = lambda key, merge_func: merge_value(result_json, merged_results, key, merge_func)

        if test_cross_device:
            # Results from the same test(story) may be found on different
            # shards(devices). We need to handle the merging on story level.
            merge('tests', merge_tries_v2)
        else:
            # Traverse the result_json's test trie & merged_results's test tries in
            # DFS order & add the n to merged['tests'].
            merge('tests', merge_tries)

        # If any were interrupted, we are interrupted.
        merge('interrupted', lambda x, y: x | y)

        # Use the earliest seconds_since_epoch value
        merge('seconds_since_epoch', min)

        # Sum the number of failure types
        merge('num_failures_by_type', sum_dicts)

        # Optional values must match
        for optional_key in OPTIONAL_MATCHING:
            if optional_key not in result_json:
                continue

            if optional_key not in merged_results:
                # Set this value to None, then blindly copy over it.
                merged_results[optional_key] = None
                merge(optional_key, lambda src, dst: src)
            else:
                merge(optional_key, ensure_match)

        # Optional values ignored
        for optional_key in OPTIONAL_IGNORED:
            if optional_key in result_json:
                merged_results[optional_key] = result_json.pop(
                    # pragma: no cover (covered by
                    # results_merger_unittest).
                    optional_key)

        # Sum optional value counts
        for count_key in OPTIONAL_COUNTS:
            if count_key in result_json:  # pragma: no cover
                # TODO(mcgreevy): add coverage.
                merged_results.setdefault(count_key, 0)
                merge(count_key, lambda a, b: a + b)

        # Anything left over was not recognized by any rule above.
        if result_json:
            raise MergeException(  # pragma: no cover (covered by
                # results_merger_unittest).
                'Unmergable values %s' % list(result_json.keys()))

    return merged_results
def merge_tries(source, dest):
    """Merges the test trie |source| into |dest| and returns |dest|.

    Intended for use as a merge_func parameter to merge_value. Walks both
    tries in lock-step depth-first order using an explicit stack; a non-dict
    value for a key present in both tries is an error.
    """
    stack = [('', dest, source)]
    while stack:
        prefix, dest_node, src_node = stack.pop()
        for key, value in src_node.items():
            if key not in dest_node:
                # New subtree: attach it wholesale.
                dest_node[key] = value
                continue
            if not isinstance(value, dict):
                raise MergeException('%s:%s: %r not mergable, curr_node: %r\ndest_node: %r' %
                                     (prefix, key, value, src_node, dest_node))
            stack.append(("%s:%s" % (prefix, key), dest_node[key], value))
    return dest
def merge_tries_v2(source, dest):
    """Merges test trie |source| into |dest|, combining duplicated stories.

    Like merge_tries, but when the same story — a leaf dict carrying 'actual'
    and 'expected' — appears in both tries (e.g. it ran on several
    shards/devices), the two results are merged instead of rejected.
    Intended for use as a merge_func parameter to merge_value.
    """
    stack = [('', dest, source)]
    while stack:
        prefix, dest_node, src_node = stack.pop()
        for key, value in src_node.items():
            if key not in dest_node:
                dest_node[key] = value
                continue
            if not isinstance(value, dict):
                raise MergeException('%s:%s: %r not mergable, curr_node: %r\ndest_node: %r' %
                                     (prefix, key, value, src_node, dest_node))
            if 'actual' in value and 'expected' in value:
                # |value| is the result of a story already present in dest.
                _merging_cross_device_results(value, dest_node[key])
            else:
                stack.append(("%s:%s" % (prefix, key), dest_node[key], value))
    return dest
def _merging_cross_device_results(src, dest):
# 1. Merge the 'actual' field and update the is_unexpected based on new values
dest['actual'] += ' %s' % src['actual']
if any(actual != dest['expected'] for actual in dest['actual'].split()):
dest['is_unexpected'] = True
# 2. append each item under the 'artifacts' and 'times'.
if 'artifacts' in src:
if 'artifacts' in dest:
for artifact, artifact_list in src['artifacts'].items():
if artifact in dest['artifacts']:
dest['artifacts'][artifact] += artifact_list
else:
dest['artifacts'][artifact] = artifact_list
else:
dest['artifacts'] = src['artifacts']
if 'times' in src:
if 'times' in dest:
dest['times'] += src['times']
else:
dest['time'] = src['time']
dest['times'] = src['times']
# 3. remove the 'shard' because now the results are from multiple shards.
if 'shard' in dest:
del dest['shard']
def ensure_match(source, dest):
    """Returns |source|, verifying it equals |dest|.

    Intended for use as a merge_func parameter to merge_value.

    Raises:
        MergeException: if source != dest.
    """
    if source == dest:
        return source
    raise MergeException(  # pragma: no cover (covered by
        # results_merger_unittest).
        "Values don't match: %s, %s" % (source, dest))
def sum_dicts(source, dest):
    """Adds each value in |source| onto the matching key of |dest|.

    Missing keys in |dest| are treated as 0. Intended for use as a merge_func
    parameter to merge_value. Returns |dest|.
    """
    for key, count in source.items():
        dest[key] = dest.get(key, 0) + count
    return dest
def merge_value(source, dest, key, merge_func):
    """Merges source[key] into dest[key], then deletes the key from source.

    Args:
        source: dict to pull the value from; the key is removed on success.
        dest: dict receiving the merged value.
        key: key identifying the value to merge.
        merge_func(src, dst): merges src into dst and returns the result; may
            modify dst and may raise a MergeException.

    Raises:
        MergeException: if the values cannot be merged; re-raised with the
            offending key prepended to the message.
    """
    try:
        merged = merge_func(source[key], dest[key])
    except MergeException as e:
        e.message = "MergeFailure for %s\n%s" % (key, e.message)
        e.args = tuple([e.message] + list(e.args[1:]))
        raise
    dest[key] = merged
    del source[key]
def main(files):
    """Merges the JSON result files named in |files| and prints the result.

    Args:
        files: list of at least two JSON file paths; the first is the base and
            the rest are merged into it in order.

    Returns:
        0 on success, 1 if fewer than two files were supplied.
    """
    if len(files) < 2:
        sys.stderr.write("Not enough JSON files to merge.\n")
        return 1
    sys.stderr.write('Starting with %s\n' % files[0])
    # Use context managers so file handles are closed promptly; the original
    # json.load(open(f)) calls leaked them until garbage collection.
    with open(files[0]) as fp:
        result = json.load(fp)
    for name in files[1:]:
        sys.stderr.write('Merging %s\n' % name)
        with open(name) as fp:
            result = merge_test_results([result, json.load(fp)])
    print(json.dumps(result))
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file was copy-pasted over from:
# //build/scripts/slave/upload_perf_dashboard_results.py
# with sections copied from:
# //build/scripts/slave/slave_utils.py
import json
import optparse
import os
import re
import shutil
import sys
import tempfile
import time
import logging
import six.moves.urllib.parse # pylint: disable=import-error
from core import results_dashboard
# Configure root logging once at import time so every uploader module shares
# the same format (level, timestamp, pid, and call site).
logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
    ' %(module)s.%(funcName)s:%(lineno)d %(message)s')

# Template for a dashboard report link: master, bot, test path and revision.
RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s'
def _CommitPositionNumber(commit_pos):
"""Extracts the number part of a commit position.
This is used to extract the number from got_revision_cp; This will be used
as the value of "rev" in the data passed to results_dashboard.SendResults.
"""
return int(re.search(r'{#(\d+)}', commit_pos).group(1))
def _GetDashboardJson(options):
    """Converts a chartjson results file into dashboard upload JSON.

    Reads options.results_file and, depending on whether it contains a
    'charts' section, builds either the legacy list-of-points payload or the
    Telemetry v1 dashboard JSON via results_dashboard.

    Args:
        options: parsed command-line options (see _CreateParser).

    Returns:
        The upload payload: a list of point dicts for legacy results, or a
        dict for Telemetry v1 results.
    """
    main_revision = _CommitPositionNumber(options.got_revision_cp)
    revisions = _GetPerfDashboardRevisionsWithProperties(options.got_webrtc_revision,
                                                         options.got_v8_revision,
                                                         options.git_revision, main_revision)
    reference_build = 'reference' in options.name
    stripped_test_name = options.name.replace('.reference', '')
    results = {}
    logging.info('Opening results file %s' % options.results_file)
    with open(options.results_file) as f:
        results = json.load(f)
    dashboard_json = {}
    if 'charts' not in results:
        # These are legacy results.
        # pylint: disable=redefined-variable-type
        dashboard_json = results_dashboard.MakeListOfPoints(
            results,
            options.configuration_name,
            stripped_test_name,
            options.project,
            options.buildbucket,
            options.buildername,
            options.buildnumber, {},
            options.perf_dashboard_machine_group,
            revisions_dict=revisions)
    else:
        dashboard_json = results_dashboard.MakeDashboardJsonV1(
            results,
            revisions,
            stripped_test_name,
            options.configuration_name,
            options.project,
            options.buildbucket,
            options.buildername,
            options.buildnumber, {},
            reference_build,
            perf_dashboard_machine_group=options.perf_dashboard_machine_group)
    return dashboard_json
def _GetDashboardHistogramData(options):
    """Build HistogramSet JSON batches annotated with diagnostics.

    Returns a list of JSON objects, one per batch file produced by
    results_dashboard.MakeHistogramSetWithDiagnostics (the set is split
    into batches when it would exceed the 1 MiB size limit).
    """
    revisions = {
        '--chromium_commit_positions': _CommitPositionNumber(options.got_revision_cp),
        '--chromium_revisions': options.git_revision
    }
    # Optional revision flags are only added when their value is set.
    for flag, value in (('--webrtc_revisions', options.got_webrtc_revision),
                        ('--v8_revisions', options.got_v8_revision)):
        if value:
            revisions[flag] = value
    is_reference_build = 'reference' in options.name
    stripped_test_name = options.name.replace('.reference', '')
    max_bytes = 1 << 20
    output_dir = tempfile.mkdtemp()
    try:
        begin_time = time.time()
        results_dashboard.MakeHistogramSetWithDiagnostics(
            histograms_file=options.results_file,
            test_name=stripped_test_name,
            bot=options.configuration_name,
            buildername=options.buildername,
            buildnumber=options.buildnumber,
            project=options.project,
            buildbucket=options.buildbucket,
            revisions_dict=revisions,
            is_reference_build=is_reference_build,
            perf_dashboard_machine_group=options.perf_dashboard_machine_group,
            output_dir=output_dir,
            max_bytes=max_bytes)
        end_time = time.time()
        logging.info('Duration of adding diagnostics for %s: %d seconds' %
                     (stripped_test_name, end_time - begin_time))
        # Read every batch file written into output_dir.
        batches = []
        for basename in os.listdir(output_dir):
            with open(os.path.join(output_dir, basename)) as batch_file:
                batches.append(json.load(batch_file))
        return batches
    finally:
        shutil.rmtree(output_dir)
def _CreateParser():
    """Build the optparse parser for this uploader's command line."""
    parser = optparse.OptionParser()
    # All options are plain string-valued except --send-as-histograms.
    for flag in ('--name', '--results-file', '--output-json-file', '--got-revision-cp',
                 '--configuration-name', '--results-url', '--perf-dashboard-machine-group',
                 '--project', '--buildbucket', '--buildername', '--buildnumber',
                 '--got-webrtc-revision', '--got-v8-revision', '--git-revision',
                 '--output-json-dashboard-url'):
        parser.add_option(flag)
    parser.add_option('--send-as-histograms', action='store_true')
    return parser
def main(args):
    """Parses args, builds dashboard JSON and uploads it to the dashboard.

    Returns 0 on success (including the nothing-to-upload case) and 1 when
    the machine group is missing or any batch upload fails.
    """
    parser = _CreateParser()
    options, extra_args = parser.parse_args(args)
    # Validate options.
    if extra_args:
        parser.error('Unexpected command line arguments')
    if not options.configuration_name or not options.results_url:
        parser.error('configuration_name and results_url are required.')
    if not options.perf_dashboard_machine_group:
        logging.error('Invalid perf dashboard machine group')
        return 1
    if not options.send_as_histograms:
        # ChartJSON / legacy path: at most one dashboard JSON object.
        dashboard_json = _GetDashboardJson(options)
        dashboard_jsons = []
        if dashboard_json:
            dashboard_jsons.append(dashboard_json)
    else:
        dashboard_jsons = _GetDashboardHistogramData(options)
        # The HistogramSet might have been batched if it would be too large to
        # upload together. It's safe to concatenate the batches in order to write
        # output_json_file.
        # TODO(crbug.com/918208): Use a script in catapult to merge dashboard_jsons.
        dashboard_json = sum(dashboard_jsons, [])
    if options.output_json_file:
        # NOTE(review): json.dump expects a file-like object here, yet
        # --output-json-file is parsed as an option value — confirm callers
        # pass something writable through this path.
        json.dump(dashboard_json, options.output_json_file, indent=4, separators=(',', ': '))
    if dashboard_jsons:
        if options.output_json_dashboard_url:
            # Dump dashboard url to file.
            dashboard_url = GetDashboardUrl(options.name, options.configuration_name,
                                            options.results_url, options.got_revision_cp,
                                            options.perf_dashboard_machine_group)
            with open(options.output_json_dashboard_url, 'w') as f:
                json.dump(dashboard_url if dashboard_url else '', f)
        # Each batch is uploaded separately; the first failure aborts.
        for batch in dashboard_jsons:
            if not results_dashboard.SendResults(
                    batch,
                    options.name,
                    options.results_url,
                    send_as_histograms=options.send_as_histograms):
                return 1
    else:
        # The upload didn't fail since there was no data to upload.
        logging.warning('No perf dashboard JSON was produced.')
    return 0
if __name__ == '__main__':
    # NOTE(review): this guard appears before GetDashboardUrl and
    # _GetPerfDashboardRevisionsWithProperties are defined below; if the module
    # is executed as a script, main() could hit a NameError on those paths.
    # Confirm intended placement (such guards normally sit at the end of the
    # file).
    sys.exit(main(sys.argv[1:]))
def GetDashboardUrl(name, configuration_name, results_url, got_revision_cp,
                    perf_dashboard_machine_group):
    """Returns a link to the dashboard report page for this run.

    Any '.reference' suffix is stripped from the test name so the link points
    at the main (non-reference) series.  All path components are URL-quoted.
    """
    name = name.replace('.reference', '')
    dashboard_url = results_url + RESULTS_LINK_PATH % (
        six.moves.urllib.parse.quote(perf_dashboard_machine_group),
        six.moves.urllib.parse.quote(configuration_name), six.moves.urllib.parse.quote(name),
        _CommitPositionNumber(got_revision_cp))
    return dashboard_url
def _GetPerfDashboardRevisionsWithProperties(got_webrtc_revision,
                                             got_v8_revision,
                                             git_revision,
                                             main_revision,
                                             point_id=None):
    """Fills in the same revisions fields that process_log_utils does.

    Args:
      got_webrtc_revision: WebRTC revision string, or a falsy value.
      got_v8_revision: V8 revision string, or a falsy value.
      git_revision: git hash of the build, or a falsy value.
      main_revision: numeric commit position.
      point_id: optional dashboard point id.

    Returns:
      A dict of revision keys with all falsy or 'undefined' entries removed.
    """
    versions = {
        'rev': main_revision,
        'webrtc_git': got_webrtc_revision,
        'v8_rev': got_v8_revision,
        'git_revision': git_revision,
        'point_id': point_id,
    }
    # There are a lot of "bad" revisions to check for, so clean them all up
    # here.  Iterate over a snapshot of the keys: deleting from a dict while
    # iterating its live keys() view raises RuntimeError on Python 3.
    for key in list(versions):
        if not versions[key] or versions[key] == 'undefined':
            del versions[key]
    return versions
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Dictionary for the repeat config.
# E.g.:
# {
#   'builder-1':
#   {
#     'benchmark-1':
#     {
#       'story-1': 4,
#     },
#   },
#   'builder-2':
#   {
#     'benchmark-2':
#     {
#       'story-1': 10,
#       'story-2': 10,
#     },
#   },
# }
# Maps builder name -> benchmark -> story -> repeat count.  Some benchmarks
# map straight to an int instead of a story dict (e.g. 'system_health.common_mobile': 3);
# presumably that repeat count applies to the benchmark as a whole —
# TODO(review): confirm against the consumer of this table.
TARGET_DEVICES = {
    'android-pixel2-perf-fyi': {
        'speedometer2': {
            'Speedometer2': 3,
        },
        'rendering.mobile': {
            'css_transitions_triggered_style_element': 4,
            'canvas_animation_no_clear': 4
        },
        'system_health.common_mobile': 3,
        'system_health.memory_mobile': 3,
    },
    'android-pixel2-perf': {
        'system_health.common_mobile': {
            # timeToFirstContentfulPaint
            'browse:media:googleplaystore:2019': 10,
            'load:social:pinterest:2019': 10,
            'browse:media:facebook_photos:2019': 10
        }
    },
    'android-go-perf': {
        'system_health.common_mobile': {
            # timeToFirstContentfulPaint
            'background:social:facebook:2019': 10,
            # cputimeToFirstContentfulPaint
            'load:search:google:2018': 10
        }
    },
    'linux-perf': {
        'system_health.common_desktop': {
            # cputimeToFirstContentfulPaint
            'browse:social:tumblr_infinite_scroll:2018': 10,
            'long_running:tools:gmail-background': 10,
            'browse:media:youtubetv:2019': 10
        }
    },
    'win-10-perf': {
        'system_health.common_desktop': {
            # cputimeToFirstContentfulPaint
            'browse:media:tumblr:2018': 10,
            'browse:social:tumblr_infinite_scroll:2018': 10,
            'load:search:google:2018': 10,
        }
    },
    'linux-perf-calibration': {
        'speedometer2': {
            'Speedometer2': 28,
        },
        'blink_perf.shadow_dom': 31
    }
}
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generates legacy perf dashboard json from non-telemetry based perf tests.
Taken from chromium/build/scripts/slave/performance_log_processor.py
(https://goo.gl/03SQRk)
"""
import collections
import json
import math
import logging
import re
class LegacyResultsProcessor(object):
    """Class for any log processor expecting standard data to be graphed.

    The log will be parsed looking for any lines of the forms:
      <*>RESULT <graph_name>: <trace_name>= <value> <units>
    or
      <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...] <units>
    or
      <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>} <units>

    For example,
      *RESULT vm_final_browser: OneTab= 8488 kb
      RESULT startup: ref= [167.00,148.00,146.00,142.00] ms
      RESULT TabCapturePerformance_foo: Capture= {30.7, 1.45} ms

    The leading * is optional; it indicates that the data from that line should
    be considered "important", which may mean for example that it's graphed by
    default.

    If multiple values are given in [], their mean and (sample) standard
    deviation will be written; if only one value is given, that will be written.
    A trailing comma is permitted in the list of values.

    NOTE: All lines except for RESULT lines are ignored, including the Avg and
    Stddev lines output by Telemetry!

    Any of the <fields> except <value> may be empty, in which case the
    not-terribly-useful defaults will be used. The <graph_name> and <trace_name>
    should not contain any spaces, colons (:) nor equals-signs (=). Furthermore,
    the <trace_name> will be used on the waterfall display, so it should be kept
    short. If the trace_name ends with '_ref', it will be interpreted as a
    reference value, and shown alongside the corresponding main value on the
    waterfall.

    Semantic note: The terms graph and chart are used interchangeably here.
    """

    # Matches a RESULT line; VALUE may be a scalar, a [list] or a {mean, stddev}
    # pair, optionally followed by UNITS.
    RESULTS_REGEX = re.compile(r'(?P<IMPORTANT>\*)?RESULT '
                               r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
                               r'(?P<VALUE>[\{\[]?[-\d\., ]+[\}\]]?)('
                               r' ?(?P<UNITS>.+))?')
    # TODO(eyaich): Determine if this format is still used by any perf tests
    HISTOGRAM_REGEX = re.compile(r'(?P<IMPORTANT>\*)?HISTOGRAM '
                                 r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
                                 r'(?P<VALUE_JSON>{.*})(?P<UNITS>.+)?')

    def __init__(self):
        # A dict of Graph objects, by name.
        self._graphs = {}
        # A dict mapping output file names to lists of lines in a file.
        self._output = {}
        # NOTE(review): appears unused within this class — confirm whether any
        # subclass relies on it before removing.
        self._percentiles = [.1, .25, .5, .75, .90, .95, .99]

    class Trace(object):
        """Encapsulates data for one trace. Here, this means one point."""

        def __init__(self):
            self.important = False
            self.values = []
            self.mean = 0.0
            self.stddev = 0.0

        def __str__(self):
            result = _FormatHumanReadable(self.mean)
            if self.stddev:
                result += '+/-%s' % _FormatHumanReadable(self.stddev)
            return result

    class Graph(object):
        """Encapsulates a set of points that should appear on the same graph."""

        def __init__(self):
            self.units = None
            self.traces = {}

        def IsImportant(self):
            """A graph is considered important if any of its traces is important."""
            for trace in self.traces.values():
                if trace.important:
                    return True
            return False

        def BuildTracesDict(self):
            """Returns a dictionary mapping trace names to [value, stddev]."""
            traces_dict = {}
            for name, trace in self.traces.items():
                traces_dict[name] = [str(trace.mean), str(trace.stddev)]
            return traces_dict

    def GenerateJsonResults(self, filename):
        """Parses RESULT lines from filename and returns chart JSON text."""
        # Iterate through the file and process each output line
        with open(filename) as f:
            for line in f.readlines():
                self.ProcessLine(line)
        # After all results have been seen, generate the graph json data
        return self.GenerateGraphJson()

    def _PrependLog(self, filename, data):
        """Prepends some data to an output file."""
        self._output[filename] = data + self._output.get(filename, [])

    def ProcessLine(self, line):
        """Processes one result line, and updates the state accordingly."""
        results_match = self.RESULTS_REGEX.search(line)
        histogram_match = self.HISTOGRAM_REGEX.search(line)
        if results_match:
            self._ProcessResultLine(results_match)
        elif histogram_match:
            raise Exception("Error: Histogram results parsing not supported yet")

    def _ProcessResultLine(self, line_match):
        """Processes a line that matches the standard RESULT line format.

        Args:
          line_match: A MatchObject as returned by re.search.
        """
        match_dict = line_match.groupdict()
        graph_name = match_dict['GRAPH'].strip()
        trace_name = match_dict['TRACE'].strip()
        graph = self._graphs.get(graph_name, self.Graph())
        graph.units = (match_dict['UNITS'] or '').strip()
        trace = graph.traces.get(trace_name, self.Trace())
        value = match_dict['VALUE']
        # IMPORTANT is '*' when present, so this stores '*' (truthy) or False.
        trace.important = match_dict['IMPORTANT'] or False
        # Compute the mean and standard deviation for a list or a histogram,
        # or the numerical value of a scalar value.
        if value.startswith('['):
            try:
                value_list = [float(x) for x in value.strip('[],').split(',')]
            except ValueError:
                # Report, but ignore, corrupted data lines. (Lines that are so badly
                # broken that they don't even match the RESULTS_REGEX won't be
                # detected.)
                logging.warning("Bad test output: '%s'" % value.strip())
                return
            trace.values += value_list
            trace.mean, trace.stddev, filedata = self._CalculateStatistics(
                trace.values, trace_name)
            assert filedata is not None
            for filename in filedata:
                self._PrependLog(filename, filedata[filename])
        elif value.startswith('{'):
            stripped = value.strip('{},')
            try:
                trace.mean, trace.stddev = [float(x) for x in stripped.split(',')]
            except ValueError:
                logging.warning("Bad test output: '%s'" % value.strip())
                return
        else:
            try:
                trace.values.append(float(value))
                trace.mean, trace.stddev, filedata = self._CalculateStatistics(
                    trace.values, trace_name)
                assert filedata is not None
                for filename in filedata:
                    self._PrependLog(filename, filedata[filename])
            except ValueError:
                logging.warning("Bad test output: '%s'" % value.strip())
                return
        graph.traces[trace_name] = trace
        self._graphs[graph_name] = graph

    def GenerateGraphJson(self):
        """Returns the accumulated graphs serialized as a chart JSON string."""
        charts = {}
        for graph_name, graph in self._graphs.items():
            traces = graph.BuildTracesDict()
            # Traces should contain exactly two elements: [mean, stddev].
            for _, trace in traces.items():
                assert len(trace) == 2
            graph_dict = collections.OrderedDict([
                ('traces', traces),
                ('units', str(graph.units)),
            ])
            # Include a sorted list of important trace names if there are any.
            important = [t for t in graph.traces.keys() if graph.traces[t].important]
            if important:
                graph_dict['important'] = sorted(important)
            charts[graph_name] = graph_dict
        return json.dumps(charts)

    # _CalculateStatistics needs to be a member function.
    # pylint: disable=R0201
    # Unused argument value_list.
    # pylint: disable=W0613
    def _CalculateStatistics(self, value_list, trace_name):
        """Returns a tuple with some statistics based on the given value list.

        This method may be overridden by subclasses wanting a different standard
        deviation calcuation (or some other sort of error value entirely).

        Args:
          value_list: the list of values to use in the calculation
          trace_name: the trace that produced the data (not used in the base
              implementation, but subclasses may use it)

        Returns:
          A 3-tuple - mean, standard deviation, and a dict which is either
          empty or contains information about some file contents.
        """
        n = len(value_list)
        if n == 0:
            return 0.0, 0.0, {}
        mean = float(sum(value_list)) / n
        # Population variance (divides by n, not n - 1).
        variance = sum([(element - mean)**2 for element in value_list]) / n
        stddev = math.sqrt(variance)
        return mean, stddev, {}
def _FormatHumanReadable(number):
    """Formats a float into three significant figures, using metric suffixes.

    Only m, k, and M prefixes (for 1/1000, 1000, and 1,000,000) are used;
    values outside that range keep extra digits instead of using further
    prefixes.

    Examples:
      0.0387 => 38.7m
      1.1234 => 1.12
      682851200 => 683M
    """
    prefixes = {-3: 'm', 0: '', 3: 'k', 6: 'M'}
    # '%.2e' yields e.g. '6.83e+08': three significant digits plus exponent.
    mantissa_text, _, exp_text = ('%.2e' % float(number)).partition('e')
    mantissa = float(mantissa_text)
    exp = int(exp_text)
    # Shift digits until the exponent is a multiple of three...
    while exp % 3:
        mantissa *= 10
        exp -= 1
    # ...then clamp the exponent into the supported prefix range.
    while exp > 6:
        mantissa *= 10
        exp -= 1
    while exp < -3:
        mantissa /= 10
        exp += 1
    if mantissa >= 100:
        # Don't append a meaningless '.0' to an integer number.
        mantissa = int(mantissa)
    # Exponent is now divisible by 3, between -3 and 6 inclusive.
    return '%s%s' % (mantissa, prefixes[exp])
#!/usr/bin/env vpython
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import collections
import json
import logging
import multiprocessing
import os
import shutil
import sys
import tempfile
import time
import uuid
logging.basicConfig(
level=logging.INFO,
format='(%(levelname)s) %(asctime)s pid=%(process)d'
' %(module)s.%(funcName)s:%(lineno)d %(message)s')
import cross_device_test_config
from core import path_util
from core import upload_results_to_perf_dashboard
from core import results_merger
path_util.AddAndroidPylibToPath()
try:
from pylib.utils import logdog_helper
except ImportError:
pass
RESULTS_URL = 'https://chromeperf.appspot.com'

# Until we are migrated to LUCI, we will be utilizing a hard
# coded master name based on what is passed in the build properties.
# See crbug.com/801289 for more details.
MACHINE_GROUP_JSON_FILE = os.path.join(path_util.GetChromiumSrcDir(), 'tools', 'perf', 'core',
                                       'perf_dashboard_machine_group_mapping.json')

JSON_CONTENT_TYPE = 'application/json'

# Cache of what data format (ChartJSON, Histograms, etc.) each results file is
# in so that only one disk read is required when checking the format multiple
# times.
_data_format_cache = {}
DATA_FORMAT_GTEST = 'gtest'
DATA_FORMAT_CHARTJSON = 'chartjson'
DATA_FORMAT_HISTOGRAMS = 'histograms'
DATA_FORMAT_UNKNOWN = 'unknown'
def _GetMachineGroup(build_properties):
    """Resolve the perf dashboard machine group for this build.

    Prefers an explicit 'perf_dashboard_machine_group' build property; falls
    back to mapping 'builder_group' (or the legacy 'mastername') through
    perf_dashboard_machine_group_mapping.json.

    Raises:
      ValueError: if no machine group can be determined.
    """
    machine_group = None
    if build_properties.get('perf_dashboard_machine_group', False):
        # Once luci migration is complete this will exist as a property
        # in the build properties
        machine_group = build_properties['perf_dashboard_machine_group']
    else:
        builder_group_mapping = {}
        with open(MACHINE_GROUP_JSON_FILE) as fp:
            builder_group_mapping = json.load(fp)
        if build_properties.get('builder_group', False):
            legacy_builder_group = build_properties['builder_group']
        else:
            # TODO(crbug.com/1153958): remove reference to mastername.
            legacy_builder_group = build_properties['mastername']
        if builder_group_mapping.get(legacy_builder_group):
            machine_group = builder_group_mapping[legacy_builder_group]
    if not machine_group:
        # Fixed: the original message ran '...json' and 'See ...' together
        # with no separator.
        raise ValueError('Must set perf_dashboard_machine_group or have a valid '
                         'mapping in '
                         'src/tools/perf/core/perf_dashboard_machine_group_mapping.json. '
                         'See bit.ly/perf-dashboard-machine-group for more details')
    return machine_group
def _upload_perf_results(json_to_upload, name, configuration_name, build_properties,
                         output_json_file):
    """Upload the contents of result JSON(s) to the perf dashboard.

    Args:
      json_to_upload: path to the perf results file to upload.
      name: benchmark name.
      configuration_name: bot/configuration name for the dashboard.
      build_properties: dict of build properties for this run.
      output_json_file: writable file the upload script's JSON goes to.

    Returns:
      The upload script's exit code (0 on success).
    """
    args = [
        '--buildername', build_properties['buildername'], '--buildnumber',
        build_properties['buildnumber'], '--name', name, '--configuration-name',
        configuration_name, '--results-file', json_to_upload, '--results-url', RESULTS_URL,
        '--got-revision-cp', build_properties['got_revision_cp'], '--got-v8-revision',
        build_properties['got_v8_revision'], '--got-webrtc-revision',
        build_properties['got_webrtc_revision'], '--output-json-file', output_json_file,
        '--perf-dashboard-machine-group',
        _GetMachineGroup(build_properties)
    ]
    buildbucket = build_properties.get('buildbucket', {})
    # 'buildbucket' may be delivered as a JSON string rather than a dict.
    # Fixed: 'basestring' only exists on Python 2 and raised NameError under
    # Python 3; accept both interpreters.
    try:
        string_types = basestring  # pylint: disable=undefined-variable
    except NameError:  # Python 3
        string_types = str
    if isinstance(buildbucket, string_types):
        buildbucket = json.loads(buildbucket)
    if 'build' in buildbucket:
        args += [
            '--project',
            buildbucket['build'].get('project'),
            '--buildbucket',
            buildbucket['build'].get('bucket'),
        ]
    if build_properties.get('git_revision'):
        args.append('--git-revision')
        args.append(build_properties['git_revision'])
    if _is_histogram(json_to_upload):
        args.append('--send-as-histograms')
    #TODO(crbug.com/1072729): log this in top level
    logging.info('upload_results_to_perf_dashboard: %s.' % args)
    return upload_results_to_perf_dashboard.main(args)
def _is_histogram(json_file):
    """Returns True when json_file holds HistogramSet results."""
    file_format = _determine_data_format(json_file)
    return file_format == DATA_FORMAT_HISTOGRAMS
def _is_gtest(json_file):
    """Returns True when json_file holds gtest-style results."""
    file_format = _determine_data_format(json_file)
    return file_format == DATA_FORMAT_GTEST
def _determine_data_format(json_file):
    """Classify json_file's results format, caching one read per file.

    Returns one of DATA_FORMAT_HISTOGRAMS (a JSON list), DATA_FORMAT_CHARTJSON
    (a dict with a 'charts' key), DATA_FORMAT_GTEST (any other dict) or
    DATA_FORMAT_UNKNOWN.  Results are memoized in _data_format_cache so the
    file is only read once.

    Fixed: the original returned from inside the `with` block, which made a
    trailing DATA_FORMAT_UNKNOWN cache assignment unreachable dead code; the
    control flow is now a single classification followed by one return.
    """
    if json_file not in _data_format_cache:
        with open(json_file) as f:
            data = json.load(f)
        if isinstance(data, list):
            _data_format_cache[json_file] = DATA_FORMAT_HISTOGRAMS
        elif isinstance(data, dict):
            if 'charts' in data:
                _data_format_cache[json_file] = DATA_FORMAT_CHARTJSON
            else:
                _data_format_cache[json_file] = DATA_FORMAT_GTEST
        else:
            _data_format_cache[json_file] = DATA_FORMAT_UNKNOWN
    return _data_format_cache[json_file]
def _merge_json_output(output_json, jsons_to_merge, extra_links, test_cross_device=False):
    """Merges the contents of one or more results JSONs.

    Args:
      output_json: A path to a JSON file to which the merged results should be
        written.
      jsons_to_merge: A list of JSON files that should be merged.
      extra_links: a (key, value) map in which keys are the human-readable strings
        which describe the data, and value is logdog url that contain the data.
      test_cross_device: forwarded to results_merger.merge_test_results.

    Returns:
      0 (failures propagate as exceptions).
    """
    start = time.time()
    merged = results_merger.merge_test_results(jsons_to_merge, test_cross_device)
    # Only append the perf results links if present.
    if extra_links:
        merged['links'] = extra_links
    with open(output_json, 'w') as out_file:
        json.dump(merged, out_file)
    print_duration('Merging json test results', start, time.time())
    return 0
def _handle_perf_json_test_results(benchmark_directory_map, test_results_list):
    """Checks the test_results.json under each folder:

    1. mark the benchmark 'enabled' if tests results are found
    2. add the json content to a list for non-ref.

    Args:
      benchmark_directory_map: benchmark name -> list of shard directories.
      test_results_list: output list; non-reference json results are appended.

    Returns:
      Dict mapping benchmark name -> True for benchmarks that produced results
      on every shard (disabled/empty benchmarks are omitted).
    """
    begin_time = time.time()
    benchmark_enabled_map = {}
    for benchmark_name, directories in benchmark_directory_map.items():
        for directory in directories:
            # Obtain the test name we are running
            is_ref = '.reference' in benchmark_name
            enabled = True
            try:
                with open(os.path.join(directory, 'test_results.json')) as json_data:
                    json_results = json.load(json_data)
                    if not json_results:
                        # Output is null meaning the test didn't produce any results.
                        # Want to output an error and continue loading the rest of the
                        # test results.
                        logging.warning('No results produced for %s, skipping upload' % directory)
                        continue
                    if json_results.get('version') == 3:
                        # Non-telemetry tests don't have written json results but
                        # if they are executing then they are enabled and will generate
                        # chartjson results.
                        if not bool(json_results.get('tests')):
                            enabled = False
                    if not is_ref:
                        # We don't need to upload reference build data to the
                        # flakiness dashboard since we don't monitor the ref build
                        test_results_list.append(json_results)
            except IOError as e:
                # TODO(crbug.com/936602): Figure out how to surface these errors. Should
                # we have a non-zero exit code if we error out?
                logging.error('Failed to obtain test results for %s: %s', benchmark_name, e)
                continue
            if not enabled:
                # We don't upload disabled benchmarks or tests that are run
                # as a smoke test
                logging.info('Benchmark %s ran no tests on at least one shard' % benchmark_name)
                continue
            benchmark_enabled_map[benchmark_name] = True
    end_time = time.time()
    print_duration('Analyzing perf json test results', begin_time, end_time)
    return benchmark_enabled_map
def _generate_unique_logdog_filename(name_prefix):
    """Returns name_prefix plus a random UUID suffix to avoid collisions."""
    return '%s_%s' % (name_prefix, uuid.uuid4())
def _handle_perf_logs(benchmark_directory_map, extra_links):
    """Uploads each shard's benchmark_log.txt to logdog and records an index
    page of the uploaded links under extra_links['Benchmarks logs']."""
    start = time.time()
    logs_by_benchmark = collections.defaultdict(list)
    for benchmark_name, directories in benchmark_directory_map.items():
        for directory in directories:
            log_path = os.path.join(directory, 'benchmark_log.txt')
            if not os.path.exists(log_path):
                continue
            with open(log_path) as log_file:
                uploaded_link = logdog_helper.text(
                    name=_generate_unique_logdog_filename(benchmark_name),
                    data=log_file.read())
            logs_by_benchmark[benchmark_name].append(uploaded_link)
    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
    extra_links['Benchmarks logs'] = logdog_helper.text(
        logdog_file_name,
        json.dumps(logs_by_benchmark, sort_keys=True, indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    print_duration('Generating perf log streams', start, time.time())
def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
    """Uploads the benchmarks shard map to logdog and links it in extra_links."""
    start = time.time()
    with open(benchmarks_shard_map_file) as shard_map:
        shard_map_text = shard_map.read()
    stream_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
    extra_links['Benchmarks shard map'] = logdog_helper.text(
        stream_name, shard_map_text, content_type=JSON_CONTENT_TYPE)
    print_duration('Generating benchmark shard map stream', start, time.time())
def _get_benchmark_name(directory):
    """Derives the benchmark name from its output directory's basename."""
    basename = os.path.basename(directory)
    return basename.replace(" benchmark", "")
def _scan_output_dir(task_output_dir):
    """Scans a swarming task output directory for benchmark results.

    Returns:
      (benchmark_directory_map, benchmarks_shard_map_file): the map goes from
      benchmark name to the list of per-shard result directories; the shard
      map file path is None when no *benchmarks_shard_map.json was found.
    """
    benchmark_directory_map = {}
    benchmarks_shard_map_file = None
    shard_dirs = [
        entry for entry in os.listdir(task_output_dir)
        if not os.path.isfile(os.path.join(task_output_dir, entry))
    ]
    benchmark_dirs = []
    for shard_dir in shard_dirs:
        for entry in os.listdir(os.path.join(task_output_dir, shard_dir)):
            full_path = os.path.join(task_output_dir, shard_dir, entry)
            if os.path.isdir(full_path):
                benchmark_dirs.append(full_path)
            elif full_path.endswith('benchmarks_shard_map.json'):
                benchmarks_shard_map_file = full_path
    # Group the benchmark directories by benchmark name.
    for benchmark_dir in benchmark_dirs:
        name = _get_benchmark_name(benchmark_dir)
        benchmark_directory_map.setdefault(name, []).append(benchmark_dir)
    return benchmark_directory_map, benchmarks_shard_map_file
def process_perf_results(output_json,
                         configuration_name,
                         build_properties,
                         task_output_dir,
                         smoke_test_mode,
                         output_results_dir,
                         lightweight=False,
                         skip_perf=False):
    """Process perf results.

    Consists of merging the json-test-format output, uploading the perf test
    output (chartjson and histogram), and store the benchmark logs in logdog.

    Each directory in the task_output_dir represents one benchmark
    that was run. Within this directory, there is a subdirectory with the name
    of the benchmark that was run. In that subdirectory, there is a
    perftest-output.json file containing the performance results in histogram
    or dashboard json format and an output.json file containing the json test
    results for the benchmark.

    Args:
      output_json: path the merged json-test-format output is written to.
      configuration_name: bot configuration; falls back to 'buildername'.
      build_properties: JSON string of build properties.
      task_output_dir: swarming task output directory to scan.
      smoke_test_mode: when True, skip the perf dashboard upload.
      output_results_dir: directory for per-benchmark upload result files.
      lightweight: split the work across two invocations (see below).
      skip_perf: selects which half runs in lightweight mode.

    Returns:
      (return_code, upload_results_map):
      return_code is 0 if the whole operation is successful, non zero otherwise.
      benchmark_upload_result_map: the dictionary that describe which benchmarks
        were successfully uploaded.
    """
    # In lightweight mode the work is split: skip_perf=False runs only the
    # perf-dashboard upload half, skip_perf=True runs only the non-perf half
    # (logdog uploads + merged json).  When not lightweight, both halves run.
    handle_perf = not lightweight or not skip_perf
    handle_non_perf = not lightweight or skip_perf
    logging.info('lightweight mode: %r; handle_perf: %r; handle_non_perf: %r' %
                 (lightweight, handle_perf, handle_non_perf))
    begin_time = time.time()
    return_code = 0
    benchmark_upload_result_map = {}
    benchmark_directory_map, benchmarks_shard_map_file = _scan_output_dir(task_output_dir)
    test_results_list = []
    extra_links = {}
    if handle_non_perf:
        # First, upload benchmarks shard map to logdog and add a page
        # entry for it in extra_links.
        if benchmarks_shard_map_file:
            _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links)
        # Second, upload all the benchmark logs to logdog and add a page entry for
        # those links in extra_links.
        _handle_perf_logs(benchmark_directory_map, extra_links)
    # Then try to obtain the list of json test results to merge
    # and determine the status of each benchmark.
    benchmark_enabled_map = _handle_perf_json_test_results(benchmark_directory_map,
                                                           test_results_list)
    build_properties_map = json.loads(build_properties)
    if not configuration_name:
        # we are deprecating perf-id crbug.com/817823
        configuration_name = build_properties_map['buildername']
    if not smoke_test_mode and handle_perf:
        try:
            return_code, benchmark_upload_result_map = _handle_perf_results(
                benchmark_enabled_map, benchmark_directory_map, configuration_name,
                build_properties_map, extra_links, output_results_dir)
        except Exception:
            logging.exception('Error handling perf results jsons')
            return_code = 1
    if handle_non_perf:
        # Finally, merge all test results json, add the extra links and write out to
        # output location
        try:
            _merge_json_output(output_json, test_results_list, extra_links,
                               configuration_name in cross_device_test_config.TARGET_DEVICES)
        except Exception:
            logging.exception('Error handling test results jsons.')
    end_time = time.time()
    print_duration('Total process_perf_results', begin_time, end_time)
    return return_code, benchmark_upload_result_map
def _merge_chartjson_results(chartjson_dicts):
    """Folds shard ChartJSON dicts into the first one.

    Only the 'charts' mapping is merged; charts from later shards overwrite
    same-named entries.  The first dict is mutated in place and returned; its
    other top-level keys are kept as-is.
    """
    merged = chartjson_dicts[0]
    for shard_result in chartjson_dicts[1:]:
        if 'charts' not in shard_result:
            continue
        for chart_name, chart in shard_result['charts'].items():
            merged['charts'][chart_name] = chart
    return merged
def _merge_histogram_results(histogram_lists):
    """Concatenates shard histogram lists into one flat list."""
    return [histogram for shard in histogram_lists for histogram in shard]
def _merge_perf_results(benchmark_name, results_filename, directories):
    """Merges per-shard perf_results.json files into results_filename.

    Shards are assumed to share one format: dicts are ChartJSON, lists are
    HistogramSets.  Shard files that cannot be read are logged and skipped;
    if none could be read, nothing is written.
    """
    start = time.time()
    collected_results = []
    for directory in directories:
        shard_file = os.path.join(directory, 'perf_results.json')
        try:
            with open(shard_file) as pf:
                collected_results.append(json.load(pf))
        except IOError as e:
            # TODO(crbug.com/936602): Figure out how to surface these errors.
            # Should we have a non-zero exit code if we error out?
            logging.error('Failed to obtain perf results from %s: %s', directory, e)
    if not collected_results:
        logging.error('Failed to obtain any perf results from %s.', benchmark_name)
        return
    # Assuming that multiple shards will only be chartjson or histogram set.
    # Non-telemetry benchmarks only ever run on one shard.
    merged_results = []
    if isinstance(collected_results[0], dict):
        merged_results = _merge_chartjson_results(collected_results)
    elif isinstance(collected_results[0], list):
        merged_results = _merge_histogram_results(collected_results)
    with open(results_filename, 'w') as rf:
        json.dump(merged_results, rf)
    print_duration(('%s results merging' % (benchmark_name)), start, time.time())
def _upload_individual(benchmark_name, directories, configuration_name, build_properties,
                       output_json_file):
    """Uploads one benchmark's (possibly multi-shard) perf results.

    Returns:
      (benchmark_name, succeeded) where succeeded is True when the upload
      script exited with 0.
    """
    scratch_dir = tempfile.mkdtemp()
    try:
        started = time.time()
        # There are potentially multiple directories with results; re-write
        # and merge them if necessary.
        if len(directories) > 1:
            merge_dir = os.path.join(os.path.abspath(scratch_dir), benchmark_name)
            if not os.path.exists(merge_dir):
                os.makedirs(merge_dir)
            results_path = os.path.join(merge_dir, 'merged_perf_results.json')
            _merge_perf_results(benchmark_name, results_path, directories)
        else:
            # Only one shard wrote results; use its file directly.
            results_path = os.path.join(directories[0], 'perf_results.json')
        size_in_mib = os.path.getsize(results_path) / (2**20)
        logging.info('Uploading perf results from %s benchmark (size %s Mib)' %
                     (benchmark_name, size_in_mib))
        with open(output_json_file, 'w') as oj:
            return_code = _upload_perf_results(results_path, benchmark_name, configuration_name,
                                               build_properties, oj)
        print_duration(('%s upload time' % (benchmark_name)), started, time.time())
        return (benchmark_name, return_code == 0)
    finally:
        shutil.rmtree(scratch_dir)
def _upload_individual_benchmark(params):
    """multiprocessing.Pool-friendly wrapper around _upload_individual.

    Never raises: any exception is logged and reported as a failed upload so
    one bad benchmark cannot abort the whole pool run.
    """
    benchmark_name = params[0]
    try:
        return _upload_individual(*params)
    except Exception:
        logging.exception('Error uploading perf result of %s' % benchmark_name)
        return benchmark_name, False
def _GetCpuCount(log=True):
    """Returns the number of worker processes to use on this bot.

    Capped at 56 on Windows (crbug.com/1190269: Python 3 may hang with more)
    and falls back to 4 when the platform cannot report a count.

    Args:
      log: when True, log a warning if the CPU count cannot be determined.
    """
    try:
        cpu_count = multiprocessing.cpu_count()
        if sys.platform == 'win32':
            # TODO(crbug.com/1190269) - we can't use more than 56
            # cores on Windows or Python3 may hang.
            cpu_count = min(cpu_count, 56)
        return cpu_count
    except NotImplementedError:
        if log:
            # Fixed: logging.warn is a deprecated alias of logging.warning.
            logging.warning('Failed to get a CPU count for this bot. See crbug.com/947035.')
    # TODO(crbug.com/948281): This is currently set to 4 since the mac masters
    # only have 4 cores. Once we move to all-linux, this can be increased or
    # we can even delete this whole function and use multiprocessing.cpu_count()
    # directly.
    return 4
def _handle_perf_results(benchmark_enabled_map, benchmark_directory_map, configuration_name,
                         build_properties, extra_links, output_results_dir):
    """Uploads perf results to the perf dashboard and mirrors them to logdog.

    Each enabled benchmark is uploaded in a worker process; per-benchmark
    perf data is also written to a logdog stream whose link is added to
    |extra_links|.

    Args:
      benchmark_enabled_map: Dict mapping benchmark name -> bool; only
        benchmarks mapped to True are uploaded.
      benchmark_directory_map: Dict mapping benchmark name -> list of
        directories holding that benchmark's results.
      configuration_name: Name of the bot configuration (perf-id).
      build_properties: Buildbot properties dict passed through to the
        upload helpers.
      extra_links: Dict (mutated in place) mapping log labels to logdog
        stream URLs.
      output_results_dir: Directory in which per-benchmark JSON output
        files are created.

    Returns:
      (return_code, benchmark_upload_result_map)
      return_code is 0 if this upload to perf dashboard successfully, 1
      otherwise.
      benchmark_upload_result_map is a dictionary describes which benchmark
      was successfully uploaded.
    """
    begin_time = time.time()
    # Upload all eligible benchmarks to the perf dashboard
    results_dict = {}
    invocations = []
    for benchmark_name, directories in benchmark_directory_map.items():
        if not benchmark_enabled_map.get(benchmark_name, False):
            continue
        # Create a place to write the perf results that you will write out to
        # logdog.
        # The uuid prefix keeps concurrent runs of the same benchmark from
        # colliding on one file name.
        output_json_file = os.path.join(output_results_dir, (str(uuid.uuid4()) + benchmark_name))
        results_dict[benchmark_name] = output_json_file
        #TODO(crbug.com/1072729): pass final arguments instead of build properties
        # and configuration_name
        invocations.append(
            (benchmark_name, directories, configuration_name, build_properties, output_json_file))
    # Kick off the uploads in multiple processes
    # crbug.com/1035930: We are hitting HTTP Response 429. Limit ourselves
    # to 2 processes to avoid this error. Uncomment the following code once
    # the problem is fixed on the dashboard side.
    # pool = multiprocessing.Pool(_GetCpuCount())
    pool = multiprocessing.Pool(2)
    upload_result_timeout = False
    try:
        async_result = pool.map_async(_upload_individual_benchmark, invocations)
        # TODO(crbug.com/947035): What timeout is reasonable?
        results = async_result.get(timeout=4000)
    except multiprocessing.TimeoutError:
        # On timeout, mark every benchmark as failed so the result map is
        # still fully populated below.
        upload_result_timeout = True
        logging.error('Timeout uploading benchmarks to perf dashboard in parallel')
        results = []
        for benchmark_name in benchmark_directory_map:
            results.append((benchmark_name, False))
    finally:
        pool.terminate()
    # Keep a mapping of benchmarks to their upload results
    benchmark_upload_result_map = {}
    for r in results:
        benchmark_upload_result_map[r[0]] = r[1]
    logdog_dict = {}
    upload_failures_counter = 0
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    # Write each benchmark's perf data (and its upload status) to logdog.
    for benchmark_name, output_file in results_dict.items():
        upload_succeed = benchmark_upload_result_map[benchmark_name]
        if not upload_succeed:
            upload_failures_counter += 1
        is_reference = '.reference' in benchmark_name
        _write_perf_data_to_logfile(
            benchmark_name,
            output_file,
            configuration_name,
            build_properties,
            logdog_dict,
            is_reference,
            upload_failure=not upload_succeed)
    # Publish the aggregated per-benchmark link dict as one logdog stream.
    logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
    logdog_stream = logdog_helper.text(
        logdog_file_name,
        json.dumps(dict(logdog_dict), sort_keys=True, indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    if upload_failures_counter > 0:
        logdog_label += (' %s merge script perf data upload failures' % upload_failures_counter)
    extra_links[logdog_label] = logdog_stream
    end_time = time.time()
    print_duration('Uploading results to perf dashboard', begin_time, end_time)
    if upload_result_timeout or upload_failures_counter > 0:
        return 1, benchmark_upload_result_map
    return 0, benchmark_upload_result_map
def _write_perf_data_to_logfile(benchmark_name, output_file, configuration_name, build_properties,
logdog_dict, is_ref, upload_failure):
viewer_url = None
# logdog file to write perf results to
if os.path.exists(output_file):
results = None
with open(output_file) as f:
try:
results = json.load(f)
except ValueError:
logging.error('Error parsing perf results JSON for benchmark %s' % benchmark_name)
if results:
try:
output_json_file = logdog_helper.open_text(benchmark_name)
json.dump(results, output_json_file, indent=4, separators=(',', ': '))
except ValueError as e:
logging.error('ValueError: "%s" while dumping output to logdog' % e)
finally:
output_json_file.close()
viewer_url = output_json_file.get_viewer_url()
else:
logging.warning("Perf results JSON file doesn't exist for benchmark %s" % benchmark_name)
base_benchmark_name = benchmark_name.replace('.reference', '')
if base_benchmark_name not in logdog_dict:
logdog_dict[base_benchmark_name] = {}
# add links for the perf results and the dashboard url to
# the logs section of buildbot
if is_ref:
if viewer_url:
logdog_dict[base_benchmark_name]['perf_results_ref'] = viewer_url
if upload_failure:
logdog_dict[base_benchmark_name]['ref_upload_failed'] = 'True'
else:
logdog_dict[base_benchmark_name]['dashboard_url'] = (
upload_results_to_perf_dashboard.GetDashboardUrl(benchmark_name, configuration_name,
RESULTS_URL,
build_properties['got_revision_cp'],
_GetMachineGroup(build_properties)))
if viewer_url:
logdog_dict[base_benchmark_name]['perf_results'] = viewer_url
if upload_failure:
logdog_dict[base_benchmark_name]['upload_failed'] = 'True'
def print_duration(step, start, end):
    """Logs how many whole seconds the named step took."""
    elapsed_seconds = end - start
    logging.info('Duration of %s: %d seconds' % (step, elapsed_seconds))
def main():
    """ See collect_task.collect_task for more on the merge script API. """
    # Log the full command line for debugging bot invocations.
    logging.info(sys.argv)
    parser = argparse.ArgumentParser()
    # configuration-name (previously perf-id) is the name of bot the tests run on
    # For example, buildbot-test is the name of the android-go-perf bot
    # configuration-name and results-url are set in the json file which is going
    # away tools/perf/core/chromium.perf.fyi.extras.json
    parser.add_argument('--configuration-name', help=argparse.SUPPRESS)
    parser.add_argument('--build-properties', help=argparse.SUPPRESS)
    parser.add_argument('--summary-json', help=argparse.SUPPRESS)
    parser.add_argument('--task-output-dir', help=argparse.SUPPRESS)
    parser.add_argument('-o', '--output-json', required=True, help=argparse.SUPPRESS)
    parser.add_argument(
        '--skip-perf',
        action='store_true',
        help='In lightweight mode, using --skip-perf will skip the performance'
        ' data handling.')
    parser.add_argument(
        '--lightweight',
        action='store_true',
        help='Choose the lightweight mode in which the perf result handling'
        ' is performed on a separate VM.')
    parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
    parser.add_argument(
        '--smoke-test-mode',
        action='store_true',
        help='This test should be run in smoke test mode'
        ' meaning it does not upload to the perf dashboard')
    args = parser.parse_args()
    # Scratch directory for intermediate per-benchmark results; always
    # cleaned up, even if processing raises.
    output_results_dir = tempfile.mkdtemp('outputresults')
    try:
        # process_perf_results returns (return_code, benchmark_upload_result_map);
        # only the return code is surfaced to the caller.
        return_code, _ = process_perf_results(args.output_json, args.configuration_name,
                                              args.build_properties, args.task_output_dir,
                                              args.smoke_test_mode, output_results_dir,
                                              args.lightweight, args.skip_perf)
        return return_code
    finally:
        shutil.rmtree(output_results_dir)
# Standard script entry point: propagate main()'s return code as the
# process exit status so the calling harness can detect failures.
if __name__ == '__main__':
    sys.exit(main())
# Copyright 2015 The Chromium Authors. All rights reserved.
#!/usr/bin/env python
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Placeholder file to allow Python imports to function. """
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment