Commit a10b2d07 by Jonah Ryan-Davis Committed by Commit Bot

Modify 'bb ls' command for generate_stats for more precision.

We were using 'bb ls ... -A' which combines '-p' and '-steps', but we only parse information from '-p'. Updates the script to only call what it needs. Also works around current issue with '-A'. Bug: angleproject:3429 Change-Id: I10d412885663feefd300eb135dae70b499fedd7f Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/1600334 Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org> Reviewed-by: Geoff Lang <geofflang@chromium.org> Commit-Queue: Jonah Ryan-Davis <jonahr@google.com>
parent 5cbaa3f8
...@@ -156,103 +156,104 @@ INFO_TAG = '*RESULT' ...@@ -156,103 +156,104 @@ INFO_TAG = '*RESULT'
# Returns a struct with info about the latest successful build given a bot name. Info contains the # Returns a struct with info about the latest successful build given a bot name. Info contains the
# build_name, time, date, angle_revision, and chrome revision. # build_name, time, date, angle_revision, and chrome revision.
# Uses: bb ls '<botname>' -n 1 -status success -A # Uses: bb ls '<botname>' -n 1 -status success -p
def get_latest_success_build_info(bot_name): def get_latest_success_build_info(bot_name):
bb = subprocess.Popen(['bb', 'ls', bot_name, '-n', '1', '-status', 'success', '-A'], bb = subprocess.Popen(['bb', 'ls', bot_name, '-n', '1', '-status', 'success', '-p'],
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb ls '" + bot_name + "' -n 1 -status success -A]") LOGGER.debug("Ran [bb ls '" + bot_name + "' -n 1 -status success -p]")
out, err = bb.communicate() out, err = bb.communicate()
if err: if err:
raise ValueError("Unexpected error from bb ls: '" + err + "'") raise ValueError("Unexpected error from bb ls: '" + err + "'")
if not out: if not out:
raise ValueError("Unexpected empty result from bb ls of bot '" + bot_name + "'") raise ValueError("Unexpected empty result from bb ls of bot '" + bot_name + "'")
# Example output (line 1): # Example output (line 1):
# ci.chromium.org/b/8915280275579996928 SUCCESS 'chromium/ci/Win10 FYI dEQP Release (NVIDIA)/26877' # ci.chromium.org/b/8915280275579996928 SUCCESS 'chromium/ci/Win10 FYI dEQP Release (NVIDIA)/26877'
# ... # ...
if 'SUCCESS' not in out: if 'SUCCESS' not in out:
raise ValueError("Unexpected result from bb ls: '" + out + "'") raise ValueError("Unexpected result from bb ls: '" + out + "'")
info = {} info = {}
for line in out.splitlines(): for line in out.splitlines():
# The first line holds the build name # The first line holds the build name
if 'build_name' not in info:
info['build_name'] = line.strip().split("'")[1]
# Remove the bot name and prepend the build link
info['build_link'] = BUILD_LINK_PREFIX + urllib.quote(
info['build_name'].split(BOT_NAME_PREFIX)[1])
if 'Created' in line:
# Example output of line with 'Created':
# ...
# Created today at 12:26:39, waited 2.056319s, started at 12:26:41, ran for 1h16m48.14963s, ended at 13:43:30
# ...
info['time'] = re.findall(r'[0-9]{1,2}:[0-9]{2}:[0-9]{2}', line.split(',', 1)[0])[0]
# Format today's date in US format so Sheets can read it properly
info['date'] = datetime.datetime.now().strftime('%m/%d/%y')
if 'got_angle_revision' in line:
# Example output of line with angle revision:
# ...
# "parent_got_angle_revision": "8cbd321cafa92ffbf0495e6d0aeb9e1a97940fee",
# ...
info['angle_revision'] = filter(str.isalnum, line.split(':')[1])
if '"revision"' in line:
# Example output of line with chromium revision:
# ...
# "revision": "3b68405a27f1f9590f83ae07757589dba862f141",
# ...
info['revision'] = filter(str.isalnum, line.split(':')[1])
if 'build_name' not in info: if 'build_name' not in info:
info['build_name'] = line.strip().split("'")[1] raise ValueError("Could not find build_name from bot '" + bot_name + "'")
# Remove the bot name and prepend the build link return info
info['build_link'] = BUILD_LINK_PREFIX + urllib.quote(
info['build_name'].split(BOT_NAME_PREFIX)[1])
if 'Created' in line:
# Example output of line with 'Created':
# ...
# Created today at 12:26:39, waited 2.056319s, started at 12:26:41, ran for 1h16m48.14963s, ended at 13:43:30
# ...
info['time'] = re.findall(r'[0-9]{1,2}:[0-9]{2}:[0-9]{2}', line.split(',', 1)[0])[0]
# Format today's date in US format so Sheets can read it properly
info['date'] = datetime.datetime.now().strftime('%m/%d/%y')
if 'got_angle_revision' in line:
# Example output of line with angle revision:
# ...
# "parent_got_angle_revision": "8cbd321cafa92ffbf0495e6d0aeb9e1a97940fee",
# ...
info['angle_revision'] = filter(str.isalnum, line.split(':')[1])
if '"revision"' in line:
# Example output of line with chromium revision:
# ...
# "revision": "3b68405a27f1f9590f83ae07757589dba862f141",
# ...
info['revision'] = filter(str.isalnum, line.split(':')[1])
if 'build_name' not in info:
raise ValueError("Could not find build_name from bot '" + bot_name + "'")
return info
# Returns a list of step names that we're interested in given a build name. We are interested in # Returns a list of step names that we're interested in given a build name. We are interested in
# step names starting with 'angle_'. May raise an exception. # step names starting with 'angle_'. May raise an exception.
# Uses: bb get '<build_name>' -steps # Uses: bb get '<build_name>' -steps
def get_step_names(build_name): def get_step_names(build_name):
bb = subprocess.Popen(['bb', 'get', build_name, '-steps'], bb = subprocess.Popen(['bb', 'get', build_name, '-steps'],
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb get '" + build_name + "' -steps]") LOGGER.debug("Ran [bb get '" + build_name + "' -steps]")
out, err = bb.communicate() out, err = bb.communicate()
if err: if err:
raise ValueError("Unexpected error from bb get: '" + err + "'") raise ValueError("Unexpected error from bb get: '" + err + "'")
step_names = [] step_names = []
# Example output (relevant lines to a single step): # Example output (relevant lines to a single step):
# ... # ...
# Step "angle_deqp_egl_vulkan_tests on (nvidia-quadro-p400-win10-stable) GPU on Windows on Windows-10" SUCCESS 4m12s Logs: "stdout", "chromium_swarming.summary", "Merge script log", "Flaky failure: dEQP.EGL&#x2f;info_version (status CRASH,SUCCESS)", "step_metadata" # Step "angle_deqp_egl_vulkan_tests on (nvidia-quadro-p400-win10-stable) GPU on Windows on Windows-10" SUCCESS 4m12s Logs: "stdout", "chromium_swarming.summary", "Merge script log", "Flaky failure: dEQP.EGL&#x2f;info_version (status CRASH,SUCCESS)", "step_metadata"
# Run on OS: 'Windows-10'<br>Max shard duration: 0:04:07.309848 (shard \#1)<br>Min shard duration: 0:02:26.402128 (shard \#0)<br/>flaky failures [ignored]:<br/>dEQP.EGL/info\_version<br/> # Run on OS: 'Windows-10'<br>Max shard duration: 0:04:07.309848 (shard \#1)<br>Min shard duration: 0:02:26.402128 (shard \#0)<br/>flaky failures [ignored]:<br/>dEQP.EGL/info\_version<br/>
# * [shard #0 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=9a5999a59d332e55f54f495948d0c9f959e60ed2) # * [shard #0 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=9a5999a59d332e55f54f495948d0c9f959e60ed2)
# * [shard #0 (128.3 sec)](https://chromium-swarm.appspot.com/user/task/446903ae365b8110) # * [shard #0 (128.3 sec)](https://chromium-swarm.appspot.com/user/task/446903ae365b8110)
# * [shard #1 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=d71e1bdd91dee61b536b4057a9222e642bd3809f) # * [shard #1 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=d71e1bdd91dee61b536b4057a9222e642bd3809f)
# * [shard #1 (229.3 sec)](https://chromium-swarm.appspot.com/user/task/446903b7b0d90210) # * [shard #1 (229.3 sec)](https://chromium-swarm.appspot.com/user/task/446903b7b0d90210)
# * [shard #2 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=ac9ba85b1cca77774061b87335c077980e1eef85) # * [shard #2 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=ac9ba85b1cca77774061b87335c077980e1eef85)
# * [shard #2 (144.5 sec)](https://chromium-swarm.appspot.com/user/task/446903c18e15a010) # * [shard #2 (144.5 sec)](https://chromium-swarm.appspot.com/user/task/446903c18e15a010)
# * [shard #3 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=976d586386864abecf53915fbac3e085f672e30f) # * [shard #3 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=976d586386864abecf53915fbac3e085f672e30f)
# * [shard #3 (138.4 sec)](https://chromium-swarm.appspot.com/user/task/446903cc8da0ad10) # * [shard #3 (138.4 sec)](https://chromium-swarm.appspot.com/user/task/446903cc8da0ad10)
# ... # ...
for line in out.splitlines(): for line in out.splitlines():
if 'Step "angle_' not in line: if 'Step "angle_' not in line:
continue continue
step_names.append(line.split('"')[1]) step_names.append(line.split('"')[1])
return step_names return step_names
# Performs some heuristic validation of the step_info struct returned from a single step log. # Performs some heuristic validation of the step_info struct returned from a single step log.
# Returns True if valid, False if invalid. May write to stderr # Returns True if valid, False if invalid. May write to stderr
def validate_step_info(step_info, build_name, step_name): def validate_step_info(step_info, build_name, step_name):
print_name = "'" + build_name + "': '" + step_name + "'" print_name = "'" + build_name + "': '" + step_name + "'"
if not step_info: if not step_info:
LOGGER.warning('Step info empty for ' + print_name + '\n') LOGGER.warning('Step info empty for ' + print_name + '\n')
return False return False
if 'Total' in step_info: if 'Total' in step_info:
partial_sum_keys = ['Passed', 'Failed', 'Skipped', 'Not Supported', 'Exception', 'Crashed'] partial_sum_keys = ['Passed', 'Failed', 'Skipped', 'Not Supported', 'Exception', 'Crashed']
partial_sum_values = [int(step_info[key]) for key in partial_sum_keys if key in step_info] partial_sum_values = [int(step_info[key]) for key in partial_sum_keys if key in step_info]
computed_total = sum(partial_sum_values) computed_total = sum(partial_sum_values)
if step_info['Total'] != computed_total: if step_info['Total'] != computed_total:
LOGGER.warning('Step info does not sum to total for ' + print_name + ' | Total: ' + LOGGER.warning('Step info does not sum to total for ' + print_name + ' | Total: ' +
str(step_info['Total']) + ' - Computed total: ' + str(computed_total) + '\n') str(step_info['Total']) + ' - Computed total: ' + str(computed_total) +
return True '\n')
return True
# Returns a struct containing parsed info from a given step log. The info is parsed by looking for # Returns a struct containing parsed info from a given step log. The info is parsed by looking for
...@@ -261,78 +262,79 @@ def validate_step_info(step_info, build_name, step_name): ...@@ -261,78 +262,79 @@ def validate_step_info(step_info, build_name, step_name):
# May write to stderr # May write to stderr
# Uses: bb log '<build_name>' '<step_name>' # Uses: bb log '<build_name>' '<step_name>'
def get_step_info(build_name, step_name): def get_step_info(build_name, step_name):
bb = subprocess.Popen(['bb', 'log', build_name, step_name], bb = subprocess.Popen(['bb', 'log', build_name, step_name],
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb log '" + build_name + "' '" + step_name + "']") LOGGER.debug("Ran [bb log '" + build_name + "' '" + step_name + "']")
out, err = bb.communicate() out, err = bb.communicate()
if err: if err:
LOGGER.warning("Unexpected error from bb log '" + build_name + "' '" + step_name + "': '" + LOGGER.warning("Unexpected error from bb log '" + build_name + "' '" + step_name + "': '" +
err + "'") err + "'")
return None return None
step_info = {} step_info = {}
# Example output (relevant lines of stdout): # Example output (relevant lines of stdout):
# ... # ...
# *RESULT: Total: 155 # *RESULT: Total: 155
# *RESULT: Passed: 11 # *RESULT: Passed: 11
# *RESULT: Failed: 0 # *RESULT: Failed: 0
# *RESULT: Skipped: 12 # *RESULT: Skipped: 12
# *RESULT: Not Supported: 132 # *RESULT: Not Supported: 132
# *RESULT: Exception: 0 # *RESULT: Exception: 0
# *RESULT: Crashed: 0 # *RESULT: Crashed: 0
# *RESULT: Unexpected Passed: 12 # *RESULT: Unexpected Passed: 12
# ... # ...
append_errors = [] append_errors = []
for line in out.splitlines(): for line in out.splitlines():
if INFO_TAG not in line: if INFO_TAG not in line:
continue continue
found_stat = True found_stat = True
line_columns = line.split(INFO_TAG, 1)[1].split(':') line_columns = line.split(INFO_TAG, 1)[1].split(':')
if len(line_columns) is not 3: if len(line_columns) is not 3:
LOGGER.warning("Line improperly formatted: '" + line + "'\n") LOGGER.warning("Line improperly formatted: '" + line + "'\n")
continue continue
key = line_columns[1].strip() key = line_columns[1].strip()
# If the value is clearly an int, sum it. Otherwise, concatenate it as a string # If the value is clearly an int, sum it. Otherwise, concatenate it as a string
isInt = False isInt = False
intVal = 0 intVal = 0
try: try:
intVal = int(line_columns[2]) intVal = int(line_columns[2])
if intVal is not None: if intVal is not None:
isInt = True isInt = True
except Exception as error: except Exception as error:
isInt = False isInt = False
if isInt: if isInt:
if key not in step_info: if key not in step_info:
step_info[key] = 0 step_info[key] = 0
step_info[key] += intVal step_info[key] += intVal
else:
if key not in step_info:
step_info[key] = line_columns[2].strip()
else:
append_string = '\n' + line_columns[2].strip()
# Sheets has a limit of 50000 characters per cell, so make sure to stop appending below
# this limit
if len(step_info[key]) + len(append_string) < 50000:
step_info[key] += append_string
else: else:
if key not in append_errors: if key not in step_info:
append_errors.append(key) step_info[key] = line_columns[2].strip()
LOGGER.warning("Too many characters in column '" + key + "'. Output capped.") else:
append_string = '\n' + line_columns[2].strip()
if validate_step_info(step_info, build_name, step_name): # Sheets has a limit of 50000 characters per cell, so make sure to stop appending below
return step_info # this limit
return None if len(step_info[key]) + len(append_string) < 50000:
step_info[key] += append_string
else:
if key not in append_errors:
append_errors.append(key)
LOGGER.warning("Too many characters in column '" + key +
"'. Output capped.")
if validate_step_info(step_info, build_name, step_name):
return step_info
return None
# Returns the info for each step run on a given bot_name. # Returns the info for each step run on a given bot_name.
def get_bot_info(bot_name): def get_bot_info(bot_name):
info = get_latest_success_build_info(bot_name) info = get_latest_success_build_info(bot_name)
info['step_names'] = get_step_names(info['build_name']) info['step_names'] = get_step_names(info['build_name'])
for step_name in info['step_names']: for step_name in info['step_names']:
LOGGER.info("Parsing step '" + step_name + "'...") LOGGER.info("Parsing step '" + step_name + "'...")
info[step_name] = get_step_info(info['build_name'], step_name) info[step_name] = get_step_info(info['build_name'], step_name)
return info return info
##################### #####################
...@@ -343,220 +345,220 @@ def get_bot_info(bot_name): ...@@ -343,220 +345,220 @@ def get_bot_info(bot_name):
# Get an individual spreadsheet based on the spreadsheet id. Returns the result of # Get an individual spreadsheet based on the spreadsheet id. Returns the result of
# spreadsheets.get(), or throws an exception if the sheet could not open. # spreadsheets.get(), or throws an exception if the sheet could not open.
def get_spreadsheet(service, spreadsheet_id): def get_spreadsheet(service, spreadsheet_id):
LOGGER.debug("Called [spreadsheets.get(spreadsheetId='" + spreadsheet_id + "')]") LOGGER.debug("Called [spreadsheets.get(spreadsheetId='" + spreadsheet_id + "')]")
request = service.get(spreadsheetId=spreadsheet_id) request = service.get(spreadsheetId=spreadsheet_id)
spreadsheet = request.execute() spreadsheet = request.execute()
if not spreadsheet: if not spreadsheet:
raise Exception("Did not open spreadsheet '" + spreadsheet_id + "'") raise Exception("Did not open spreadsheet '" + spreadsheet_id + "'")
return spreadsheet return spreadsheet
# Returns a nicely formatted string based on the bot_name and step_name # Returns a nicely formatted string based on the bot_name and step_name
def format_sheet_name(bot_name, step_name): def format_sheet_name(bot_name, step_name):
# Some tokens should be ignored for readability in the name # Some tokens should be ignored for readability in the name
unneccesary_tokens = ['FYI', 'Release', 'Vk', 'dEQP', '(', ')'] unneccesary_tokens = ['FYI', 'Release', 'Vk', 'dEQP', '(', ')']
for token in unneccesary_tokens: for token in unneccesary_tokens:
bot_name = bot_name.replace(token, '') bot_name = bot_name.replace(token, '')
bot_name = ' '.join(bot_name.strip().split()) # Remove extra spaces bot_name = ' '.join(bot_name.strip().split()) # Remove extra spaces
step_name = re.findall(r'angle\w*', step_name)[0] # Separate test name step_name = re.findall(r'angle\w*', step_name)[0] # Separate test name
# Test names are formatted as 'angle_deqp_<frontend>_<backend>_tests' # Test names are formatted as 'angle_deqp_<frontend>_<backend>_tests'
new_step_name = '' new_step_name = ''
# Put the frontend first # Put the frontend first
if '_egl_' in step_name: if '_egl_' in step_name:
new_step_name += ' EGL' new_step_name += ' EGL'
if '_gles2_' in step_name: if '_gles2_' in step_name:
new_step_name += ' GLES 2.0 ' new_step_name += ' GLES 2.0 '
if '_gles3_' in step_name: if '_gles3_' in step_name:
new_step_name += ' GLES 3.0 ' new_step_name += ' GLES 3.0 '
if '_gles31_' in step_name: if '_gles31_' in step_name:
new_step_name += ' GLES 3.1 ' new_step_name += ' GLES 3.1 '
# Put the backend second # Put the backend second
if '_d3d9_' in step_name: if '_d3d9_' in step_name:
new_step_name += ' D3D9 ' new_step_name += ' D3D9 '
if '_d3d11' in step_name: if '_d3d11' in step_name:
new_step_name += ' D3D11 ' new_step_name += ' D3D11 '
if '_gl_' in step_name: if '_gl_' in step_name:
new_step_name += ' Desktop OpenGL ' new_step_name += ' Desktop OpenGL '
if '_gles_' in step_name: if '_gles_' in step_name:
new_step_name += ' OpenGLES ' new_step_name += ' OpenGLES '
if '_vulkan_' in step_name: if '_vulkan_' in step_name:
new_step_name += ' Vulkan ' new_step_name += ' Vulkan '
new_step_name = ' '.join(new_step_name.strip().split()) # Remove extra spaces new_step_name = ' '.join(new_step_name.strip().split()) # Remove extra spaces
return new_step_name + ' ' + bot_name return new_step_name + ' ' + bot_name
# Returns the full list of sheet names that should be populated based on the info struct # Returns the full list of sheet names that should be populated based on the info struct
def get_sheet_names(info): def get_sheet_names(info):
sheet_names = [] sheet_names = []
for bot_name in info: for bot_name in info:
for step_name in info[bot_name]['step_names']: for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name) sheet_name = format_sheet_name(bot_name, step_name)
sheet_names.append(sheet_name) sheet_names.append(sheet_name)
return sheet_names return sheet_names
# Returns True if the sheet is found in the spreadsheets object # Returns True if the sheet is found in the spreadsheets object
def sheet_exists(spreadsheet, step_name): def sheet_exists(spreadsheet, step_name):
for sheet in spreadsheet['sheets']: for sheet in spreadsheet['sheets']:
if sheet['properties']['title'] == step_name: if sheet['properties']['title'] == step_name:
return True return True
return False return False
# Validates the spreadsheets object against the list of sheet names which should appear. Returns a # Validates the spreadsheets object against the list of sheet names which should appear. Returns a
# list of sheets that need creation. # list of sheets that need creation.
def validate_sheets(spreadsheet, sheet_names): def validate_sheets(spreadsheet, sheet_names):
create_sheets = [] create_sheets = []
for sheet_name in sheet_names: for sheet_name in sheet_names:
if not sheet_exists(spreadsheet, sheet_name): if not sheet_exists(spreadsheet, sheet_name):
create_sheets.append(sheet_name) create_sheets.append(sheet_name)
return create_sheets return create_sheets
# Performs a batch update with a given service, spreadsheet id, and list <object(Request)> of # Performs a batch update with a given service, spreadsheet id, and list <object(Request)> of
# updates to do. # updates to do.
def batch_update(service, spreadsheet_id, updates): def batch_update(service, spreadsheet_id, updates):
batch_update_request_body = { batch_update_request_body = {
'requests': updates, 'requests': updates,
} }
LOGGER.debug("Called [spreadsheets.batchUpdate(spreadsheetId='" + spreadsheet_id + "', body=" + LOGGER.debug("Called [spreadsheets.batchUpdate(spreadsheetId='" + spreadsheet_id + "', body=" +
str(batch_update_request_body) + ')]') str(batch_update_request_body) + ')]')
request = service.batchUpdate(spreadsheetId=spreadsheet_id, body=batch_update_request_body) request = service.batchUpdate(spreadsheetId=spreadsheet_id, body=batch_update_request_body)
request.execute() request.execute()
# Creates sheets given a service and spreadsheed id based on a list of sheet names input # Creates sheets given a service and spreadsheed id based on a list of sheet names input
def create_sheets(service, spreadsheet_id, sheet_names): def create_sheets(service, spreadsheet_id, sheet_names):
updates = [{'addSheet': {'properties': {'title': sheet_name,}}} for sheet_name in sheet_names] updates = [{'addSheet': {'properties': {'title': sheet_name,}}} for sheet_name in sheet_names]
batch_update(service, spreadsheet_id, updates) batch_update(service, spreadsheet_id, updates)
# Calls a values().batchGet() on the service to find the list of column names from each sheet in # Calls a values().batchGet() on the service to find the list of column names from each sheet in
# sheet_names. Returns a dictionary with one list per sheet_name. # sheet_names. Returns a dictionary with one list per sheet_name.
def get_headers(service, spreadsheet_id, sheet_names): def get_headers(service, spreadsheet_id, sheet_names):
header_ranges = [sheet_name + '!A1:Z' for sheet_name in sheet_names] header_ranges = [sheet_name + '!A1:Z' for sheet_name in sheet_names]
LOGGER.debug("Called [spreadsheets.values().batchGet(spreadsheetId='" + spreadsheet_id + LOGGER.debug("Called [spreadsheets.values().batchGet(spreadsheetId='" + spreadsheet_id +
', ranges=' + str(header_ranges) + "')]") ', ranges=' + str(header_ranges) + "')]")
request = service.values().batchGet(spreadsheetId=spreadsheet_id, ranges=header_ranges) request = service.values().batchGet(spreadsheetId=spreadsheet_id, ranges=header_ranges)
response = request.execute() response = request.execute()
headers = {} headers = {}
for k, sheet_name in enumerate(sheet_names): for k, sheet_name in enumerate(sheet_names):
if 'values' in response['valueRanges'][k]: if 'values' in response['valueRanges'][k]:
# Headers are in the first row of values # Headers are in the first row of values
headers[sheet_name] = response['valueRanges'][k]['values'][0] headers[sheet_name] = response['valueRanges'][k]['values'][0]
else: else:
headers[sheet_name] = [] headers[sheet_name] = []
return headers return headers
# Calls values().batchUpdate() with supplied list of data <object(ValueRange)> to update on the # Calls values().batchUpdate() with supplied list of data <object(ValueRange)> to update on the
# service. # service.
def batch_update_values(service, spreadsheet_id, data): def batch_update_values(service, spreadsheet_id, data):
batch_update_values_request_body = { batch_update_values_request_body = {
'valueInputOption': 'USER_ENTERED', # Helps with formatting of dates 'valueInputOption': 'USER_ENTERED', # Helps with formatting of dates
'data': data, 'data': data,
} }
LOGGER.debug("Called [spreadsheets.values().batchUpdate(spreadsheetId='" + spreadsheet_id + LOGGER.debug("Called [spreadsheets.values().batchUpdate(spreadsheetId='" + spreadsheet_id +
"', body=" + str(batch_update_values_request_body) + ')]') "', body=" + str(batch_update_values_request_body) + ')]')
request = service.values().batchUpdate( request = service.values().batchUpdate(
spreadsheetId=spreadsheet_id, body=batch_update_values_request_body) spreadsheetId=spreadsheet_id, body=batch_update_values_request_body)
request.execute() request.execute()
# Populates the headers with any missing/desired rows based on the info struct, and calls # Populates the headers with any missing/desired rows based on the info struct, and calls
# batch update to update the corresponding sheets if necessary. # batch update to update the corresponding sheets if necessary.
def update_headers(service, spreadsheet_id, headers, info): def update_headers(service, spreadsheet_id, headers, info):
data = [] data = []
sheet_names = [] sheet_names = []
for bot_name in info: for bot_name in info:
for step_name in info[bot_name]['step_names']: for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name) sheet_name = format_sheet_name(bot_name, step_name)
headers_stale = False headers_stale = False
# Headers should always contain the following columns # Headers should always contain the following columns
for req in REQUIRED_COLUMNS: for req in REQUIRED_COLUMNS:
if req not in headers[sheet_name]: if req not in headers[sheet_name]:
headers_stale = True headers_stale = True
headers[sheet_name].append(req) headers[sheet_name].append(req)
# Headers also must contain all the keys seen in this step # Headers also must contain all the keys seen in this step
for key in info[bot_name][step_name].keys(): for key in info[bot_name][step_name].keys():
if key not in headers[sheet_name]: if key not in headers[sheet_name]:
headers_stale = True headers_stale = True
headers[sheet_name].append(key) headers[sheet_name].append(key)
# Update the Gdoc headers if necessary # Update the Gdoc headers if necessary
if headers_stale: if headers_stale:
sheet_names.append(sheet_name) sheet_names.append(sheet_name)
header_range = sheet_name + '!A1:Z' header_range = sheet_name + '!A1:Z'
data.append({ data.append({
'range': header_range, 'range': header_range,
'majorDimension': 'ROWS', 'majorDimension': 'ROWS',
'values': [headers[sheet_name]] 'values': [headers[sheet_name]]
}) })
if data: if data:
LOGGER.info('Updating sheet headers...') LOGGER.info('Updating sheet headers...')
batch_update_values(service, spreadsheet_id, data) batch_update_values(service, spreadsheet_id, data)
# Calls values().append() to append a list of values to a given sheet. # Calls values().append() to append a list of values to a given sheet.
def append_values(service, spreadsheet_id, sheet_name, values): def append_values(service, spreadsheet_id, sheet_name, values):
header_range = sheet_name + '!A1:Z' header_range = sheet_name + '!A1:Z'
insert_data_option = 'INSERT_ROWS' insert_data_option = 'INSERT_ROWS'
value_input_option = 'USER_ENTERED' # Helps with formatting of dates value_input_option = 'USER_ENTERED' # Helps with formatting of dates
append_values_request_body = { append_values_request_body = {
'range': header_range, 'range': header_range,
'majorDimension': 'ROWS', 'majorDimension': 'ROWS',
'values': [values], 'values': [values],
} }
LOGGER.debug("Called [spreadsheets.values().append(spreadsheetId='" + spreadsheet_id + LOGGER.debug("Called [spreadsheets.values().append(spreadsheetId='" + spreadsheet_id +
"', body=" + str(append_values_request_body) + ", range='" + header_range + "', body=" + str(append_values_request_body) + ", range='" + header_range +
"', insertDataOption='" + insert_data_option + "', valueInputOption='" + "', insertDataOption='" + insert_data_option + "', valueInputOption='" +
value_input_option + "')]") value_input_option + "')]")
request = service.values().append( request = service.values().append(
spreadsheetId=spreadsheet_id, spreadsheetId=spreadsheet_id,
body=append_values_request_body, body=append_values_request_body,
range=header_range, range=header_range,
insertDataOption=insert_data_option, insertDataOption=insert_data_option,
valueInputOption=value_input_option) valueInputOption=value_input_option)
request.execute() request.execute()
# Uses the list of headers and the info struct to come up with a list of values for each step # Uses the list of headers and the info struct to come up with a list of values for each step
# from the latest builds. # from the latest builds.
def update_values(service, spreadsheet_id, headers, info): def update_values(service, spreadsheet_id, headers, info):
data = [] data = []
for bot_name in info: for bot_name in info:
for step_name in info[bot_name]['step_names']: for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name) sheet_name = format_sheet_name(bot_name, step_name)
values = [] values = []
# For each key in the list of headers, either add the corresponding value or add a blank # For each key in the list of headers, either add the corresponding value or add a blank
# value. It's necessary for the values to match the order of the headers # value. It's necessary for the values to match the order of the headers
for key in headers[sheet_name]: for key in headers[sheet_name]:
if key in info[bot_name] and key in REQUIRED_COLUMNS: if key in info[bot_name] and key in REQUIRED_COLUMNS:
values.append(info[bot_name][key]) values.append(info[bot_name][key])
elif key in info[bot_name][step_name]: elif key in info[bot_name][step_name]:
values.append(info[bot_name][step_name][key]) values.append(info[bot_name][step_name][key])
else: else:
values.append('') values.append('')
LOGGER.info("Appending new rows to sheet '" + sheet_name + "'...") LOGGER.info("Appending new rows to sheet '" + sheet_name + "'...")
try: try:
append_values(service, spreadsheet_id, sheet_name, values) append_values(service, spreadsheet_id, sheet_name, values)
except Exception as error: except Exception as error:
LOGGER.warning('%s\n' % str(error)) LOGGER.warning('%s\n' % str(error))
# Updates the given spreadsheed_id with the info struct passed in. # Updates the given spreadsheed_id with the info struct passed in.
def update_spreadsheet(service, spreadsheet_id, info): def update_spreadsheet(service, spreadsheet_id, info):
LOGGER.info('Opening spreadsheet...') LOGGER.info('Opening spreadsheet...')
spreadsheet = get_spreadsheet(service, spreadsheet_id) spreadsheet = get_spreadsheet(service, spreadsheet_id)
LOGGER.info('Parsing sheet names...') LOGGER.info('Parsing sheet names...')
sheet_names = get_sheet_names(info) sheet_names = get_sheet_names(info)
new_sheets = validate_sheets(spreadsheet, sheet_names) new_sheets = validate_sheets(spreadsheet, sheet_names)
if new_sheets: if new_sheets:
LOGGER.info('Creating new sheets...') LOGGER.info('Creating new sheets...')
create_sheets(service, spreadsheet_id, new_sheets) create_sheets(service, spreadsheet_id, new_sheets)
LOGGER.info('Parsing sheet headers...') LOGGER.info('Parsing sheet headers...')
headers = get_headers(service, spreadsheet_id, sheet_names) headers = get_headers(service, spreadsheet_id, sheet_names)
update_headers(service, spreadsheet_id, headers, info) update_headers(service, spreadsheet_id, headers, info)
update_values(service, spreadsheet_id, headers, info) update_values(service, spreadsheet_id, headers, info)
##################### #####################
...@@ -567,112 +569,112 @@ def update_spreadsheet(service, spreadsheet_id, info): ...@@ -567,112 +569,112 @@ def update_spreadsheet(service, spreadsheet_id, info):
# Loads or creates credentials and connects to the Sheets API. Returns a Spreadsheets object with
# an open connection.
def get_sheets_service(auth_path):
    creds_file = auth_path + '/credentials.json'
    token_file = auth_path + '/token.pickle'
    credentials = None

    # Make sure the auth directory and the downloaded client config exist
    # before attempting any credential handling.
    if not os.path.exists(auth_path):
        LOGGER.info("Creating auth dir '" + auth_path + "'")
        os.makedirs(auth_path)
    if not os.path.exists(creds_file):
        raise Exception('Missing credentials.json.\n'
                        'Go to: https://developers.google.com/sheets/api/quickstart/python\n'
                        "Under Step 1, click 'ENABLE THE GOOGLE SHEETS API'\n"
                        "Click 'DOWNLOAD CLIENT CONFIGURATION'\n"
                        'Save to your auth_path (' + auth_path + ') as credentials.json')

    # A previously saved token lets us skip the interactive OAuth flow.
    if os.path.exists(token_file):
        with open(token_file, 'rb') as token:
            credentials = pickle.load(token)
        LOGGER.info('Loaded credentials from ' + token_file)

    if not credentials or not credentials.valid:
        if credentials and credentials.expired and credentials.refresh_token:
            LOGGER.info('Refreshing credentials...')
            credentials.refresh(Request())
        else:
            LOGGER.info('Could not find credentials. Requesting new credentials.')
            flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)
            credentials = flow.run_local_server()
        # Persist the (new or refreshed) token for future runs.
        with open(token_file, 'wb') as token:
            pickle.dump(credentials, token)

    service = build('sheets', 'v4', credentials=credentials)
    return service.spreadsheets()
# Parse the input to the script
def parse_args():
    arg_parser = argparse.ArgumentParser(os.path.basename(sys.argv[0]))
    # Where credentials.json / token.pickle live.
    arg_parser.add_argument(
        '--auth_path',
        nargs='?',
        default=HOME_DIR + '/.auth',
        help='path to directory containing authorization data '
        '(credentials.json and token.pickle). '
        '[default=<home>/.auth]')
    # Destination spreadsheet for the stats.
    arg_parser.add_argument(
        '--spreadsheet',
        nargs='?',
        default='1D6Yh7dAPP-aYLbX3HHQD8WubJV9XPuxvkKowmn2qhIw',
        help='ID of the spreadsheet to write stats to. '
        "[default='1D6Yh7dAPP-aYLbX3HHQD8WubJV9XPuxvkKowmn2qhIw']")
    # Logging threshold; matched by substring in initialize_logging.
    arg_parser.add_argument(
        '--verbosity',
        nargs='?',
        default='INFO',
        help='Verbosity of output. Valid options are '
        '[DEBUG, INFO, WARNING, ERROR]. '
        '[default=INFO]')
    return arg_parser.parse_args()
# Set up the logging with the right verbosity and output.
def initialize_logging(verbosity):
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(fmt='%(levelname)s: %(message)s'))
    LOGGER.addHandler(stream_handler)
    # Substring matching (not equality), checked in priority order to mirror
    # the original DEBUG > INFO > WARNING > ERROR precedence.
    for keyword, level in (('DEBUG', logging.DEBUG), ('INFO', logging.INFO),
                           ('WARNING', logging.WARNING), ('ERROR', logging.ERROR)):
        if keyword in verbosity:
            LOGGER.setLevel(level=level)
            break
    else:
        # Unrecognized verbosity falls back to INFO.
        LOGGER.setLevel(level=logging.INFO)
def main():
    """Entry point: parse arguments, gather bot stats, and push them to the sheet.

    Returns a process exit code (0 on success, 1 on a fatal error), which the
    __main__ guard passes to sys.exit(). Fatal errors are a failed Sheets
    connection or a failed spreadsheet update; a single bot failing to parse
    is logged but does not abort the run.
    """
    os.chdir(ROOT_DIR)
    args = parse_args()
    verbosity = args.verbosity.strip().upper()
    initialize_logging(verbosity)
    auth_path = args.auth_path.replace('\\', '/')

    try:
        service = get_sheets_service(auth_path)
    except Exception as error:
        LOGGER.error('%s\n' % str(error))
        # Return the code instead of calling exit()/quit(): those site builtins
        # are meant for interactive use, and main()'s result already feeds
        # sys.exit() at the bottom of the file.
        return 1

    info = {}
    LOGGER.info('Building info struct...')
    for bot_name in BOT_NAMES:
        LOGGER.info("Parsing bot '" + bot_name + "'...")
        try:
            # One bot failing should not abort the whole run; log and continue.
            info[bot_name] = get_bot_info(BOT_NAME_PREFIX + bot_name)
        except Exception as error:
            LOGGER.error('%s\n' % str(error))

    LOGGER.info('Updating sheets...')
    try:
        update_spreadsheet(service, args.spreadsheet, info)
    except Exception as error:
        LOGGER.error('%s\n' % str(error))
        return 1

    LOGGER.info('Info was successfully parsed to sheet: https://docs.google.com/spreadsheets/d/' +
                args.spreadsheet)
    return 0
# Run only when executed as a script; main()'s return value becomes the
# process exit code.
if __name__ == '__main__':
    sys.exit(main())
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment