Bug 1708631 - Update visual metrics script to the latest. (#19332)

upstream-sync
Gregory Mierzwinski 3 years ago committed by GitHub
parent 49025e4669
commit 879f1b9e97

@@ -46,6 +46,9 @@ class Job:
#: The extra options for this job.
extra_options = attr.ib(type=str)
#: If true, we allow 0's in the vismet results
accept_zero_vismet = attr.ib(type=bool)
#: json_path: The path to the ``browsertime.json`` file on disk.
json_path = attr.ib(type=Path)
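
The hunk above adds the flag to the attrs-based Job container. As a minimal sketch of how such a container behaves with the new field (the real Job class in run-visual-metrics.py carries more fields, and the sample values below are invented):

```python
# Minimal sketch of an attrs container carrying the new flag; the real Job
# class in run-visual-metrics.py has more fields than shown here.
from pathlib import Path

import attr


@attr.s
class Job:
    #: The extra options for this job.
    extra_options = attr.ib(type=str)
    #: If true, we allow 0's in the vismet results
    accept_zero_vismet = attr.ib(type=bool)
    #: json_path: The path to the ``browsertime.json`` file on disk.
    json_path = attr.ib(type=Path)


# Invented values purely for illustration.
job = Job(
    extra_options="cold",
    accept_zero_vismet=False,
    json_path=Path("browsertime-results/browsertime.json"),
)
assert job.accept_zero_vismet is False
```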
@@ -61,6 +64,7 @@ JOB_SCHEMA = Schema(
Required("test_name"): str,
Required("browsertime_json_path"): str,
Required("extra_options"): [str],
Required("accept_zero_vismet"): bool,
}
],
Required("application"): {Required("name"): str, "version": str},
@@ -185,7 +189,7 @@ def append_result(log, suites, test_name, name, result, extra_options):
"replicates": [result],
"lowerIsBetter": True,
"unit": "ms",
"shouldAlert": SHOULD_ALERT[name],
"shouldAlert": SHOULD_ALERT.get(name, False),
}
else:
subtests[name]["replicates"].append(result)
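
The SHOULD_ALERT change replaces a hard dictionary lookup with a defaulted one; a small sketch of the difference, with invented SHOULD_ALERT contents:

```python
# Sketch: indexing raises KeyError for metrics that were never listed in
# SHOULD_ALERT, while .get(name, False) treats them as non-alerting.
SHOULD_ALERT = {"SpeedIndex": True, "ContentfulSpeedIndex": True}  # invented contents

print(SHOULD_ALERT.get("SpeedIndex", False))           # True
print(SHOULD_ALERT.get("VideoRecordingStart", False))  # False, no exception

try:
    SHOULD_ALERT["VideoRecordingStart"]
except KeyError:
    print("the old SHOULD_ALERT[name] lookup would have crashed here")
```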
@@ -322,6 +326,7 @@ def main(log, args):
extra_options=len(job["extra_options"]) > 0
and job["extra_options"]
or jobs_json["extra_options"],
accept_zero_vismet=job["accept_zero_vismet"],
json_path=browsertime_json_path,
video_path=browsertime_json_path.parent / video,
count=count,
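
As a side note, the surrounding extra_options argument uses the old `cond and x or y` idiom; a quick sketch of how that fallback resolves, with an invented helper name and option lists:

```python
# Sketch: `len(x) > 0 and x or y` picks the per-job options when they exist
# and falls back to the global ones otherwise; for lists it behaves like
# `x or y`. Option values are invented.
def pick_extra_options(job_options, global_options):
    return len(job_options) > 0 and job_options or global_options


print(pick_extra_options(["cold"], ["warm"]))  # ['cold']
print(pick_extra_options([], ["warm"]))        # ['warm']
```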
@@ -420,18 +425,20 @@ def run_visual_metrics(job, visualmetrics_path, options):
# Python 3.5 requires a str object (not 3.6+)
res = json.loads(res.decode("utf8"))
# Ensure that none of these values are at 0 which
# is indicative of a failing test
monitored_tests = [
    "contentfulspeedindex",
    "lastvisualchange",
    "perceptualspeedindex",
    "speedindex",
]
failed_tests = []
for metric, val in res.items():
    if metric.lower() in monitored_tests and val == 0:
        failed_tests.append(metric)
failed_tests = []
if not job.accept_zero_vismet:
    # Ensure that none of these values are at 0 which
    # is indicative of a failing test
    monitored_tests = [
        "contentfulspeedindex",
        "lastvisualchange",
        "perceptualspeedindex",
        "speedindex",
    ]
    for metric, val in res.items():
        if metric.lower() in monitored_tests and val == 0:
            failed_tests.append(metric)
if failed_tests:
    log.error(
        "TEST-UNEXPECTED-FAIL | Some visual metrics have an erroneous value of 0."
