From dfd7ca63447a9d6df2ed86c7869eb1d9f3d623c7 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Mon, 9 Oct 2023 16:30:11 +0200 Subject: [PATCH 01/15] analyze_outcomes: rename some variables for better readability Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 35 ++++++++++++++----------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 1f20734b1d..f7fc4e3eff 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -179,7 +179,7 @@ def do_analyze_driver_vs_reference(outcome_file, args): args['ignored_tests']) # List of tasks with a function that can handle this task and additional arguments if required -TASKS = { +KNOWN_TASKS = { 'analyze_coverage': { 'test_function': do_analyze_coverage, 'args': { @@ -645,7 +645,7 @@ def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('outcomes', metavar='OUTCOMES.CSV', help='Outcome file to analyze') - parser.add_argument('task', default='all', nargs='?', + parser.add_argument('specified_tasks', default='all', nargs='?', help='Analysis to be done. By default, run all tasks. ' 'With one or more TASK, run only those. ' 'TASK can be the name of a single task or ' @@ -660,31 +660,28 @@ def main(): options = parser.parse_args() if options.list: - for task in TASKS: + for task in KNOWN_TASKS: Results.log(task) sys.exit(0) - result = True - - if options.task == 'all': - tasks = TASKS.keys() + if options.specified_tasks == 'all': + tasks_list = KNOWN_TASKS.keys() else: - tasks = re.split(r'[, ]+', options.task) + tasks_list = re.split(r'[, ]+', options.specified_tasks) - for task in tasks: - if task not in TASKS: - Results.log('Error: invalid task: {}'.format(task)) - sys.exit(1) + for task in tasks_list: + if task not in KNOWN_TASKS: - TASKS['analyze_coverage']['args']['full_coverage'] = \ - options.full_coverage + KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage - for task in TASKS: - if task in tasks: - if not TASKS[task]['test_function'](options.outcomes, TASKS[task]['args']): - result = False + all_succeeded = True - if result is False: + for task in KNOWN_TASKS: + if task in tasks_list: + if not KNOWN_TASKS[task]['test_function'](options.outcomes, KNOWN_TASKS[task]['args']): + all_succeeded = False + + if all_succeeded is False: sys.exit(1) Results.log("SUCCESS :-)") except Exception: # pylint: disable=broad-except From aaef0bc172d09c289d8f59c01187c4cbaf76af38 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 10 Oct 2023 09:42:13 +0200 Subject: [PATCH 02/15] analyze_outcomes: improve logging system - the script now only terminates in case of hard faults - each task is assigned a log - this log tracks messages, warning and errors - when task completes, errors and warnings are listed and messages are appended to the main log - on exit the main log is printed and the proper return value is returned Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 100 +++++++++++++++++++----------- 1 file changed, 63 insertions(+), 37 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index f7fc4e3eff..49445a4735 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -15,25 +15,31 @@ import os import check_test_cases -class Results: +class TestLog: """Process analysis results.""" def __init__(self): self.error_count = 0 self.warning_count = 0 + self.output = 
"" - @staticmethod - def log(fmt, *args, **kwargs): - sys.stderr.write((fmt + '\n').format(*args, **kwargs)) + def add_line(self, fmt, *args, **kwargs): + self.output = self.output + (fmt + '\n').format(*args, **kwargs) + + def info(self, fmt, *args, **kwargs): + self.add_line(fmt, *args, **kwargs) def error(self, fmt, *args, **kwargs): - self.log('Error: ' + fmt, *args, **kwargs) + self.info('Error: ' + fmt, *args, **kwargs) self.error_count += 1 def warning(self, fmt, *args, **kwargs): - self.log('Warning: ' + fmt, *args, **kwargs) + self.info('Warning: ' + fmt, *args, **kwargs) self.warning_count += 1 + def print_output(self): + sys.stderr.write(self.output) + class TestCaseOutcomes: """The outcomes of one test case across many configurations.""" # pylint: disable=too-few-public-methods @@ -53,25 +59,27 @@ class TestCaseOutcomes: """ return len(self.successes) + len(self.failures) -def execute_reference_driver_tests(ref_component, driver_component, outcome_file): +def execute_reference_driver_tests(log: TestLog, ref_component, driver_component, \ + outcome_file) -> TestLog: """Run the tests specified in ref_component and driver_component. Results are stored in the output_file and they will be used for the following coverage analysis""" # If the outcome file already exists, we assume that the user wants to # perform the comparison analysis again without repeating the tests. if os.path.exists(outcome_file): - Results.log("Outcome file (" + outcome_file + ") already exists. " + \ - "Tests will be skipped.") - return + log.info("Outcome file (" + outcome_file + ") already exists. " + \ + "Tests will be skipped.") + return log shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \ " " + ref_component + " " + driver_component - Results.log("Running: " + shell_command) + log.info("Running: " + shell_command) ret_val = subprocess.run(shell_command.split(), check=False).returncode if ret_val != 0: - Results.log("Error: failed to run reference/driver components") - sys.exit(ret_val) + log.error("failed to run reference/driver components") + + return log def analyze_coverage(results, outcomes, allow_list, full_coverage): """Check that all available test cases are executed at least once.""" @@ -90,7 +98,8 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage): else: results.warning('Allow listed test case was executed: {}', key) -def analyze_driver_vs_reference(outcomes, component_ref, component_driver, +def analyze_driver_vs_reference(log: TestLog, outcomes, + component_ref, component_driver, ignored_suites, ignored_test=None): """Check that all tests executed in the reference component are also executed in the corresponding driver component. 
@@ -100,7 +109,6 @@ def analyze_driver_vs_reference(outcomes, component_ref, component_driver, output string is provided """ available = check_test_cases.collect_available_test_cases() - result = True for key in available: # Continue if test was not executed by any component @@ -125,16 +133,15 @@ def analyze_driver_vs_reference(outcomes, component_ref, component_driver, if component_ref in entry: reference_test_passed = True if(reference_test_passed and not driver_test_passed): - Results.log(key) - result = False - return result + log.error(key) -def analyze_outcomes(outcomes, args): + return log + +def analyze_outcomes(log: TestLog, outcomes, args) -> TestLog: """Run all analyses on the given outcome collection.""" - results = Results() - analyze_coverage(results, outcomes, args['allow_list'], + analyze_coverage(log, outcomes, args['allow_list'], args['full_coverage']) - return results + return log def read_outcome_file(outcome_file): """Parse an outcome file and return an outcome collection. @@ -159,24 +166,32 @@ by a semicolon. def do_analyze_coverage(outcome_file, args): """Perform coverage analysis.""" + log = TestLog() + log.info("\n*** Analyze coverage ***\n") outcomes = read_outcome_file(outcome_file) - Results.log("\n*** Analyze coverage ***\n") - results = analyze_outcomes(outcomes, args) - return results.error_count == 0 + log = analyze_outcomes(log, outcomes, args) + return log def do_analyze_driver_vs_reference(outcome_file, args): """Perform driver vs reference analyze.""" - execute_reference_driver_tests(args['component_ref'], \ - args['component_driver'], outcome_file) + log = TestLog() + + log = execute_reference_driver_tests(log, args['component_ref'], \ + args['component_driver'], outcome_file) + if log.error_count != 0: + return log ignored_suites = ['test_suite_' + x for x in args['ignored_suites']] outcomes = read_outcome_file(outcome_file) - Results.log("\n*** Analyze driver {} vs reference {} ***\n".format( + + log.info("\n*** Analyze driver {} vs reference {} ***\n".format( args['component_driver'], args['component_ref'])) - return analyze_driver_vs_reference(outcomes, args['component_ref'], - args['component_driver'], ignored_suites, - args['ignored_tests']) + log = analyze_driver_vs_reference(log, outcomes, + args['component_ref'], args['component_driver'], + ignored_suites, args['ignored_tests']) + + return log # List of tasks with a function that can handle this task and additional arguments if required KNOWN_TASKS = { @@ -641,6 +656,8 @@ KNOWN_TASKS = { } def main(): + main_log = TestLog() + try: parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('outcomes', metavar='OUTCOMES.CSV', @@ -661,16 +678,17 @@ def main(): if options.list: for task in KNOWN_TASKS: - Results.log(task) + main_log.info(task) + main_log.print_output() sys.exit(0) if options.specified_tasks == 'all': tasks_list = KNOWN_TASKS.keys() else: tasks_list = re.split(r'[, ]+', options.specified_tasks) - for task in tasks_list: if task not in KNOWN_TASKS: + main_log.error('invalid task: {}'.format(task)) KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage @@ -678,12 +696,20 @@ def main(): for task in KNOWN_TASKS: if task in tasks_list: - if not KNOWN_TASKS[task]['test_function'](options.outcomes, KNOWN_TASKS[task]['args']): + test_function = KNOWN_TASKS[task]['test_function'] + test_args = KNOWN_TASKS[task]['args'] + test_log = test_function(options.outcomes, test_args) + # Merge the output of this task with the main one + main_log.output = 
main_log.output + test_log.output + main_log.info("Task {} completed with:\n".format(task) + \ + "{} warnings\n".format(test_log.warning_count) + \ + "{} errors\n".format(test_log.error_count)) + if test_log.error_count != 0: all_succeeded = False - if all_succeeded is False: - sys.exit(1) - Results.log("SUCCESS :-)") + main_log.print_output() + sys.exit(0 if all_succeeded else 1) + except Exception: # pylint: disable=broad-except # Print the backtrace and exit explicitly with our chosen status. traceback.print_exc() From b0c618e147554e672b6c3f438127e1163157e807 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Mon, 16 Oct 2023 14:19:49 +0200 Subject: [PATCH 03/15] analyze_outcomes: minor improvements Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 49445a4735..105a4aaedd 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -27,7 +27,7 @@ class TestLog: self.output = self.output + (fmt + '\n').format(*args, **kwargs) def info(self, fmt, *args, **kwargs): - self.add_line(fmt, *args, **kwargs) + self.add_line('Info: ' + fmt, *args, **kwargs) def error(self, fmt, *args, **kwargs): self.info('Error: ' + fmt, *args, **kwargs) @@ -176,6 +176,9 @@ def do_analyze_driver_vs_reference(outcome_file, args): """Perform driver vs reference analyze.""" log = TestLog() + log.info("\n*** Analyze driver {} vs reference {} ***\n".format( + args['component_driver'], args['component_ref'])) + log = execute_reference_driver_tests(log, args['component_ref'], \ args['component_driver'], outcome_file) if log.error_count != 0: @@ -185,8 +188,6 @@ def do_analyze_driver_vs_reference(outcome_file, args): outcomes = read_outcome_file(outcome_file) - log.info("\n*** Analyze driver {} vs reference {} ***\n".format( - args['component_driver'], args['component_ref'])) log = analyze_driver_vs_reference(log, outcomes, args['component_ref'], args['component_driver'], ignored_suites, args['ignored_tests']) From 5329ff06b9456fecb7cbe42021177da14999c4c3 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 09:44:36 +0200 Subject: [PATCH 04/15] analyze_outcomes: print task list directly to stdout Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 105a4aaedd..f1680adc9c 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -679,8 +679,7 @@ def main(): if options.list: for task in KNOWN_TASKS: - main_log.info(task) - main_log.print_output() + print(task) sys.exit(0) if options.specified_tasks == 'all': From fb2750e98e1f68e61a04b54ad6821bee89ee54a9 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 10:11:45 +0200 Subject: [PATCH 05/15] analyze_outcomes: exit immediately in case of invalid task Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index f1680adc9c..28c55125c2 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -688,24 +688,24 @@ def main(): tasks_list = re.split(r'[, ]+', options.specified_tasks) for task in tasks_list: if task not in KNOWN_TASKS: - main_log.error('invalid task: 
{}'.format(task)) + sys.stderr.write('invalid task: {}'.format(task)) + sys.exit(2) KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage all_succeeded = True - for task in KNOWN_TASKS: - if task in tasks_list: - test_function = KNOWN_TASKS[task]['test_function'] - test_args = KNOWN_TASKS[task]['args'] - test_log = test_function(options.outcomes, test_args) - # Merge the output of this task with the main one - main_log.output = main_log.output + test_log.output - main_log.info("Task {} completed with:\n".format(task) + \ - "{} warnings\n".format(test_log.warning_count) + \ - "{} errors\n".format(test_log.error_count)) - if test_log.error_count != 0: - all_succeeded = False + for task in tasks_list: + test_function = KNOWN_TASKS[task]['test_function'] + test_args = KNOWN_TASKS[task]['args'] + test_log = test_function(options.outcomes, test_args) + # Merge the output of this task with the main one + main_log.output = main_log.output + test_log.output + main_log.info("Task {} completed with:\n".format(task) + \ + "{} warnings\n".format(test_log.warning_count) + \ + "{} errors\n".format(test_log.error_count)) + if test_log.error_count != 0: + all_succeeded = False main_log.print_output() sys.exit(0 if all_succeeded else 1) From 3f339897628ebd9ee37320042099aa255a83823a Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 10:42:11 +0200 Subject: [PATCH 06/15] analyze_outcomes: use a single TestLog instance and do not delay output Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 41 +++++++++++-------------------- 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 28c55125c2..8ddbf6c1eb 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -21,24 +21,21 @@ class TestLog: def __init__(self): self.error_count = 0 self.warning_count = 0 - self.output = "" - - def add_line(self, fmt, *args, **kwargs): - self.output = self.output + (fmt + '\n').format(*args, **kwargs) def info(self, fmt, *args, **kwargs): - self.add_line('Info: ' + fmt, *args, **kwargs) + self.print_line('Info: ' + fmt, *args, **kwargs) def error(self, fmt, *args, **kwargs): - self.info('Error: ' + fmt, *args, **kwargs) self.error_count += 1 + self.print_line('Error: ' + fmt, *args, **kwargs) def warning(self, fmt, *args, **kwargs): - self.info('Warning: ' + fmt, *args, **kwargs) self.warning_count += 1 + self.print_line('Warning: ' + fmt, *args, **kwargs) - def print_output(self): - sys.stderr.write(self.output) + @staticmethod + def print_line(fmt, *args, **kwargs): + sys.stderr.write(fmt, *args, **kwargs) class TestCaseOutcomes: """The outcomes of one test case across many configurations.""" @@ -164,25 +161,20 @@ by a semicolon. 
outcomes[key].failures.append(setup) return outcomes -def do_analyze_coverage(outcome_file, args): +def do_analyze_coverage(log: TestLog, outcome_file, args) -> TestLog: """Perform coverage analysis.""" - log = TestLog() log.info("\n*** Analyze coverage ***\n") outcomes = read_outcome_file(outcome_file) log = analyze_outcomes(log, outcomes, args) return log -def do_analyze_driver_vs_reference(outcome_file, args): +def do_analyze_driver_vs_reference(log: TestLog, outcome_file, args) -> TestLog: """Perform driver vs reference analyze.""" - log = TestLog() - log.info("\n*** Analyze driver {} vs reference {} ***\n".format( args['component_driver'], args['component_ref'])) log = execute_reference_driver_tests(log, args['component_ref'], \ args['component_driver'], outcome_file) - if log.error_count != 0: - return log ignored_suites = ['test_suite_' + x for x in args['ignored_suites']] @@ -693,22 +685,17 @@ def main(): KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage - all_succeeded = True - for task in tasks_list: test_function = KNOWN_TASKS[task]['test_function'] test_args = KNOWN_TASKS[task]['args'] - test_log = test_function(options.outcomes, test_args) - # Merge the output of this task with the main one - main_log.output = main_log.output + test_log.output - main_log.info("Task {} completed with:\n".format(task) + \ - "{} warnings\n".format(test_log.warning_count) + \ - "{} errors\n".format(test_log.error_count)) - if test_log.error_count != 0: - all_succeeded = False + main_log = test_function(main_log, options.outcomes, test_args) + + main_log.info("Overall results:\n" + \ + "{} warnings\n".format(main_log.warning_count) + \ + "{} errors\n".format(main_log.error_count)) main_log.print_output() - sys.exit(0 if all_succeeded else 1) + sys.exit(0 if (main_log.error_count == 0) else 2) except Exception: # pylint: disable=broad-except # Print the backtrace and exit explicitly with our chosen status. From f075e47bc1ea7a02bfc5d9c44427ee4e7908a419 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 11:03:16 +0200 Subject: [PATCH 07/15] analyze_outcomes: reset name of TestLog to Results Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 67 +++++++++++++++---------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 8ddbf6c1eb..95f0cc6973 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 + #!/usr/bin/env python3 """Analyze the test outcomes from a full CI run. @@ -15,7 +15,7 @@ import os import check_test_cases -class TestLog: +class Results: """Process analysis results.""" def __init__(self): @@ -56,27 +56,27 @@ class TestCaseOutcomes: """ return len(self.successes) + len(self.failures) -def execute_reference_driver_tests(log: TestLog, ref_component, driver_component, \ - outcome_file) -> TestLog: +def execute_reference_driver_tests(results: Results, ref_component, driver_component, \ + outcome_file) -> Results: """Run the tests specified in ref_component and driver_component. Results are stored in the output_file and they will be used for the following coverage analysis""" # If the outcome file already exists, we assume that the user wants to # perform the comparison analysis again without repeating the tests. if os.path.exists(outcome_file): - log.info("Outcome file (" + outcome_file + ") already exists. 
" + \ + results.info("Outcome file (" + outcome_file + ") already exists. " + \ "Tests will be skipped.") - return log + return results shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \ " " + ref_component + " " + driver_component - log.info("Running: " + shell_command) + results.info("Running: " + shell_command) ret_val = subprocess.run(shell_command.split(), check=False).returncode if ret_val != 0: - log.error("failed to run reference/driver components") + results.error("failed to run reference/driver components") - return log + return results def analyze_coverage(results, outcomes, allow_list, full_coverage): """Check that all available test cases are executed at least once.""" @@ -95,7 +95,7 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage): else: results.warning('Allow listed test case was executed: {}', key) -def analyze_driver_vs_reference(log: TestLog, outcomes, +def analyze_driver_vs_reference(results: Results, outcomes, component_ref, component_driver, ignored_suites, ignored_test=None): """Check that all tests executed in the reference component are also @@ -130,15 +130,15 @@ def analyze_driver_vs_reference(log: TestLog, outcomes, if component_ref in entry: reference_test_passed = True if(reference_test_passed and not driver_test_passed): - log.error(key) + results.error(key) - return log + return results -def analyze_outcomes(log: TestLog, outcomes, args) -> TestLog: +def analyze_outcomes(results: Results, outcomes, args) -> Results: """Run all analyses on the given outcome collection.""" - analyze_coverage(log, outcomes, args['allow_list'], + analyze_coverage(results, outcomes, args['allow_list'], args['full_coverage']) - return log + return results def read_outcome_file(outcome_file): """Parse an outcome file and return an outcome collection. @@ -161,30 +161,30 @@ by a semicolon. 
outcomes[key].failures.append(setup) return outcomes -def do_analyze_coverage(log: TestLog, outcome_file, args) -> TestLog: +def do_analyze_coverage(results: Results, outcome_file, args) -> Results: """Perform coverage analysis.""" - log.info("\n*** Analyze coverage ***\n") + results.info("\n*** Analyze coverage ***\n") outcomes = read_outcome_file(outcome_file) - log = analyze_outcomes(log, outcomes, args) - return log + results = analyze_outcomes(results, outcomes, args) + return results -def do_analyze_driver_vs_reference(log: TestLog, outcome_file, args) -> TestLog: +def do_analyze_driver_vs_reference(results: Results, outcome_file, args) -> Results: """Perform driver vs reference analyze.""" - log.info("\n*** Analyze driver {} vs reference {} ***\n".format( + results.info("\n*** Analyze driver {} vs reference {} ***\n".format( args['component_driver'], args['component_ref'])) - log = execute_reference_driver_tests(log, args['component_ref'], \ + results = execute_reference_driver_tests(results, args['component_ref'], \ args['component_driver'], outcome_file) ignored_suites = ['test_suite_' + x for x in args['ignored_suites']] outcomes = read_outcome_file(outcome_file) - log = analyze_driver_vs_reference(log, outcomes, - args['component_ref'], args['component_driver'], - ignored_suites, args['ignored_tests']) + results = analyze_driver_vs_reference(results, outcomes, + args['component_ref'], args['component_driver'], + ignored_suites, args['ignored_tests']) - return log + return results # List of tasks with a function that can handle this task and additional arguments if required KNOWN_TASKS = { @@ -649,7 +649,7 @@ KNOWN_TASKS = { } def main(): - main_log = TestLog() + main_results = Results() try: parser = argparse.ArgumentParser(description=__doc__) @@ -688,14 +688,13 @@ def main(): for task in tasks_list: test_function = KNOWN_TASKS[task]['test_function'] test_args = KNOWN_TASKS[task]['args'] - main_log = test_function(main_log, options.outcomes, test_args) - - main_log.info("Overall results:\n" + \ - "{} warnings\n".format(main_log.warning_count) + \ - "{} errors\n".format(main_log.error_count)) + main_results = test_function(main_results, options.outcomes, test_args) - main_log.print_output() - sys.exit(0 if (main_log.error_count == 0) else 2) + main_results.info("Overall results:\n" + \ + "{} warnings\n".format(main_results.warning_count) + \ + "{} errors\n".format(main_results.error_count)) + + sys.exit(0 if (main_results.error_count == 0) else 2) except Exception: # pylint: disable=broad-except # Print the backtrace and exit explicitly with our chosen status. From 40314fcc75d3343ecc7cb3b8a366e89715f52a85 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 11:34:31 +0200 Subject: [PATCH 08/15] analyze_outcomes: fix newlines Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 95f0cc6973..57f359a653 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -35,7 +35,7 @@ class Results: @staticmethod def print_line(fmt, *args, **kwargs): - sys.stderr.write(fmt, *args, **kwargs) + sys.stderr.write(fmt + '\n', *args, **kwargs) class TestCaseOutcomes: """The outcomes of one test case across many configurations.""" @@ -163,14 +163,14 @@ by a semicolon. 
def do_analyze_coverage(results: Results, outcome_file, args) -> Results: """Perform coverage analysis.""" - results.info("\n*** Analyze coverage ***\n") + results.info("*** Analyze coverage ***") outcomes = read_outcome_file(outcome_file) results = analyze_outcomes(results, outcomes, args) return results def do_analyze_driver_vs_reference(results: Results, outcome_file, args) -> Results: """Perform driver vs reference analyze.""" - results.info("\n*** Analyze driver {} vs reference {} ***\n".format( + results.info("*** Analyze driver {} vs reference {} ***".format( args['component_driver'], args['component_ref'])) results = execute_reference_driver_tests(results, args['component_ref'], \ @@ -690,9 +690,9 @@ def main(): test_args = KNOWN_TASKS[task]['args'] main_results = test_function(main_results, options.outcomes, test_args) - main_results.info("Overall results:\n" + \ - "{} warnings\n".format(main_results.warning_count) + \ - "{} errors\n".format(main_results.error_count)) + main_results.info("Overall results: " + \ + "{} warnings | ".format(main_results.warning_count) + \ + "{} errors".format(main_results.error_count)) sys.exit(0 if (main_results.error_count == 0) else 2) From 8d178be66e5cdc1b5ba60d90a96d2e63da9a7ff2 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 12:23:55 +0200 Subject: [PATCH 09/15] analyze_outcomes: fix return value in case of test failure Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 57f359a653..2998d322d8 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -1,4 +1,4 @@ - #!/usr/bin/env python3 +#!/usr/bin/env python3 """Analyze the test outcomes from a full CI run. @@ -694,7 +694,7 @@ def main(): "{} warnings | ".format(main_results.warning_count) + \ "{} errors".format(main_results.error_count)) - sys.exit(0 if (main_results.error_count == 0) else 2) + sys.exit(0 if (main_results.error_count == 0) else 1) except Exception: # pylint: disable=broad-except # Print the backtrace and exit explicitly with our chosen status. 
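The task-selection pattern that patches 01-09 converge on can be summarised with a minimal, standalone sketch: a KNOWN_TASKS lookup table, expansion of the default 'all' argument, comma/space splitting of an explicit task list, and an immediate exit with status 2 on an unknown task name. The dummy_task function and the plain dictionary used as a results value below are illustrative placeholders only, not code from the script.

import re
import sys

# Hypothetical stand-in for one analysis function; the real table maps names
# to do_analyze_coverage / do_analyze_driver_vs_reference with richer args.
def dummy_task(results, outcome_file, args):
    results['ran'].append(outcome_file)

# Maps a task name to the function handling it plus its extra arguments,
# mirroring the KNOWN_TASKS dictionary introduced in patch 01.
KNOWN_TASKS = {
    'analyze_coverage': {'test_function': dummy_task,
                         'args': {'full_coverage': False}},
}

def select_tasks(specified_tasks):
    """Expand 'all' or split a comma/space-separated task string."""
    if specified_tasks == 'all':
        return list(KNOWN_TASKS.keys())
    tasks_list = re.split(r'[, ]+', specified_tasks)
    for task in tasks_list:
        if task not in KNOWN_TASKS:
            # An unknown task name is a usage error: fail fast with status 2
            # (patch 05) instead of running the remaining tasks.
            sys.stderr.write('invalid task: {}\n'.format(task))
            sys.exit(2)
    return tasks_list

if __name__ == '__main__':
    results = {'ran': []}
    for task in select_tasks('analyze_coverage'):
        entry = KNOWN_TASKS[task]
        entry['test_function'](results, 'outcomes.csv', entry['args'])

Keeping the dispatch table as a plain dictionary means that adding a new analysis only requires a new entry with its handler and arguments; the command-line handling itself does not change.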
From f6f64cfd819335fab6d09320a66de27c821ee4ad Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 12:28:26 +0200 Subject: [PATCH 10/15] analyze_outcomes: code style improvement Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 2998d322d8..e0c69469f5 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -690,9 +690,8 @@ def main(): test_args = KNOWN_TASKS[task]['args'] main_results = test_function(main_results, options.outcomes, test_args) - main_results.info("Overall results: " + \ - "{} warnings | ".format(main_results.warning_count) + \ - "{} errors".format(main_results.error_count)) + main_results.info("Overall results: {} warnings and {} errors", + main_results.warning_count, main_results.error_count) sys.exit(0 if (main_results.error_count == 0) else 1) From 8070dbec6b76f5a4dcf8260d855065dca42dc633 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 12:29:30 +0200 Subject: [PATCH 11/15] analyze_outcomes: keep print_line() method non-static Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index e0c69469f5..0346404302 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -23,18 +23,18 @@ class Results: self.warning_count = 0 def info(self, fmt, *args, **kwargs): - self.print_line('Info: ' + fmt, *args, **kwargs) + self._print_line('Info: ' + fmt, *args, **kwargs) def error(self, fmt, *args, **kwargs): self.error_count += 1 - self.print_line('Error: ' + fmt, *args, **kwargs) + self._print_line('Error: ' + fmt, *args, **kwargs) def warning(self, fmt, *args, **kwargs): self.warning_count += 1 - self.print_line('Warning: ' + fmt, *args, **kwargs) + self._print_line('Warning: ' + fmt, *args, **kwargs) @staticmethod - def print_line(fmt, *args, **kwargs): + def _print_line(fmt, *args, **kwargs): sys.stderr.write(fmt + '\n', *args, **kwargs) class TestCaseOutcomes: From 781c23416e511ab31738bfc2607f944e8fcc4ce3 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Tue, 17 Oct 2023 12:47:35 +0200 Subject: [PATCH 12/15] analyze_oucomes: do not return Results instance passed as parameter Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 32 ++++++++++++------------------- 1 file changed, 12 insertions(+), 20 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 0346404302..5f2e37877e 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -57,7 +57,7 @@ class TestCaseOutcomes: return len(self.successes) + len(self.failures) def execute_reference_driver_tests(results: Results, ref_component, driver_component, \ - outcome_file) -> Results: + outcome_file): """Run the tests specified in ref_component and driver_component. Results are stored in the output_file and they will be used for the following coverage analysis""" @@ -66,7 +66,7 @@ def execute_reference_driver_tests(results: Results, ref_component, driver_compo if os.path.exists(outcome_file): results.info("Outcome file (" + outcome_file + ") already exists. 
" + \ "Tests will be skipped.") - return results + return shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \ " " + ref_component + " " + driver_component @@ -76,8 +76,6 @@ def execute_reference_driver_tests(results: Results, ref_component, driver_compo if ret_val != 0: results.error("failed to run reference/driver components") - return results - def analyze_coverage(results, outcomes, allow_list, full_coverage): """Check that all available test cases are executed at least once.""" available = check_test_cases.collect_available_test_cases() @@ -132,13 +130,10 @@ def analyze_driver_vs_reference(results: Results, outcomes, if(reference_test_passed and not driver_test_passed): results.error(key) - return results - -def analyze_outcomes(results: Results, outcomes, args) -> Results: +def analyze_outcomes(results: Results, outcomes, args): """Run all analyses on the given outcome collection.""" analyze_coverage(results, outcomes, args['allow_list'], args['full_coverage']) - return results def read_outcome_file(outcome_file): """Parse an outcome file and return an outcome collection. @@ -161,30 +156,27 @@ by a semicolon. outcomes[key].failures.append(setup) return outcomes -def do_analyze_coverage(results: Results, outcome_file, args) -> Results: +def do_analyze_coverage(results: Results, outcome_file, args): """Perform coverage analysis.""" results.info("*** Analyze coverage ***") outcomes = read_outcome_file(outcome_file) - results = analyze_outcomes(results, outcomes, args) - return results + analyze_outcomes(results, outcomes, args) -def do_analyze_driver_vs_reference(results: Results, outcome_file, args) -> Results: +def do_analyze_driver_vs_reference(results: Results, outcome_file, args): """Perform driver vs reference analyze.""" results.info("*** Analyze driver {} vs reference {} ***".format( args['component_driver'], args['component_ref'])) - results = execute_reference_driver_tests(results, args['component_ref'], \ - args['component_driver'], outcome_file) + execute_reference_driver_tests(results, args['component_ref'], \ + args['component_driver'], outcome_file) ignored_suites = ['test_suite_' + x for x in args['ignored_suites']] outcomes = read_outcome_file(outcome_file) - results = analyze_driver_vs_reference(results, outcomes, - args['component_ref'], args['component_driver'], - ignored_suites, args['ignored_tests']) - - return results + analyze_driver_vs_reference(results, outcomes, + args['component_ref'], args['component_driver'], + ignored_suites, args['ignored_tests']) # List of tasks with a function that can handle this task and additional arguments if required KNOWN_TASKS = { @@ -688,7 +680,7 @@ def main(): for task in tasks_list: test_function = KNOWN_TASKS[task]['test_function'] test_args = KNOWN_TASKS[task]['args'] - main_results = test_function(main_results, options.outcomes, test_args) + test_function(main_results, options.outcomes, test_args) main_results.info("Overall results: {} warnings and {} errors", main_results.warning_count, main_results.error_count) From 735794c7454c91ba06a1e0c518799b85ed00b99e Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Wed, 18 Oct 2023 08:05:15 +0200 Subject: [PATCH 13/15] analyze_outcomes: fix missing format for args/kwargs Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 5f2e37877e..d0b72a859b 100755 --- a/tests/scripts/analyze_outcomes.py +++ 
b/tests/scripts/analyze_outcomes.py @@ -35,7 +35,7 @@ class Results: @staticmethod def _print_line(fmt, *args, **kwargs): - sys.stderr.write(fmt + '\n', *args, **kwargs) + sys.stderr.write((fmt + '\n').format(*args, **kwargs)) class TestCaseOutcomes: """The outcomes of one test case across many configurations.""" From 39d4b9d15bf6a89bc2d9ef59c064041c539ab38f Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Wed, 18 Oct 2023 14:30:03 +0200 Subject: [PATCH 14/15] analyze_outcomes: fix format interpolation errors Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index d0b72a859b..b522efb316 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -64,13 +64,12 @@ def execute_reference_driver_tests(results: Results, ref_component, driver_compo # If the outcome file already exists, we assume that the user wants to # perform the comparison analysis again without repeating the tests. if os.path.exists(outcome_file): - results.info("Outcome file (" + outcome_file + ") already exists. " + \ - "Tests will be skipped.") + results.info("Outcome file ({}) already exists. Tests will be skipped.", outcome_file) return shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \ " " + ref_component + " " + driver_component - results.info("Running: " + shell_command) + results.info("Running: {}", shell_command) ret_val = subprocess.run(shell_command.split(), check=False).returncode if ret_val != 0: @@ -128,7 +127,7 @@ def analyze_driver_vs_reference(results: Results, outcomes, if component_ref in entry: reference_test_passed = True if(reference_test_passed and not driver_test_passed): - results.error(key) + results.error("Did not pass with driver: {}", key) def analyze_outcomes(results: Results, outcomes, args): """Run all analyses on the given outcome collection.""" @@ -164,8 +163,8 @@ def do_analyze_coverage(results: Results, outcome_file, args): def do_analyze_driver_vs_reference(results: Results, outcome_file, args): """Perform driver vs reference analyze.""" - results.info("*** Analyze driver {} vs reference {} ***".format( - args['component_driver'], args['component_ref'])) + results.info("*** Analyze driver {} vs reference {} ***", + args['component_driver'], args['component_ref']) execute_reference_driver_tests(results, args['component_ref'], \ args['component_driver'], outcome_file) From 2cff82069e6933358cb03c5f87a169fad52acf38 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Wed, 18 Oct 2023 14:36:47 +0200 Subject: [PATCH 15/15] analyze_outcomes: add new_section() method to the Results class Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index b522efb316..9254331189 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -22,6 +22,9 @@ class Results: self.error_count = 0 self.warning_count = 0 + def new_section(self, fmt, *args, **kwargs): + self._print_line('\n*** ' + fmt + ' ***\n', *args, **kwargs) + def info(self, fmt, *args, **kwargs): self._print_line('Info: ' + fmt, *args, **kwargs) @@ -157,14 +160,14 @@ by a semicolon. 
def do_analyze_coverage(results: Results, outcome_file, args): """Perform coverage analysis.""" - results.info("*** Analyze coverage ***") + results.new_section("Analyze coverage") outcomes = read_outcome_file(outcome_file) analyze_outcomes(results, outcomes, args) def do_analyze_driver_vs_reference(results: Results, outcome_file, args): """Perform driver vs reference analyze.""" - results.info("*** Analyze driver {} vs reference {} ***", - args['component_driver'], args['component_ref']) + results.new_section("Analyze driver {} vs reference {}", + args['component_driver'], args['component_ref']) execute_reference_driver_tests(results, args['component_ref'], \ args['component_driver'], outcome_file)
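Taken together, the series leaves the Results class and the surrounding control flow in roughly the following shape. This is a condensed, standalone sketch assembled from the hunks above, assuming no further changes after patch 15; demo_task is an illustrative placeholder for the real analysis functions.

import sys

class Results:
    """Logger and error/warning accumulator shared by all analysis tasks."""

    def __init__(self):
        self.error_count = 0
        self.warning_count = 0

    def new_section(self, fmt, *args, **kwargs):
        self._print_line('\n*** ' + fmt + ' ***\n', *args, **kwargs)

    def info(self, fmt, *args, **kwargs):
        self._print_line('Info: ' + fmt, *args, **kwargs)

    def warning(self, fmt, *args, **kwargs):
        self.warning_count += 1
        self._print_line('Warning: ' + fmt, *args, **kwargs)

    def error(self, fmt, *args, **kwargs):
        self.error_count += 1
        self._print_line('Error: ' + fmt, *args, **kwargs)

    @staticmethod
    def _print_line(fmt, *args, **kwargs):
        # Messages are written immediately rather than buffered (patch 06),
        # and the format arguments are interpolated here (patch 13).
        sys.stderr.write((fmt + '\n').format(*args, **kwargs))

def demo_task(results):
    # Placeholder standing in for do_analyze_coverage and friends.
    results.new_section("Analyze coverage")
    results.warning('Allow listed test case was executed: {}', 'example case')

if __name__ == '__main__':
    results = Results()
    demo_task(results)
    results.info("Overall results: {} warnings and {} errors",
                 results.warning_count, results.error_count)
    # Exit status 0 only when no task reported an error (patch 09).
    sys.exit(0 if results.error_count == 0 else 1)

Messages go straight to stderr as they are produced, the two counters are the only state that accumulates, and the process exit status is derived from error_count alone, so a warning-only run still exits 0.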