Run tests for ref_vs_driver outside task function
Signed-off-by: Pengyu Lv <pengyu.lv@arm.com>
@@ -50,11 +50,7 @@ def execute_reference_driver_tests(results: Results, ref_component, driver_compo
     """Run the tests specified in ref_component and driver_component. Results
     are stored in the output_file and they will be used for the following
     coverage analysis"""
-    # If the outcome file already exists, we assume that the user wants to
-    # perform the comparison analysis again without repeating the tests.
-    if os.path.exists(outcome_file):
-        results.info("Outcome file ({}) already exists. Tests will be skipped.", outcome_file)
-        return
+    results.new_section("Test {} and {}", ref_component, driver_component)
 
     shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
                     " " + ref_component + " " + driver_component
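The hunk above only shows how the all.sh command line is assembled; the call that actually runs it lies outside the hunk. Below is a minimal sketch of one way such a command could be executed, assuming subprocess is used; run_all_sh and the error handling are illustrative, not taken from the patch.

    # Hypothetical sketch (not part of the patch): one way the assembled
    # all.sh command line could be executed and its exit status checked.
    import shlex
    import subprocess

    def run_all_sh(outcome_file, ref_component, driver_component):
        """Run the two all.sh components, writing results to outcome_file."""
        shell_command = ("tests/scripts/all.sh --outcome-file " + outcome_file +
                         " " + ref_component + " " + driver_component)
        # shlex.split keeps the invocation free of shell interpretation.
        completed = subprocess.run(shlex.split(shell_command), check=False)
        if completed.returncode != 0:
            raise SystemExit("all.sh exited with status {}".format(completed.returncode))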
@@ -188,27 +184,18 @@ suite_case = "<suite>;<case>"
 
     return outcomes
 
-def do_analyze_coverage(results: Results, outcomes_or_file, args):
+def do_analyze_coverage(results: Results, outcomes, args):
     """Perform coverage analysis."""
     results.new_section("Analyze coverage")
-    outcomes = read_outcome_file(outcomes_or_file) \
-        if isinstance(outcomes_or_file, str) else outcomes_or_file
     analyze_outcomes(results, outcomes, args)
 
-def do_analyze_driver_vs_reference(results: Results, outcomes_or_file, args):
+def do_analyze_driver_vs_reference(results: Results, outcomes, args):
     """Perform driver vs reference analyze."""
     results.new_section("Analyze driver {} vs reference {}",
                         args['component_driver'], args['component_ref'])
 
     ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
 
-    if isinstance(outcomes_or_file, str):
-        execute_reference_driver_tests(results, args['component_ref'], \
-                                       args['component_driver'], outcomes_or_file)
-        outcomes = read_outcome_file(outcomes_or_file)
-    else:
-        outcomes = outcomes_or_file
-
     analyze_driver_vs_reference(results, outcomes,
                                 args['component_ref'], args['component_driver'],
                                 ignored_suites, args['ignored_tests'])
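After this hunk, the task functions receive an already-parsed outcomes container instead of an "outcomes or file name" argument. Neither read_outcome_file nor the structure it returns appears in this diff, so the sketch below is hypothetical: the class name, fields and example key are assumed for illustration (the actual definitions live outside this diff), with the "<suite>;<case>" key shape following the suite_case hint in the hunk header.

    # Hypothetical sketch: a minimal container a task function could consume.
    class TestCaseOutcomes:
        """Collected results for one test case across tested configurations."""
        def __init__(self):
            self.successes = []   # configurations in which the case passed
            self.failures = []    # configurations in which the case failed

        def hits(self):
            """Number of times the test case was executed."""
            return len(self.successes) + len(self.failures)

    outcomes = {}
    key = 'test_suite_example;example case'          # illustrative key only
    outcomes.setdefault(key, TestCaseOutcomes()).successes.append('ref-config')

    # A task function can now use the shared container directly, e.g. to
    # list test cases that were never executed:
    never_run = [case for case, outcome in outcomes.items() if outcome.hits() == 0]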
@@ -507,17 +494,29 @@ def main():
 
     # If the outcome file exists, parse it once and share the result
     # among tasks to improve performance.
-    # Otherwise, it will be generated by do_analyze_driver_vs_reference.
-    if os.path.exists(options.outcomes):
-        main_results.info("Read outcome file from {}.", options.outcomes)
-        outcomes_or_file = read_outcome_file(options.outcomes)
-    else:
-        outcomes_or_file = options.outcomes
+    # Otherwise, it will be generated by execute_reference_driver_tests.
+    if not os.path.exists(options.outcomes):
+        if len(tasks_list) > 1:
+            sys.stderr.write("mutiple tasks found, please provide a valid outcomes file.\n")
+            sys.exit(2)
+
+        task_name = tasks_list[0]
+        task = KNOWN_TASKS[task_name]
+        if task['test_function'] != do_analyze_driver_vs_reference:
+            sys.stderr.write("please provide valid outcomes file for {}.\n".format(task_name))
+            sys.exit(2)
+
+        execute_reference_driver_tests(main_results,
+                                       task['args']['component_ref'],
+                                       task['args']['component_driver'],
+                                       options.outcomes)
+
+    outcomes = read_outcome_file(options.outcomes)
 
     for task in tasks_list:
         test_function = KNOWN_TASKS[task]['test_function']
         test_args = KNOWN_TASKS[task]['args']
-        test_function(main_results, outcomes_or_file, test_args)
+        test_function(main_results, outcomes, test_args)
 
     main_results.info("Overall results: {} warnings and {} errors",
                       main_results.warning_count, main_results.error_count)
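The new code in main() generates the outcome file with execute_reference_driver_tests when it is missing (and only for a single driver-vs-reference task), then parses it exactly once with read_outcome_file and shares the result with every task. read_outcome_file itself is not part of this diff; the following is a minimal sketch of such a reader, assuming a semicolon-separated record format (the real format, field order and result labels may differ).

    # Hypothetical sketch (file format and names assumed; read_outcome_file
    # itself is outside this diff): parse the outcome file once, then hand
    # the resulting dict to every task in tasks_list.
    def read_outcome_file_sketch(outcome_file):
        """Return {"<suite>;<case>": {"successes": [...], "failures": [...]}}."""
        outcomes = {}
        with open(outcome_file, 'r', encoding='utf-8') as input_file:
            for line in input_file:
                # Assumed record layout: platform;configuration;suite;case;result[;cause]
                platform, config, suite, case, result = line.rstrip('\n').split(';')[:5]
                key = ';'.join([suite, case])
                entry = outcomes.setdefault(key, {'successes': [], 'failures': []})
                target = 'successes' if result == 'PASS' else 'failures'
                entry[target].append(';'.join([platform, config]))
        return outcomes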