Add ability to list all YAML tests and manually run them from the test runner (#24629)

* Add ability to list all YAML tests and manually run them from the test runner

* Fix typo

* Do not try to find chip-tool if not needed

* Restyle

* Remove flaky tests identified in #24626 ... they seem to block this PR

* Properly support dry run

* Fix test target

---------

Co-authored-by: Andrei Litvin <[email protected]>
2 people authored and pull[bot] committed Oct 13, 2023
1 parent ce0d8a9 commit 1009058
Showing 3 changed files with 56 additions and 23 deletions.
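For orientation, a minimal sketch of the workflow this commit enables, driven from Python. The runner path and `--run-yamltests-with-chip-repl` come from this diff; the `--target` and `--dry-run` options and the exact target name are assumptions about the runner's CLI, not verbatim documentation.

    import subprocess

    RUNNER = "./scripts/tests/run_test_suite.py"  # path as used in the CI workflow below

    # List every discovered YAML suite; per cmd_list below, manual suites are
    # printed with a "(MANUAL TEST)" suffix.
    subprocess.run([RUNNER, "--run-yamltests-with-chip-repl", "list"], check=True)

    # Dry-run a single suite by name (flags assumed; --dry-run only logs what
    # would execute, so no apps are launched).
    subprocess.run(
        [RUNNER, "--run-yamltests-with-chip-repl", "--target", "Test_TC_ACL_1_1",
         "--dry-run", "run", "--iterations", "1"],
        check=True,
    )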
1 change: 0 additions & 1 deletion .github/workflows/tests.yaml
@@ -218,7 +218,6 @@ jobs:
          run: |
              ./scripts/run_in_build_env.sh \
                 "./scripts/tests/run_test_suite.py \
-                    --chip-tool ./out/linux-x64-chip-tool${CHIP_TOOL_VARIANT}-${BUILD_VARIANT}/chip-tool \
                     --run-yamltests-with-chip-repl \
                     run \
                     --iterations 1 \
52 changes: 37 additions & 15 deletions scripts/tests/chiptest/__init__.py
@@ -27,17 +27,17 @@
     os.path.join(_DEFAULT_CHIP_ROOT, "src/app/tests/suites"))


-def _FindYamlTestPath(name: str):
+def _AllYamlTests():
     yaml_test_suite_path = Path(_YAML_TEST_SUITE_PATH)

     if not yaml_test_suite_path.exists():
-        raise FileNotFoundError(f"Expected directory {_YAML_TEST_SUITE_PATH} to exist")
-    for path in yaml_test_suite_path.rglob(name):
+        raise FileNotFoundError(
+            f"Expected directory {_YAML_TEST_SUITE_PATH} to exist")
+
+    for path in yaml_test_suite_path.rglob("*.yaml"):
         if not path.is_file():
             continue
-        if path.name != name:
-            continue
-        return str(path)
-    return None
+        yield path


 def target_for_name(name: str):
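The old `_FindYamlTestPath(name)` returned one path by name; the new `_AllYamlTests()` is a generator over every *.yaml file under the suite directory. A caller that still wants the old lookup can layer it on top — a sketch, not code from this commit (the helper name is invented):

    def find_yaml_test_path(name: str):
        # First matching Path as a string, or None — mirrors the old behavior.
        return next((str(p) for p in _AllYamlTests() if p.name == name), None)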
@@ -76,7 +76,7 @@ def tests_with_command(chip_tool: str, is_manual: bool):
 # TODO We will move away from hardcoded list of yamltests to run all file when yamltests
 # parser/runner reaches parity with the code gen version.
 def _hardcoded_python_yaml_tests():
-    currently_supported_yaml_tests = [
+    currently_supported_yaml_tests = {
         "Test_TC_ACL_1_1.yaml",
         "Test_TC_ACL_2_1.yaml",
         "Test_TC_BOOL_1_1.yaml",
@@ -95,6 +95,7 @@ def _hardcoded_python_yaml_tests():
         "Test_TC_I_2_1.yaml",
         "Test_TC_ILL_1_1.yaml",
         "Test_TC_ILL_2_1.yaml",
+        # "Test_TC_LVL_2_1.yaml",  # TODO: Fix flakyness
         "Test_TC_LVL_2_2.yaml",
         "Test_TC_LCFG_1_1.yaml",
         "Test_TC_LTIME_1_2.yaml",
@@ -165,20 +166,41 @@ def _hardcoded_python_yaml_tests():
         "Test_TC_WNCV_2_3.yaml",
         "Test_TC_WNCV_4_3.yaml",
         "Test_TC_WNCV_4_4.yaml",
+        # "DL_Schedules.yaml",  # TODO: Fix flakyness
         "DL_UsersAndCredentials.yaml",
-    ]
+    }
+
+    invalid_tests = {
+        "tests.yaml",  # src/app/tests/suites/certification/tests.yaml is not a real test
+        "PICS.yaml",   # src/app/tests/suites/certification/PICS.yaml is not a real test
+    }
+
+    # By default assume all yaml files are valid test cases, however only a
+    # smaller subset is known to pass, all the rest are marked "manual"
+    # For sanity check, all known supported tests MUST exist
+    found_supported_tests = set()
+    for path in _AllYamlTests():
+        if path.name in invalid_tests:
+            continue

-    for name in currently_supported_yaml_tests:
-        yaml_test_path = _FindYamlTestPath(name)
-        if not yaml_test_path:
-            raise FileNotFoundError(f"Could not find YAML test {name}")
+        is_supported = path.name in currently_supported_yaml_tests
+
+        if is_supported:
+            found_supported_tests.add(path.name)
+
+        file_path = str(path)
+
+        target = target_for_name(path.name)

-        target = target_for_name(name)
+        # `path.stem` converts "some/path/Test_ABC_1.2.yaml" to "Test_ABC_1.2"
         yield TestDefinition(
-            run_name=yaml_test_path, name=name, target=target, is_manual=False, use_chip_repl_yaml_tester=True
+            run_name=file_path, name=path.stem, target=target, is_manual=not is_supported, use_chip_repl_yaml_tester=True
         )
+
+    if found_supported_tests != currently_supported_yaml_tests:
+        raise Exception("Did not find YAMLs for all supported tests: %r" % (
+            currently_supported_yaml_tests - found_supported_tests))


 def AllTests(chip_tool: str, run_yamltests_with_chip_repl: bool):
     if run_yamltests_with_chip_repl:
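Two details above are easy to check in isolation: `Path.stem` strips only the final suffix (so dotted revision numbers survive), and the closing set comparison names exactly the expected YAMLs that were never found on disk. A standalone sketch with invented data:

    from pathlib import Path

    assert Path("some/path/Test_ABC_1.2.yaml").stem == "Test_ABC_1.2"

    supported = {"Test_TC_ACL_1_1.yaml", "Test_TC_BOOL_1_1.yaml"}
    found = {"Test_TC_ACL_1_1.yaml"}
    # The code above raises with this difference; here we just print it.
    print(supported - found)  # {'Test_TC_BOOL_1_1.yaml'}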
26 changes: 19 additions & 7 deletions scripts/tests/run_test_suite.py
@@ -60,6 +60,7 @@ class RunContext:
     in_unshare: bool
     chip_tool: str
     dry_run: bool
+    skip_manual: bool


 @click.group(chain=True)
@@ -122,14 +123,17 @@ def main(context, dry_run, log_level, target, target_glob, target_skip_glob,
     log_fmt = '%(levelname)-7s %(message)s'
     coloredlogs.install(level=__LOG_LEVELS__[log_level], fmt=log_fmt)

-    if chip_tool is None:
+    if chip_tool is None and not run_yamltests_with_chip_repl:
+        # non yaml tests REQUIRE chip-tool. Yaml tests should not require chip-tool
         chip_tool = FindBinaryPath('chip-tool')

     # Figures out selected test that match the given name(s)
     all_tests = [test for test in chiptest.AllTests(chip_tool, run_yamltests_with_chip_repl)]

     tests = all_tests

+    # When only the default 'all' target is selected, skip manual tests at
+    # run time; naming a test explicitly still runs it even if it is manual.
+    skip_manual = 'all' in target
     if 'all' not in target:
         tests = []
         for name in target:
@@ -159,15 +163,19 @@ def main(context, dry_run, log_level, target, target_glob, target_skip_glob,
     context.obj = RunContext(root=root, tests=tests,
                              in_unshare=internal_inside_unshare,
-                             chip_tool=chip_tool, dry_run=dry_run)
+                             chip_tool=chip_tool, dry_run=dry_run,
+                             skip_manual=skip_manual)


 @main.command(
     'list', help='List available test suites')
 @click.pass_context
 def cmd_list(context):
     for test in context.obj.tests:
-        print(test.name)
+        if test.is_manual:
+            print("%s (MANUAL TEST)" % test.name)
+        else:
+            print(test.name)


 @main.command(
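With this change, `list` reports manual suites instead of hiding them. A toy rendering of the output format, with (name, is_manual) pairs standing in for TestDefinition objects:

    for name, is_manual in [("Test_TC_ACL_1_1", False), ("DL_Schedules", True)]:
        print("%s (MANUAL TEST)" % name if is_manual else name)
    # Test_TC_ACL_1_1
    # DL_Schedules (MANUAL TEST)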
@@ -257,19 +265,23 @@ def cmd_run(context, iterations, all_clusters_app, lock_app, ota_provider_app, o
     for i in range(iterations):
         logging.info("Starting iteration %d" % (i+1))
         for test in context.obj.tests:
+            if context.obj.skip_manual and test.is_manual:
+                continue
+
             test_start = time.monotonic()
             try:
                 if context.obj.dry_run:
-                    logging.info("Would run test %s:" % test.name)
+                    logging.info("Would run test: %s" % test.name)
                     continue

                 logging.info('%-20s - Starting test' % (test.name))
                 test.Run(runner, apps_register, paths, pics_file, test_timeout_seconds, context.obj.dry_run)
                 test_end = time.monotonic()
-                logging.info('%-20s - Completed in %0.2f seconds' %
+                logging.info('%-30s - Completed in %0.2f seconds' %
                              (test.name, (test_end - test_start)))
             except Exception:
                 test_end = time.monotonic()
-                logging.exception('%s - FAILED in %0.2f seconds' %
+                logging.exception('%-30s - FAILED in %0.2f seconds' %
                                   (test.name, (test_end - test_start)))
                 apps_register.uninit()
                 sys.exit(2)
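In miniature, the run loop above behaves like this toy model (stand-in data, not the real objects): manual suites are skipped only under the default 'all' target, and dry-run logs instead of launching apps.

    skip_manual, dry_run = True, True
    for name, is_manual in [("Test_TC_ACL_1_1", False), ("DL_Schedules", True)]:
        if skip_manual and is_manual:
            continue  # manual suites never run under the default target
        if dry_run:
            print("Would run test: %s" % name)  # only Test_TC_ACL_1_1 is logged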
