
Commit

Merge branch 'project-chip:master' into metering-identification-cluster-test-scripts
dmitrymaslovdsr authored Jul 25, 2024
2 parents 17c85fa + 1b1340f commit a3d80a4
Showing 4 changed files with 316 additions and 1 deletion.
18 changes: 18 additions & 0 deletions examples/shell/nrfconnect/sysbuild.conf
@@ -0,0 +1,18 @@
#
# Copyright (c) 2024 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

SB_CONFIG_MATTER=y
SB_CONFIG_MATTER_OTA=n
14 changes: 13 additions & 1 deletion src/python_testing/TC_DeviceConformance.py
@@ -32,6 +32,8 @@
import chip.clusters as Clusters
from basic_composition_support import BasicCompositionTests
from chip.tlv import uint
from choice_conformance_support import (evaluate_attribute_choice_conformance, evaluate_command_choice_conformance,
                                        evaluate_feature_choice_conformance)
from conformance_support import ConformanceDecision, conformance_allowed
from global_attribute_ids import GlobalAttributeIds
from matter_testing_support import (AttributePathLocation, ClusterPathLocation, CommandPathLocation, MatterBaseTest, ProblemNotice,
@@ -188,7 +190,17 @@ def check_spec_conformance_for_commands(command_type: CommandType):
check_spec_conformance_for_commands(CommandType.ACCEPTED)
check_spec_conformance_for_commands(CommandType.GENERATED)

# TODO: Add choice checkers
feature_choice_problems = evaluate_feature_choice_conformance(
    endpoint_id, cluster_id, self.xml_clusters, feature_map, attribute_list, all_command_list)
attribute_choice_problems = evaluate_attribute_choice_conformance(
    endpoint_id, cluster_id, self.xml_clusters, feature_map, attribute_list, all_command_list)
command_choice_problem = evaluate_command_choice_conformance(
    endpoint_id, cluster_id, self.xml_clusters, feature_map, attribute_list, all_command_list)

if feature_choice_problems or attribute_choice_problems or command_choice_problem:
    success = False
    problems.extend(feature_choice_problems + attribute_choice_problems + command_choice_problem)

print(f'success = {success}')
return success, problems

211 changes: 211 additions & 0 deletions src/python_testing/TestChoiceConformanceSupport.py
@@ -0,0 +1,211 @@
#
# Copyright (c) 2023 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import itertools
import xml.etree.ElementTree as ElementTree

import jinja2
from choice_conformance_support import (evaluate_attribute_choice_conformance, evaluate_command_choice_conformance,
                                        evaluate_feature_choice_conformance)
from matter_testing_support import MatterBaseTest, ProblemNotice, default_matter_test_main
from mobly import asserts
from spec_parsing_support import XmlCluster, add_cluster_data_from_xml

FEATURE_TEMPLATE = '''\
<feature bit="{{ id }}" code="{{ name }}" name="{{ name }}" summary="summary">
<optionalConform choice="{{ choice }}" more="{{ more }}">
{%- if XXX %}
<feature name="XXX" />
{% endif %}
</optionalConform>
</feature>
'''

ATTRIBUTE_TEMPLATE = (
' <attribute id="{{ id }}" name="{{ name }}" type="uint16">\n'
' <optionalConform choice="{{ choice }}" more="{{ more }}">\n'
' {% if XXX %}'
' <attribute name="XXX" />\n'
' {% endif %}'
' </optionalConform>\n'
' </attribute>\n'
)

COMMAND_TEMPLATE = (
' <command id="{{ id }}" name="{{ name }}" direction="commandToServer" response="Y">\n'
' <optionalConform choice="{{ choice }}" more="{{ more }}">\n'
' {% if XXX %}'
' <command name="XXX" />\n'
' {% endif %}'
' </optionalConform>\n'
' </command>\n'
)

CLUSTER_TEMPLATE = (
'<cluster xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="types types.xsd cluster cluster.xsd" id="0x0001" name="Test Base" revision="1">\n'
' <revisionHistory>\n'
' <revision revision="1" summary="Initial version"/>\n'
' </revisionHistory>\n'
' <clusterIds>\n'
' <clusterId id="0x0001" name="Test Base"/>\n'
' </clusterIds>\n'
' <classification hierarchy="base" role="application" picsCode="BASE" scope="Endpoint"/>\n'
' <features>\n'
' {{ feature_string }}\n'
' </features>\n'
' <attributes>\n'
' {{ attribute_string}}\n'
' </attributes>\n'
' <commands>\n'
' {{ command_string }}\n'
' </commands>\n'
'</cluster>\n')


def _create_elements(template_str: str, base_name: str) -> list[str]:
    xml_str = []

    def add_elements(curr_choice: str, starting_id: int, more: str, XXX: bool):
        for i in range(3):
            element_name = f'{base_name}{curr_choice.upper()*(i+1)}'
            environment = jinja2.Environment()
            template = environment.from_string(template_str)
            xml_str.append(template.render(id=(i + starting_id), name=element_name, choice=curr_choice, more=more, XXX=XXX))
    add_elements('a', 1, 'false', False)
    add_elements('b', 4, 'true', False)
    add_elements('c', 7, 'false', True)
    add_elements('d', 10, 'true', True)

    return xml_str
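
For illustration, rendering a single FEATURE_TEMPLATE entry by hand (the first 'a' feature that _create_elements generates, with no gating feature) produces roughly the following; exact whitespace depends on the template above:

import jinja2
print(jinja2.Environment().from_string(FEATURE_TEMPLATE).render(
    id=1, name='FA', choice='a', more='false', XXX=False))
# Roughly:
# <feature bit="1" code="FA" name="FA" summary="summary">
#     <optionalConform choice="a" more="false">
#     </optionalConform>
# </feature>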

# TODO: this setup keeps the test simple because it assumes that choice conformances apply only within one table.
# If that is not true (e.g. you could have a "choose 1" spanning a feature and an attribute), then this gets more
# complex: the test would need to evaluate the same choice conformance value across multiple tables, and all the
# conformances would need to be assessed over the entire element set.
# I've done it this way specifically so I can hardcode the choice values and test the features, attributes and commands
# separately, even though I load them all at the start.

# Cluster with choices on all elements
# 3 of each element with O.a
# 3 of each element with O.b+
# 3 of each element with [XXX].c
# 3 of each element with [XXX].d+
# 1 element named XXX
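
For readers unfamiliar with the conformance shorthand in the comment above, the meaning these tests exercise is:

# O.a      - optional, choice 'a': exactly one element of the choice set must be supported
# O.b+     - optional, choice 'b' with 'more' allowed: at least one element must be supported
# [XXX].c  - optional only when feature XXX is supported, choice 'c' (exactly one)
# [XXX].d+ - optional only when feature XXX is supported, choice 'd' (at least one)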


def _create_features():
    xml = _create_elements(FEATURE_TEMPLATE, 'F')
    xxx = (' <feature bit="13" code="XXX" name="XXX" summary="summary">\n'
           ' <optionalConform />\n'
           ' </feature>\n')
    xml.append(xxx)
    return '\n'.join(xml)


def _create_attributes():
    xml = _create_elements(ATTRIBUTE_TEMPLATE, "attr")
    xxx = (' <attribute id="13" name="XXX" summary="summary">\n'
           ' <optionalConform />\n'
           ' </attribute>\n')
    xml.append(xxx)
    return '\n'.join(xml)


def _create_commands():
    xml = _create_elements(COMMAND_TEMPLATE, 'cmd')
    xxx = (' <command id="13" name="XXX" summary="summary" direction="commandToServer" response="Y">\n'
           ' <optionalConform />\n'
           ' </command>\n')
    xml.append(xxx)
    return '\n'.join(xml)


def _create_cluster():
    environment = jinja2.Environment()
    template = environment.from_string(CLUSTER_TEMPLATE)
    return template.render(feature_string=_create_features(), attribute_string=_create_attributes(), command_string=_create_commands())


class TestConformanceSupport(MatterBaseTest):
    def setup_class(self):
        super().setup_class()

        clusters: dict[int, XmlCluster] = {}
        pure_base_clusters: dict[str, XmlCluster] = {}
        ids_by_name: dict[str, int] = {}
        problems: list[ProblemNotice] = []
        cluster_xml = ElementTree.fromstring(_create_cluster())
        add_cluster_data_from_xml(cluster_xml, clusters, pure_base_clusters, ids_by_name, problems)
        self.clusters = clusters
        # each element type uses 13 IDs from 1-13 (or bits for the features) and we want to test all the combinations
        num_elements = 13
        ids = range(1, num_elements + 1)
        self.all_id_combos = []
        combos = []
        for r in range(1, num_elements + 1):
            combos.extend(list(itertools.combinations(ids, r)))
        for combo in combos:
            # The first three IDs are all O.a, so we need exactly one for the conformance to be valid
            expected_failures = set()
            if len(set([1, 2, 3]) & set(combo)) != 1:
                expected_failures.add('a')
            if len(set([4, 5, 6]) & set(combo)) < 1:
                expected_failures.add('b')
            # For these, we are checking that the choice conformance checkers:
            # - correctly report errors (and correct cases) when the gating feature is on
            # - do not report any errors when the gating feature is off
            # Errors where we incorrectly set disallowed features based on the gating feature are checked
            # elsewhere in the cert test in a comprehensive way; here we just want to ensure that we are not
            # incorrectly reporting choice conformance errors as well.
            if 13 in combo and len(set([7, 8, 9]) & set(combo)) != 1:
                expected_failures.add('c')
            if 13 in combo and len(set([10, 11, 12]) & set(combo)) < 1:
                expected_failures.add('d')

            self.all_id_combos.append((combo, expected_failures))
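
As a worked example of the bookkeeping above, using one hypothetical combo:

# combo = (1, 2, 5, 13)
# 'a': exactly one of {1, 2, 3} is required, but both 1 and 2 are present    -> failure
# 'b': at least one of {4, 5, 6} is required, and 5 is present               -> ok
# 'c': 13 gates the check and none of {7, 8, 9} is present (need exactly 1)  -> failure
# 'd': 13 gates the check and none of {10, 11, 12} is present (need >= 1)    -> failure
# expected_failures == {'a', 'c', 'd'}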

    def _evaluate_problems(self, problems, expected_failures: list[str]):
        if len(expected_failures) != len(problems):
            print(problems)
        asserts.assert_equal(len(expected_failures), len(problems), 'Unexpected number of choice conformance problems')
        actual_failures = set([p.choice.marker for p in problems])
        asserts.assert_equal(actual_failures, expected_failures, "Mismatch between failures")

    def test_features(self):
        def make_feature_map(combo: tuple[int]) -> int:
            feature_map = 0
            for bit in combo:
                feature_map += pow(2, bit)
            return feature_map

        for combo, expected_failures in self.all_id_combos:
            problems = evaluate_feature_choice_conformance(0, 1, self.clusters, make_feature_map(combo), [], [])
            self._evaluate_problems(problems, expected_failures)
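
For reference, make_feature_map treats each ID in a combo as a feature bit position, e.g. for a hypothetical combo:

# make_feature_map((1, 4, 13)) == 2**1 + 2**4 + 2**13 == 8210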

    def test_attributes(self):
        for combo, expected_failures in self.all_id_combos:
            problems = evaluate_attribute_choice_conformance(0, 1, self.clusters, 0, list(combo), [])
            self._evaluate_problems(problems, expected_failures)

    def test_commands(self):
        for combo, expected_failures in self.all_id_combos:
            problems = evaluate_command_choice_conformance(0, 1, self.clusters, 0, [], list(combo))
            self._evaluate_problems(problems, expected_failures)


if __name__ == "__main__":
    default_matter_test_main()
74 changes: 74 additions & 0 deletions src/python_testing/choice_conformance_support.py
@@ -0,0 +1,74 @@
from chip.tlv import uint
from conformance_support import Choice, ConformanceDecisionWithChoice
from global_attribute_ids import GlobalAttributeIds
from matter_testing_support import AttributePathLocation, ProblemNotice, ProblemSeverity
from spec_parsing_support import XmlCluster


class ChoiceConformanceProblemNotice(ProblemNotice):
    def __init__(self, location: AttributePathLocation, choice: Choice, count: int):
        problem = f'Problem with choice conformance {choice} - {count} selected'
        super().__init__(test_name='Choice conformance', location=location, severity=ProblemSeverity.ERROR, problem=problem, spec_location='')
        self.choice = choice
        self.count = count


def _add_to_counts_if_required(conformance_decision_with_choice: ConformanceDecisionWithChoice, element_present: bool, counts: dict[Choice, int]):
    choice = conformance_decision_with_choice.choice
    if not choice:
        return
    counts[choice] = counts.get(choice, 0)
    if element_present:
        counts[choice] += 1


def _evaluate_choices(location: AttributePathLocation, counts: dict[Choice, int]) -> list[ChoiceConformanceProblemNotice]:
    problems: list[ChoiceConformanceProblemNotice] = []
    for choice, count in counts.items():
        if count == 0 or (not choice.more and count > 1):
            problems.append(ChoiceConformanceProblemNotice(location, choice, count))
    return problems
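
A minimal sketch of how the two helpers above compose; the Choice constructor arguments are an assumption, but the marker/more fields are the ones this file already reads:

from conformance_support import Choice
from global_attribute_ids import GlobalAttributeIds
from matter_testing_support import AttributePathLocation

loc = AttributePathLocation(endpoint_id=0, cluster_id=1, attribute_id=GlobalAttributeIds.FEATURE_MAP_ID)
choice_a = Choice(marker='a', more=False)  # exclusive choice; constructor shape assumed
counts = {choice_a: 2}                     # two elements of the choice were present on the device
problems = _evaluate_choices(loc, counts)  # -> one ChoiceConformanceProblemNotice ('a', count 2)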


def evaluate_feature_choice_conformance(endpoint_id: int, cluster_id: int, xml_clusters: dict[int, XmlCluster], feature_map: uint, attribute_list: list[uint], all_command_list: list[uint]) -> list[ChoiceConformanceProblemNotice]:
    all_features = [1 << i for i in range(32)]
    all_features = [f for f in all_features if f in xml_clusters[cluster_id].features.keys()]

    # Other pieces of the 10.2 test check for unknown features, so just remove them here to check choice conformance
    counts: dict[Choice, int] = {}
    for f in all_features:
        xml_feature = xml_clusters[cluster_id].features[f]
        conformance_decision_with_choice = xml_feature.conformance(feature_map, attribute_list, all_command_list)
        _add_to_counts_if_required(conformance_decision_with_choice, (feature_map & f), counts)

    location = AttributePathLocation(endpoint_id=endpoint_id, cluster_id=cluster_id,
                                     attribute_id=GlobalAttributeIds.FEATURE_MAP_ID)
    return _evaluate_choices(location, counts)
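
To make the presence test (feature_map & f) above concrete, an illustrative check with made-up values:

feature_map = 0x05            # hypothetical FeatureMap read from the DUT: bits 0 and 2 set
for f in (0x01, 0x02, 0x04):  # hypothetical feature masks defined in the cluster XML
    present = bool(feature_map & f)  # True, False, True - only set bits count toward their choice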


def evaluate_attribute_choice_conformance(endpoint_id: int, cluster_id: int, xml_clusters: dict[int, XmlCluster], feature_map: uint, attribute_list: list[uint], all_command_list: list[uint]) -> list[ChoiceConformanceProblemNotice]:
    all_attributes = xml_clusters[cluster_id].attributes.keys()

    counts: dict[Choice, int] = {}
    for attribute_id in all_attributes:
        conformance_decision_with_choice = xml_clusters[cluster_id].attributes[attribute_id].conformance(
            feature_map, attribute_list, all_command_list)
        _add_to_counts_if_required(conformance_decision_with_choice, attribute_id in attribute_list, counts)

    location = AttributePathLocation(endpoint_id=endpoint_id, cluster_id=cluster_id,
                                     attribute_id=GlobalAttributeIds.ATTRIBUTE_LIST_ID)
    return _evaluate_choices(location, counts)


def evaluate_command_choice_conformance(endpoint_id: int, cluster_id: int, xml_clusters: dict[int, XmlCluster], feature_map: uint, attribute_list: list[uint], all_command_list: list[uint]) -> list[ChoiceConformanceProblemNotice]:
    all_commands = xml_clusters[cluster_id].accepted_commands.keys()

    counts: dict[Choice, int] = {}
    for command_id in all_commands:
        conformance_decision_with_choice = xml_clusters[cluster_id].accepted_commands[command_id].conformance(
            feature_map, attribute_list, all_command_list)
        _add_to_counts_if_required(conformance_decision_with_choice, command_id in all_command_list, counts)

    location = AttributePathLocation(endpoint_id=endpoint_id, cluster_id=cluster_id,
                                     attribute_id=GlobalAttributeIds.ACCEPTED_COMMAND_LIST_ID)
    return _evaluate_choices(location, counts)
