From 79c05c21cf840bc5affe57dc0cc1415a74aa7ea4 Mon Sep 17 00:00:00 2001
From: Tennessee Carmel-Veilleux
Date: Fri, 5 Aug 2022 10:43:58 -0400
Subject: [PATCH] Implement TC-SC-3.6 as a Python test

- Test TC-SC-3.6 was not feasible with chip-tool and manual intervention
  due to the complexity of establishing all the required sessions,
  validating that the correct sessions were established, and ensuring
  all subscriptions fire as intended.

This PR:
- Adds a version of TC-SC-3.6 in Python
- Updates the Python code with minor improvements needed during the
  development of the test
- **Touches no C++ SDK code**

Testing done:
- Ran the test as an end-user would; passed on Linux and on ESP32
  (after some changes to resources in the examples, not included in
  this PR)

To run:
- Build the all-clusters app for Linux:
  - `scripts/examples/gn_build_example.sh examples/all-clusters-app/linux out/debug/standalone chip_config_network_layer_ble=false`
- In a shell, run:
  - `clear && rm -f kvs1 && out/debug/standalone/chip-all-clusters-app --discriminator 2118 --KVS kvs1`
- Build the Python environment, activate it, then run the test:
  - `./scripts/build_python.sh -m platform -i separate`
  - `. ./out/python_env/bin/activate`
- Run the test:
  - `rm -f admin_storage.json && python src/python_testing/TC_SC_3_6.py -m on-network -d 2118 -p 20202021`
---
 src/controller/python/chip/ChipDeviceCtrl.py  |  55 ++++-
 src/controller/python/chip/FabricAdmin.py     |  17 +-
 .../python/chip/clusters/Attribute.py         |   6 +-
 .../python/chip/storage/__init__.py           |   1 -
 .../chip/utils/CommissioningBuildingBlocks.py |  26 +-
 src/python_testing/TC_SC_3_6.py               | 226 ++++++++++++++++++
 src/python_testing/hello_test.py              |  17 ++
 src/python_testing/matter_testing_support.py  |  20 +-
 8 files changed, 334 insertions(+), 34 deletions(-)
 create mode 100644 src/python_testing/TC_SC_3_6.py

diff --git a/src/controller/python/chip/ChipDeviceCtrl.py b/src/controller/python/chip/ChipDeviceCtrl.py
index 089fdae966f943..3dc6c522225e6d 100644
--- a/src/controller/python/chip/ChipDeviceCtrl.py
+++ b/src/controller/python/chip/ChipDeviceCtrl.py
@@ -40,11 +40,11 @@
 from .clusters import Objects as GeneratedObjects
 from .clusters.CHIPClusters import *
 from . import clusters as Clusters
+from .FabricAdmin import FabricAdmin
 import enum
 import threading
 import typing
 import builtins
-import ipdb
 import ctypes
 import copy
@@ -133,7 +133,7 @@ class DiscoveryFilterType(enum.IntEnum):
 class ChipDeviceController():
     activeList = set()

-    def __init__(self, opCredsContext: ctypes.c_void_p, fabricId: int, nodeId: int, adminVendorId: int, paaTrustStorePath: str = "", useTestCommissioner: bool = False):
+    def __init__(self, opCredsContext: ctypes.c_void_p, fabricId: int, nodeId: int, adminVendorId: int, paaTrustStorePath: str = "", useTestCommissioner: bool = False, fabricAdmin: FabricAdmin = None, name: str = None):
         self.state = DCState.NOT_INITIALIZED
         self.devCtrl = None
         self._ChipStack = builtins.chipStack
@@ -150,19 +150,24 @@ def __init__(self, opCredsContext: ctypes.c_void_p, fabricId: int, nodeId: int,
                 opCredsContext), pointer(devCtrl), fabricId, nodeId, adminVendorId, ctypes.c_char_p(None if len(paaTrustStorePath) == 0 else str.encode(paaTrustStorePath)), useTestCommissioner)
         )

-        self.nodeId = nodeId
+        self._nodeId = nodeId

         if res != 0:
             raise self._ChipStack.ErrorToException(res)

         self.devCtrl = devCtrl
+        self._fabricAdmin = fabricAdmin
+        self._fabricId = fabricId
+        self._adminIndex = fabricAdmin.adminIndex
+
+        if name is None:
+            self._name = "adminIndex(%x)/fabricId(0x%016X)/nodeId(0x%016X)" % (fabricAdmin.adminIndex, fabricId, nodeId)
+        else:
+            self._name = name

         self._Cluster = ChipClusters(builtins.chipStack)
         self._Cluster.InitLib(self._dmLib)

-    def GetNodeId(self):
-        return self.nodeId
-
         def HandleCommissioningComplete(nodeid, err):
             if err != 0:
                 print("Failed to commission: {}".format(err))
@@ -174,7 +179,7 @@ def HandleCommissioningComplete(nodeid, err):
             self._ChipStack.commissioningCompleteEvent.set()
             self._ChipStack.completeEvent.set()

-        def HandleKeyExchangeComplete(err):
+        def HandlePASEEstablishmentComplete(err):
             if err != 0:
                 print("Failed to establish secure session to device: {}".format(err))
                 self._ChipStack.callbackRes = self._ChipStack.ErrorToException(
@@ -183,7 +188,7 @@ def HandleKeyExchangeComplete(err):
                 print("Established secure session with Device")

             if self.state != DCState.COMMISSIONING:
-                # During Commissioning, HandleKeyExchangeComplete will also be called,
+                # During Commissioning, HandlePASEEstablishmentComplete will also be called,
                 # in this case the async operation should be marked as finished by
                 # HandleCommissioningComplete instead this function.
                 self.state = DCState.IDLE
@@ -194,10 +199,10 @@ def HandleKeyExchangeComplete(err):
             if err != 0:
                 HandleCommissioningComplete(0, err)

-        self.cbHandleKeyExchangeCompleteFunct = _DevicePairingDelegate_OnPairingCompleteFunct(
-            HandleKeyExchangeComplete)
+        self.cbHandlePASEEstablishmentCompleteFunct = _DevicePairingDelegate_OnPairingCompleteFunct(
+            HandlePASEEstablishmentComplete)
         self._dmLib.pychip_ScriptDevicePairingDelegate_SetKeyExchangeCallback(
-            self.devCtrl, self.cbHandleKeyExchangeCompleteFunct)
+            self.devCtrl, self.cbHandlePASEEstablishmentCompleteFunct)

         self.cbHandleCommissioningCompleteFunct = _DevicePairingDelegate_OnCommissioningCompleteFunct(
             HandleCommissioningComplete)
@@ -209,6 +214,33 @@ def HandleKeyExchangeComplete(err):

         ChipDeviceController.activeList.add(self)

+    @property
+    def fabricAdmin(self) -> FabricAdmin:
+        return self._fabricAdmin
+
+    @property
+    def nodeId(self) -> int:
+        return self._nodeId
+
+    @property
+    def fabricId(self) -> int:
+        return self._fabricId
+
+    @property
+    def adminIndex(self) -> int:
+        return self._adminIndex
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @name.setter
+    def name(self, new_name: str):
+        self._name = new_name
+
+    def GetNodeId(self) -> int:
+        return self.nodeId
+
     def Shutdown(self):
         ''' Shuts down this controller and reclaims any used resources, including the bound
             C++ constructor instance in the SDK.
@@ -423,6 +455,7 @@ def CommissionWithCode(self, setupPayload: str, nodeid: int):
         return self._ChipStack.commissioningEventRes == 0

     def CommissionIP(self, ipaddr: str, setupPinCode: int, nodeid: int):
+        """ DEPRECATED, DO NOT USE! Use `CommissionOnNetwork` or `CommissionWithCode` """
         self.CheckIsActive()

         # IP connection will run through full commissioning, so we need to wait
diff --git a/src/controller/python/chip/FabricAdmin.py b/src/controller/python/chip/FabricAdmin.py
index 4af0e38ad4f746..d44175819ba16a 100644
--- a/src/controller/python/chip/FabricAdmin.py
+++ b/src/controller/python/chip/FabricAdmin.py
@@ -23,7 +23,6 @@
 from typing import *
 from ctypes import *
 from rich.pretty import pprint
-import ipdb
 import json
 import logging
 import builtins
@@ -102,7 +101,7 @@ def __init__(self, vendorId: int, adminIndex: int = None, fabricId: int = 1):
             raise ValueError(
                 f"Invalid VendorID ({vendorId}) provided!")

-        self.vendorId = vendorId
+        self._vendorId = vendorId
         self._fabricId = fabricId

         if (adminIndex is None):
@@ -160,7 +159,7 @@ def NewController(self, nodeId: int = None, paaTrustStorePath: str = "", useTest
             f"Allocating new controller with FabricId: 0x{self._fabricId:016X}, NodeId: 0x{nodeId:016X}")

         controller = ChipDeviceCtrl.ChipDeviceController(
-            self.closure, self._fabricId, nodeId, self.vendorId, paaTrustStorePath, useTestCommissioner)
+            self.closure, self._fabricId, nodeId, self.vendorId, paaTrustStorePath, useTestCommissioner, fabricAdmin=self)
         return controller

     def ShutdownAll():
@@ -200,3 +199,15 @@ def Shutdown(self, deleteFromStorage: bool = True):

     def __del__(self):
         self.Shutdown(False)
+
+    @property
+    def vendorId(self) -> int:
+        return self._vendorId
+
+    @property
+    def fabricId(self) -> int:
+        return self._fabricId
+
+    @property
+    def adminIndex(self) -> int:
+        return self._adminIndex
diff --git a/src/controller/python/chip/clusters/Attribute.py b/src/controller/python/chip/clusters/Attribute.py
index db29b86974a284..fc2ec8ec20ba7d 100644
--- a/src/controller/python/chip/clusters/Attribute.py
+++ b/src/controller/python/chip/clusters/Attribute.py
@@ -465,7 +465,7 @@ def UpdateCachedData(self):
 class SubscriptionTransaction:
-    def __init__(self, transaction: 'AsyncReadTransaction', subscriptionId, devCtrl):
+    def __init__(self, transaction: AsyncReadTransaction, subscriptionId, devCtrl):
         self._onResubscriptionAttemptedCb = DefaultResubscriptionAttemptedCallback
         self._onAttributeChangeCb = DefaultAttributeChangeCallback
         self._onEventChangeCb = DefaultEventChangeCallback
@@ -760,9 +760,9 @@ def _handleDone(self):
         if not self._future.done():
             if self._resultError:
                 if self._subscription_handler:
-                    self._subscription_handler.OnErrorCb(chipError, self._subscription_handler)
+                    self._subscription_handler.OnErrorCb(self._resultError, self._subscription_handler)
                 else:
-                    self._future.set_exception(chip.exceptions.ChipStackError(chipError))
+                    self._future.set_exception(chip.exceptions.ChipStackError(self._resultError))
             else:
                 self._future.set_result(AsyncReadTransaction.ReadResponse(
                     attributes=self._cache.attributeCache, events=self._events))
diff --git a/src/controller/python/chip/storage/__init__.py b/src/controller/python/chip/storage/__init__.py
index 521fd15f699586..362abda084075c 100644
--- a/src/controller/python/chip/storage/__init__.py
+++ b/src/controller/python/chip/storage/__init__.py
@@ -23,7 +23,6 @@
 from typing import *
 from ctypes import *
 from rich.pretty import pprint
-import ipdb
 import json
 import logging
 import base64
diff --git a/src/controller/python/chip/utils/CommissioningBuildingBlocks.py b/src/controller/python/chip/utils/CommissioningBuildingBlocks.py
index 69e3082d0b7aa2..d6abb6b0d31f3d 100644
--- a/src/controller/python/chip/utils/CommissioningBuildingBlocks.py
+++ b/src/controller/python/chip/utils/CommissioningBuildingBlocks.py
@@ -115,24 +115,22 @@ async def CreateControllersOnFabric(fabricAdmin: FabricAdmin, adminDevCtrl: Chip

 async def AddNOCForNewFabricFromExisting(commissionerDevCtrl, newFabricDevCtrl, existingNodeId, newNodeId):
-    ''' Perform sequence to commission new frabric using existing commissioned fabric.
+    ''' Perform sequence to commission new fabric using existing commissioned fabric.

     Args:
         commissionerDevCtrl (ChipDeviceController): Already commissioned device controller used
             to commission a new fabric on `newFabricDevCtrl`.
         newFabricDevCtrl (ChipDeviceController): New device controller which is used for the new
             fabric we are establishing.
-        existingNodeId (int): Node ID of the server we are establishing a CASE session on the
-            existing fabric that we will used to perform AddNOC.
-        newNodeId (int): Node ID that we would like to server to used on the new fabric being
-            added.
+        existingNodeId (int): Node ID of the target where an AddNOC needs to be done for a new fabric.
+        newNodeId (int): Node ID to use for the target node on the new fabric.

     Return:
         bool: True if successful, False otherwise.
     '''
-
-    resp = await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(60), timedRequestTimeoutMs=1000)
+    # TODO: Why is timedRequestTimeoutMs needed here?
+    resp = await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(60), timedRequestTimeoutMs=10000)
     if resp.errorCode is not generalCommissioning.Enums.CommissioningError.kOk:
         return False

@@ -141,20 +139,20 @@ async def AddNOCForNewFabricFromExisting(commissionerDevCtrl, newFabricDevCtrl,
     chainForAddNOC = newFabricDevCtrl.IssueNOCChain(csrForAddNOC, newNodeId)
     if chainForAddNOC.rcacBytes is None or chainForAddNOC.icacBytes is None or chainForAddNOC.nocBytes is None or chainForAddNOC.ipkBytes is None:
         # Expiring the failsafe timer in an attempt to clean up.
-        await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+        await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=10000)
         return False

     await commissionerDevCtrl.SendCommand(existingNodeId, 0, opCreds.Commands.AddTrustedRootCertificate(chainForAddNOC.rcacBytes))
     resp = await commissionerDevCtrl.SendCommand(existingNodeId, 0, opCreds.Commands.AddNOC(chainForAddNOC.nocBytes, chainForAddNOC.icacBytes, chainForAddNOC.ipkBytes, newFabricDevCtrl.GetNodeId(), 0xFFF1))
     if resp.statusCode is not opCreds.Enums.OperationalCertStatus.kSuccess:
         # Expiring the failsafe timer in an attempt to clean up.
-        await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+        await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=10000)
         return False

     resp = await newFabricDevCtrl.SendCommand(newNodeId, 0, generalCommissioning.Commands.CommissioningComplete())
     if resp.errorCode is not generalCommissioning.Enums.CommissioningError.kOk:
         # Expiring the failsafe timer in an attempt to clean up.
-        await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+        await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=10000)
         return False

     if not await _IsNodeInFabricList(newFabricDevCtrl, newNodeId):
@@ -179,20 +177,20 @@ async def UpdateNOC(devCtrl, existingNodeId, newNodeId):
         bool: True if successful, False otherwise.
     """
-    resp = await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(600), timedRequestTimeoutMs=1000)
+    resp = await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(600), timedRequestTimeoutMs=10000)
     if resp.errorCode is not generalCommissioning.Enums.CommissioningError.kOk:
         return False
     csrForUpdateNOC = await devCtrl.SendCommand(
         existingNodeId, 0, opCreds.Commands.CSRRequest(CSRNonce=os.urandom(32), isForUpdateNOC=True))
     chainForUpdateNOC = devCtrl.IssueNOCChain(csrForUpdateNOC, newNodeId)
     if chainForUpdateNOC.rcacBytes is None or chainForUpdateNOC.icacBytes is None or chainForUpdateNOC.nocBytes is None or chainForUpdateNOC.ipkBytes is None:
-        await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+        await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=10000)
         return False

     resp = await devCtrl.SendCommand(existingNodeId, 0, opCreds.Commands.UpdateNOC(chainForUpdateNOC.nocBytes, chainForUpdateNOC.icacBytes))
     if resp.statusCode is not opCreds.Enums.OperationalCertStatus.kSuccess:
         # Expiring the failsafe timer in an attempt to clean up.
-        await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+        await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=10000)
         return False

     # Forget our session since the peer deleted it
@@ -201,7 +199,7 @@ async def UpdateNOC(devCtrl, existingNodeId, newNodeId):
     resp = await devCtrl.SendCommand(newNodeId, 0, generalCommissioning.Commands.CommissioningComplete())
     if resp.errorCode is not generalCommissioning.Enums.CommissioningError.kOk:
         # Expiring the failsafe timer in an attempt to clean up.
-        await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+        await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=10000)
         return False

     if not await _IsNodeInFabricList(devCtrl, newNodeId):
diff --git a/src/python_testing/TC_SC_3_6.py b/src/python_testing/TC_SC_3_6.py
new file mode 100644
index 00000000000000..48b84c7c03b884
--- /dev/null
+++ b/src/python_testing/TC_SC_3_6.py
@@ -0,0 +1,226 @@
+#
+# Copyright (c) 2022 Project CHIP Authors
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from unicodedata import name
+from matter_testing_support import MatterBaseTest, default_matter_test_main, async_test_body
+from chip.interaction_model import Status
+import chip.clusters as Clusters
+import chip.FabricAdmin
+import logging
+from mobly import asserts
+from chip.utils import CommissioningBuildingBlocks
+from chip.clusters.Attribute import TypedAttributePath, SubscriptionTransaction
+import asyncio
+import queue
+from threading import Event
+import time
+
+
+class ResubscriptionCatcher:
+    def __init__(self, name):
+        self._name = name
+        self._got_resubscription_event = Event()
+
+    async def __call__(self, transaction: SubscriptionTransaction, terminationError, nextResubscribeIntervalMsec):
+        self._got_resubscription_event.set()
+        logging.info("Got resubscription on client %s" % self.name)
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def caught_resubscription(self) -> bool:
+        return self._got_resubscription_event.is_set()
+
+
+class AttributeChangeAccumulator:
+    def __init__(self, name):
+        self._name = name
+        self._all_data = queue.Queue()
+
+    def __call__(self, path: TypedAttributePath, transaction: SubscriptionTransaction):
+        data = transaction.GetAttribute(path)
+        value = {
+            'sub_name': self._name,
+            'endpoint': path.Path.EndpointId,
+            'attribute': path.AttributeType,
+            'value': data
+        }
+        self._all_data.put(value)
+        logging.info("Got subscription report on client %s: %s" % (self.name, value))
+
+    @property
+    def all_data(self):
+        data = []
+        while True:
+            try:
+                data.append(self._all_data.get(block=False))
+            except queue.Empty:
+                break
+        return data
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+
+class TC_SC_3_6(MatterBaseTest):
+    @async_test_body
+    async def test_TC_SC_3_6(self):
+        dev_ctrl = self.default_controller
+
+        # Get overrides for debugging the test
+        num_fabrics_to_commission = self.user_params.get("num_fabrics_to_commission", 5)
+        num_controllers_per_fabric = self.user_params.get("num_controllers_per_fabric", 3)
+        min_report_interval_sec = self.user_params.get("min_report_interval_sec", 1)
+        max_report_interval_sec = self.user_params.get("max_report_interval_sec", 30)
+        # Time to wait after changing NodeLabel for subscriptions to all hit
+        stabilization_delay_sec = self.user_params.get("stabilization_delay_sec", 10)
+        sub_liveness_override_ms = self.user_params.get("sub_liveness_override_ms", None)
+
+        BEFORE_LABEL = "Before Subscriptions"
+        AFTER_LABEL = "After Subscriptions"
+
+        # Generate the list of all client names
+        all_names = []
+        for fabric_idx in range(num_fabrics_to_commission):
+            for controller_idx in range(num_controllers_per_fabric):
+                all_names.append("RD%d%s" % (fabric_idx + 1, chr(ord('A') + controller_idx)))
+        logging.info("Client names that will be used: %s" % all_names)
+        client_list = []
+
+        logging.info("Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3")
+
+        capability_minima = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.Basic.Attributes.CapabilityMinima)
+        asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric, 3)
+
+        logging.info("Pre-conditions: use existing fabric to configure new fabrics so that total is %d fabrics" %
+                     num_fabrics_to_commission)
+
+        # Generate Node IDs for the additional controllers, starting at 200 (i.e. 200, 300, 400, ...)
+        node_ids = [200 + (i * 100) for i in range(num_controllers_per_fabric - 1)]
+
+        # Prepare clients for the first fabric, which includes the default controller
+        dev_ctrl.name = all_names.pop(0)
+        client_list.append(dev_ctrl)
+
+        if num_controllers_per_fabric > 1:
+            new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=dev_ctrl.fabricAdmin, adminDevCtrl=dev_ctrl, controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.Privilege.kAdminister, targetNodeId=self.dut_node_id)
+            for controller in new_controllers:
+                controller.name = all_names.pop(0)
+            client_list.extend(new_controllers)
+
+        # Prepare clients for subsequent fabrics
+        for i in range(num_fabrics_to_commission - 1):
+            admin_index = 2 + i
+            logging.info("Commissioning fabric %d/%d" % (admin_index, num_fabrics_to_commission))
+            new_fabric_admin = chip.FabricAdmin.FabricAdmin(vendorId=0xFFF1, adminIndex=admin_index)
+            new_admin_ctrl = new_fabric_admin.NewController(nodeId=dev_ctrl.nodeId)
+            new_admin_ctrl.name = all_names.pop(0)
+            client_list.append(new_admin_ctrl)
+            await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(commissionerDevCtrl=dev_ctrl, newFabricDevCtrl=new_admin_ctrl, existingNodeId=self.dut_node_id, newNodeId=self.dut_node_id)
+
+            if num_controllers_per_fabric > 1:
+                new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=new_fabric_admin, adminDevCtrl=new_admin_ctrl,
+                                                                                              controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.Privilege.kAdminister, targetNodeId=self.dut_node_id)
+                for controller in new_controllers:
+                    controller.name = all_names.pop(0)
+
+                client_list.extend(new_controllers)
+
+        asserts.assert_equal(len(client_list), num_fabrics_to_commission *
+                             num_controllers_per_fabric, "Must have the right number of clients")
+
+        # Before subscribing, set the NodeLabel to "Before Subscriptions"
+        logging.info("Pre-conditions: writing initial value of NodeLabel, so that we can control for change of attribute detection")
+        await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.Basic.Attributes.NodeLabel(value=BEFORE_LABEL))])
+
+        # Subscribe with all clients to the NodeLabel attribute
+        subscriptions = []
+        sub_handlers = []
+        resub_catchers = []
+
+        logging.info("Step 1 (first part): Establish subscription with all 15 clients")
+        for client in client_list:
+            logging.info("Establishing subscription from controller node %s" % client.name)
+            sub = await client.ReadAttribute(nodeid=self.dut_node_id, attributes=[(0, Clusters.Basic.Attributes.NodeLabel)],
+                                             reportInterval=(min_report_interval_sec, max_report_interval_sec), keepSubscriptions=False)
+            subscriptions.append(sub)
+
+            attribute_handler = AttributeChangeAccumulator(name=client.name)
+            sub.SetAttributeUpdateCallback(attribute_handler)
+            sub_handlers.append(attribute_handler)
+
+            resub_catcher = ResubscriptionCatcher(name=client.name)
+            sub.SetResubscriptionAttemptedCallback(resub_catcher, isAsync=True)
+            resub_catchers.append(resub_catcher)
+
+            if sub_liveness_override_ms is not None:
+                logging.warning("Overriding subscription liveness timeout to %dms! NOT FOR CERTIFICATION!"
+                                % sub_liveness_override_ms)
+                sub.OverrideLivenessTimeoutMs(sub_liveness_override_ms)
+
+        asserts.assert_equal(len(subscriptions), num_fabrics_to_commission *
+                             num_controllers_per_fabric, "Must have the right number of subscriptions")
+
+        # Trigger a change on NodeLabel
+        logging.info("Step 1 (second part): Change attribute with one client, and validate all clients observe the change on same session")
+        await asyncio.sleep(1)
+        await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.Basic.Attributes.NodeLabel(value=AFTER_LABEL))])
+
+        # Await a stabilization delay in increments to let the coroutines run
+        start_time = time.time()
+        while (time.time() - start_time) < stabilization_delay_sec:
+            await asyncio.sleep(0.05)
+
+        # After stabilization, validate that no resubscriptions occurred and that all clients saw an update
+
+        logging.info("Validation of results")
+        # First check: every subscription saw the update exactly once
+        failed = False
+        for handler in sub_handlers:
+            data_update_count = 0
+            for item in handler.all_data:
+                if item['value'] == AFTER_LABEL:
+                    data_update_count += 1
+
+            if data_update_count == 0:
+                logging.error("Client %s did not see subscription update" % handler.name)
+                failed = True
+            elif data_update_count > 1:
+                logging.error("Client %s saw %d updates instead of 1" % (handler.name, data_update_count))
+                failed = True
+            else:
+                logging.info("Client %s successfully saw 1 update" % handler.name)
+
+        # Second check: no resubscriptions
+        for catcher in resub_catchers:
+            if catcher.caught_resubscription:
+                logging.error("Client %s saw a resubscription" % catcher.name)
+                failed = True
+            else:
+                logging.info("Client %s correctly did not see a resubscription" % catcher.name)
+
+        # Determine final result
+        if failed:
+            asserts.fail("Failed test!")
+
+        # Pass is implicit if not failed
+
+
+if __name__ == "__main__":
+    default_matter_test_main()
diff --git a/src/python_testing/hello_test.py b/src/python_testing/hello_test.py
index 51a380b9e909f9..d6bde7de629a4a 100644
--- a/src/python_testing/hello_test.py
+++ b/src/python_testing/hello_test.py
@@ -1,3 +1,20 @@
+#
+# Copyright (c) 2022 Project CHIP Authors
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 from matter_testing_support import MatterBaseTest, default_matter_test_main, async_test_body
 from chip.interaction_model import Status
 import chip.clusters as Clusters
diff --git a/src/python_testing/matter_testing_support.py b/src/python_testing/matter_testing_support.py
index 660f1c85d12d49..9ac5f8ed44d80a 100644
--- a/src/python_testing/matter_testing_support.py
+++ b/src/python_testing/matter_testing_support.py
@@ -121,6 +121,7 @@ class MatterTestConfig:
     storage_path: pathlib.Path = None
     logs_path: pathlib.Path = None
     paa_trust_store_path: pathlib.Path = None
+    ble_interface_id: int = None

     admin_vendor_id: int = _DEFAULT_ADMIN_VENDOR_ID
     global_test_params: dict = field(default_factory=dict)
@@ -130,6 +131,7 @@ class MatterTestConfig:
     commissioning_method: str = None
     discriminator: int = None
     setup_passcode: int = None
+    commissionee_ip_address_just_for_testing: str = None

     qr_code_content: str = None
     manual_code: str = None
@@ -158,7 +160,7 @@ def __init__(self, config: MatterTestConfig):
         self._fabric_admins = []

         if not hasattr(builtins, "chipStack"):
-            chip.native.Init()
+            chip.native.Init(bluetoothAdapter=config.ble_interface_id)
             if config.storage_path is None:
                 raise ValueError("Must have configured a MatterTestConfig.storage_path")
             self._init_stack(already_initialized=False, persistentStoragePath=config.storage_path)
@@ -468,6 +470,11 @@ def populate_commissioning_args(args: argparse.Namespace, config: MatterTestConf
             print("error: missing --thread-dataset-hex for --commissioning-method ble-thread!")
             return False
         config.thread_operational_dataset = args.thread_dataset_hex
+    elif config.commissioning_method == "on-network-ip":
+        if args.ip_addr is None:
+            print("error: missing --ip-addr for --commissioning-method on-network-ip")
+            return False
+        config.commissionee_ip_address_just_for_testing = args.ip_addr

     return True

@@ -482,6 +489,7 @@ def convert_args_to_matter_config(args: argparse.Namespace) -> MatterTestConfig:
     config.storage_path = pathlib.Path(_DEFAULT_STORAGE_PATH) if args.storage_path is None else args.storage_path
     config.logs_path = pathlib.Path(_DEFAULT_LOG_PATH) if args.logs_path is None else args.logs_path
     config.paa_trust_store_path = args.paa_trust_store_path
+    config.ble_interface_id = args.ble_interface_id

     config.controller_node_id = args.controller_node_id

@@ -521,6 +529,8 @@ def parse_matter_test_args(argv: list[str]) -> MatterTestConfig:
     paa_path_default = get_default_paa_trust_store(pathlib.Path.cwd())
     basic_group.add_argument('--paa-trust-store-path', action="store", type=pathlib.Path, metavar="PATH", default=paa_path_default,
                              help="PAA trust store path (default: %s)" % str(paa_path_default))
+    basic_group.add_argument('--ble-interface-id', action="store", type=int,
+                             metavar="INTERFACE_ID", help="ID of BLE adapter (from hciconfig)")
     basic_group.add_argument('-N', '--controller-node-id', type=int_decimal_or_hex,
                              metavar='NODE_ID',
                              default=_DEFAULT_CONTROLLER_NODE_ID,
@@ -533,7 +543,7 @@ def parse_matter_test_args(argv: list[str]) -> MatterTestConfig:
     commission_group.add_argument('-m', '--commissioning-method', type=str,
                                   metavar='METHOD_NAME',
-                                  choices=["on-network", "ble-wifi", "ble-thread"],
+                                  choices=["on-network", "ble-wifi", "ble-thread", "on-network-ip"],
                                   help='Name of commissioning method to use')
     commission_group.add_argument('-d', '--discriminator', type=int_decimal_or_hex,
                                   metavar='LONG_DISCRIMINATOR',
     commission_group.add_argument('-p', '--passcode',
                                   type=int_decimal_or_hex, metavar='PASSCODE',
                                   help='PAKE passcode to use')
+    commission_group.add_argument('-i', '--ip-addr', type=str,
+                                  metavar='RAW_IP_ADDRESS',
+                                  help='IP address to use (only for method "on-network-ip"). ONLY FOR LOCAL TESTING!')
     commission_group.add_argument('--wifi-ssid', type=str,
                                   metavar='SSID',
@@ -634,6 +647,9 @@ def _commission_device(self) -> bool:
             return dev_ctrl.CommissionWiFi(conf.discriminator, conf.setup_passcode, conf.dut_node_id, conf.wifi_ssid, conf.wifi_passphrase)
         elif conf.commissioning_method == "ble-thread":
             return dev_ctrl.CommissionThread(conf.discriminator, conf.setup_passcode, conf.dut_node_id, conf.thread_operational_dataset)
+        elif conf.commissioning_method == "on-network-ip":
+            logging.warning("==== USING A DIRECT IP COMMISSIONING METHOD NOT SUPPORTED IN THE LONG TERM ====")
+            return dev_ctrl.CommissionIP(ipaddr=conf.commissionee_ip_address_just_for_testing, setupPinCode=conf.setup_passcode, nodeid=conf.dut_node_id)
         else:
             raise ValueError("Invalid commissioning method %s!" % conf.commissioning_method)
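For context, tests built on `matter_testing_support.py` all follow the same skeleton as `hello_test.py` and the new `TC_SC_3_6.py`: subclass `MatterBaseTest`, decorate async test methods with `@async_test_body`, and use the framework-commissioned `self.default_controller` against `self.dut_node_id`. The sketch below is illustrative only; the class name `TC_Sketch` and the choice of reading NodeLabel are hypothetical, while the helper names mirror their usage in the diff above.

```python
# Minimal sketch of the matter_testing_support test-authoring pattern.
# TC_Sketch and the NodeLabel read are illustrative, not part of any test plan;
# the helper names (MatterBaseTest, async_test_body, read_single_attribute,
# default_matter_test_main) are the ones used by TC_SC_3_6.py above.

from matter_testing_support import MatterBaseTest, default_matter_test_main, async_test_body
import chip.clusters as Clusters
from mobly import asserts


class TC_Sketch(MatterBaseTest):
    @async_test_body
    async def test_sketch(self):
        # Controller commissioned by the framework during setup
        dev_ctrl = self.default_controller

        # Read one attribute from the DUT over the established CASE session,
        # mirroring the CapabilityMinima pre-condition check in TC_SC_3_6.py
        node_label = await self.read_single_attribute(
            dev_ctrl, node_id=self.dut_node_id, endpoint=0,
            attribute=Clusters.Basic.Attributes.NodeLabel)
        asserts.assert_true(isinstance(node_label, str), "NodeLabel should be a string")


if __name__ == "__main__":
    default_matter_test_main()
```

Such a file would be run the same way as TC_SC_3_6.py, i.e. from an activated Python environment with `-m`, `-d` and `-p` arguments as shown in the "To run" section of this PR description.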