diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 9fcfc5cc5c033c..a99cc0a24d1f98 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -515,8 +515,6 @@ jobs: mkdir -p out/trace_data scripts/run_in_python_env.sh out/venv './scripts/tests/run_python_test.py --load-from-env /tmp/test_env.yaml --script src/controller/python/test/test_scripts/mobile-device-test.py' scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/execute_python_tests.py --env-file /tmp/test_env.yaml --search-directory src/python_testing' - scripts/run_in_python_env.sh out/venv './scripts/tests/run_python_test.py --script "src/python_testing/TestMatterTestingSupport.py" --script-args "--trace-to json:out/trace_data/test-{SCRIPT_BASE_NAME}.json --trace-to perfetto:out/trace_data/test-{SCRIPT_BASE_NAME}.perfetto"' - scripts/run_in_python_env.sh out/venv './scripts/tests/run_python_test.py --script "src/python_testing/TestSpecParsingSupport.py" --script-args "--trace-to json:out/trace_data/test-{SCRIPT_BASE_NAME}.json --trace-to perfetto:out/trace_data/test-{SCRIPT_BASE_NAME}.perfetto"' scripts/run_in_python_env.sh out/venv './scripts/tests/TestTimeSyncTrustedTimeSourceRunner.py' scripts/run_in_python_env.sh out/venv './src/python_testing/test_testing/test_TC_ICDM_2_1.py' scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestIdChecks.py' @@ -528,6 +526,8 @@ jobs: scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/test_testing/test_IDM_10_4.py' scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/test_testing/test_TC_SC_7_1.py' scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/test_testing/TestDecorators.py' + scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestMatterTestingSupport.py' + scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestSpecParsingSupport.py' - name: Uploading core files diff --git a/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.matter b/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.matter index 4627385f5a4c44..9d4aa0c7362d22 100644 --- a/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.matter +++ b/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.matter @@ -2032,7 +2032,7 @@ endpoint 0 { ram attribute clusterRevision default = 1; } } -endpoint 1 { +endpoint 13 { device type ma_onofflight = 256, version 1; binding cluster Binding; diff --git a/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.zap b/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.zap index e8625f2f612caf..aa402e3f0b0df2 100644 --- a/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.zap +++ b/examples/chef/devices/rootnode_onofflight_bbs1b7IaOV.zap @@ -3473,9 +3473,9 @@ "endpointTypeName": "Anonymous Endpoint Type", "endpointTypeIndex": 1, "profileId": 259, - "endpointId": 1, + "endpointId": 13, "networkId": 0, "parentEndpointIdentifier": null } ] -} \ No newline at end of file +} diff --git a/examples/energy-management-app/energy-management-common/energy-evse/include/EnergyEvseDelegateImpl.h b/examples/energy-management-app/energy-management-common/energy-evse/include/EnergyEvseDelegateImpl.h index c43707d9d09589..0b84e8fcc87c22 100644 --- a/examples/energy-management-app/energy-management-common/energy-evse/include/EnergyEvseDelegateImpl.h +++ b/examples/energy-management-app/energy-management-common/energy-evse/include/EnergyEvseDelegateImpl.h @@ -56,33 +56,47 @@ enum EVSEStateMachineEvent class EvseSession { public: - 
EvseSession(EndpointId aEndpoint) { mEndpointId = aEndpoint; } + EvseSession() {} /** * @brief This function records the start time and provided energy meter values as part of the new session. * + * @param endpointId - The endpoint to report the update on * @param chargingMeterValue - The current value of the energy meter (charging) in mWh * @param dischargingMeterValue - The current value of the energy meter (discharging) in mWh */ - void StartSession(int64_t chargingMeterValue, int64_t dischargingMeterValue); + void StartSession(EndpointId endpointId, int64_t chargingMeterValue, int64_t dischargingMeterValue); + + /** + * @brief This function updates the session information at the unplugged event + * + * @param endpointId - The endpoint to report the update on + * @param chargingMeterValue - The current value of the energy meter (charging) in mWh + * @param dischargingMeterValue - The current value of the energy meter (discharging) in mWh + */ + void StopSession(EndpointId endpointId, int64_t chargingMeterValue, int64_t dischargingMeterValue); /** * @brief This function updates the session Duration to allow read attributes to return latest values + * + * @param endpointId - The endpoint to report the update on */ - void RecalculateSessionDuration(); + void RecalculateSessionDuration(EndpointId endpointId); /** * @brief This function updates the EnergyCharged meter value * + * @param endpointId - The endpoint to report the update on * @param chargingMeterValue - The value of the energy meter (charging) in mWh */ - void UpdateEnergyCharged(int64_t chargingMeterValue); + void UpdateEnergyCharged(EndpointId endpointId, int64_t chargingMeterValue); /** * @brief This function updates the EnergyDischarged meter value * + * @param endpointId - The endpoint to report the update on * @param dischargingMeterValue - The value of the energy meter (discharging) in mWh */ - void UpdateEnergyDischarged(int64_t dischargingMeterValue); + void UpdateEnergyDischarged(EndpointId endpointId, int64_t dischargingMeterValue); /* Public members - represent attributes in the cluster */ DataModel::Nullable<uint32_t> mSessionID; @@ -91,8 +105,6 @@ class EvseSession DataModel::Nullable<int64_t> mSessionEnergyDischarged; private: - EndpointId mEndpointId = 0; - uint32_t mStartTime = 0; // Epoch_s - 0 means it hasn't started yet int64_t mSessionEnergyChargedAtStart = 0; // in mWh - 0 means it hasn't been set yet int64_t mSessionEnergyDischargedAtStart = 0; // in mWh - 0 means it hasn't been set yet @@ -358,7 +370,7 @@ class EnergyEvseDelegate : public EnergyEvse::Delegate DataModel::Nullable<CharSpan> mVehicleID; /* Session Object */ - EvseSession mSession = EvseSession(mEndpointId); + EvseSession mSession = EvseSession(); /* Helper variable to hold meter val since last EnergyTransferStarted event */ int64_t mMeterValueAtEnergyTransferStart; diff --git a/examples/energy-management-app/energy-management-common/energy-evse/src/EnergyEvseDelegateImpl.cpp b/examples/energy-management-app/energy-management-common/energy-evse/src/EnergyEvseDelegateImpl.cpp index 6266be245c2a4a..0294a0349e8f29 100644 --- a/examples/energy-management-app/energy-management-common/energy-evse/src/EnergyEvseDelegateImpl.cpp +++ b/examples/energy-management-app/energy-management-common/energy-evse/src/EnergyEvseDelegateImpl.cpp @@ -672,8 +672,8 @@ Status EnergyEvseDelegate::HandleEVPluggedInEvent() if (mState == StateEnum::kNotPluggedIn) { /* EV was not plugged in - start a new session */ - // TODO get energy meter readings - mSession.StartSession(0, 0); + // TODO get energy 
meter readings - #35370 + mSession.StartSession(mEndpointId, 0, 0); SendEVConnectedEvent(); /* Set the state to either PluggedInNoDemand or PluggedInDemand as indicated by mHwState */ @@ -694,6 +694,8 @@ Status EnergyEvseDelegate::HandleEVNotDetectedEvent() SendEnergyTransferStoppedEvent(EnergyTransferStoppedReasonEnum::kOther); } + // TODO get energy meter readings - #35370 + mSession.StopSession(mEndpointId, 0, 0); SendEVNotDetectedEvent(); SetState(StateEnum::kNotPluggedIn); return Status::Success; @@ -706,7 +708,7 @@ Status EnergyEvseDelegate::HandleEVNoDemandEvent() /* * EV was transferring current - EV decided to stop */ - mSession.RecalculateSessionDuration(); + mSession.RecalculateSessionDuration(mEndpointId); SendEnergyTransferStoppedEvent(EnergyTransferStoppedReasonEnum::kEVStopped); } /* We must still be plugged in to get here - so no need to check if we are plugged in! */ @@ -1601,7 +1603,6 @@ DataModel::Nullable<uint32_t> EnergyEvseDelegate::GetSessionID() } DataModel::Nullable<uint32_t> EnergyEvseDelegate::GetSessionDuration() { - mSession.RecalculateSessionDuration(); return mSession.mSessionDuration; } DataModel::Nullable<int64_t> EnergyEvseDelegate::GetSessionEnergyCharged() @@ -1626,10 +1627,11 @@ bool EnergyEvseDelegate::IsEvsePluggedIn() /** * @brief This function samples the start-time, and energy meter to hold the session info * + * @param endpointId - The endpoint to report the update on * @param chargingMeterValue - The current value of the energy meter (charging) in mWh * @param dischargingMeterValue - The current value of the energy meter (discharging) in mWh */ -void EvseSession::StartSession(int64_t chargingMeterValue, int64_t dischargingMeterValue) +void EvseSession::StartSession(EndpointId endpointId, int64_t chargingMeterValue, int64_t dischargingMeterValue) { /* Get Timestamp */ uint32_t chipEpoch = 0; @@ -1661,13 +1663,13 @@ void EvseSession::StartSession(int64_t chargingMeterValue, int64_t dischargingMe mSessionEnergyCharged = MakeNullable(static_cast<int64_t>(0)); mSessionEnergyDischarged = MakeNullable(static_cast<int64_t>(0)); - MatterReportingAttributeChangeCallback(mEndpointId, EnergyEvse::Id, SessionID::Id); - MatterReportingAttributeChangeCallback(mEndpointId, EnergyEvse::Id, SessionDuration::Id); - MatterReportingAttributeChangeCallback(mEndpointId, EnergyEvse::Id, SessionEnergyCharged::Id); - MatterReportingAttributeChangeCallback(mEndpointId, EnergyEvse::Id, SessionEnergyDischarged::Id); + MatterReportingAttributeChangeCallback(endpointId, EnergyEvse::Id, SessionID::Id); + MatterReportingAttributeChangeCallback(endpointId, EnergyEvse::Id, SessionDuration::Id); + MatterReportingAttributeChangeCallback(endpointId, EnergyEvse::Id, SessionEnergyCharged::Id); + MatterReportingAttributeChangeCallback(endpointId, EnergyEvse::Id, SessionEnergyDischarged::Id); // Write values to persistent storage. 
- ConcreteAttributePath path = ConcreteAttributePath(mEndpointId, EnergyEvse::Id, SessionID::Id); + ConcreteAttributePath path = ConcreteAttributePath(endpointId, EnergyEvse::Id, SessionID::Id); GetSafeAttributePersistenceProvider()->WriteScalarValue(path, mSessionID); // TODO persist mStartTime @@ -1675,12 +1677,28 @@ void EvseSession::StartSession(int64_t chargingMeterValue, int64_t dischargingMe // TODO persist mSessionEnergyDischargedAtStart } +/** + * @brief This function updates the session information at the unplugged event + * + * @param endpointId - The endpoint to report the update on + * @param chargingMeterValue - The current value of the energy meter (charging) in mWh + * @param dischargingMeterValue - The current value of the energy meter (discharging) in mWh + */ +void EvseSession::StopSession(EndpointId endpointId, int64_t chargingMeterValue, int64_t dischargingMeterValue) +{ + RecalculateSessionDuration(endpointId); + UpdateEnergyCharged(endpointId, chargingMeterValue); + UpdateEnergyDischarged(endpointId, dischargingMeterValue); +} + /*---------------------- EvseSession functions --------------------------*/ /** * @brief This function updates the session attrs to allow read attributes to return latest values + * + * @param endpointId - The endpoint to report the update on */ -void EvseSession::RecalculateSessionDuration() +void EvseSession::RecalculateSessionDuration(EndpointId endpointId) { /* Get Timestamp */ uint32_t chipEpoch = 0; @@ -1696,27 +1714,29 @@ void EvseSession::RecalculateSessionDuration() uint32_t duration = chipEpoch - mStartTime; mSessionDuration = MakeNullable(duration); - MatterReportingAttributeChangeCallback(mEndpointId, EnergyEvse::Id, SessionDuration::Id); + MatterReportingAttributeChangeCallback(endpointId, EnergyEvse::Id, SessionDuration::Id); } /** * @brief This function updates the EnergyCharged meter value * + * @param endpointId - The endpoint to report the update on * @param chargingMeterValue - The value of the energy meter (charging) in mWh */ -void EvseSession::UpdateEnergyCharged(int64_t chargingMeterValue) +void EvseSession::UpdateEnergyCharged(EndpointId endpointId, int64_t chargingMeterValue) { mSessionEnergyCharged = MakeNullable(chargingMeterValue - mSessionEnergyChargedAtStart); - MatterReportingAttributeChangeCallback(mEndpointId, EnergyEvse::Id, SessionEnergyCharged::Id); + MatterReportingAttributeChangeCallback(endpointId, EnergyEvse::Id, SessionEnergyCharged::Id); } /** * @brief This function updates the EnergyDischarged meter value * + * @param endpointId - The endpoint to report the update on * @param dischargingMeterValue - The value of the energy meter (discharging) in mWh */ -void EvseSession::UpdateEnergyDischarged(int64_t dischargingMeterValue) +void EvseSession::UpdateEnergyDischarged(EndpointId endpointId, int64_t dischargingMeterValue) { mSessionEnergyDischarged = MakeNullable(dischargingMeterValue - mSessionEnergyDischargedAtStart); - MatterReportingAttributeChangeCallback(mEndpointId, EnergyEvse::Id, SessionEnergyDischarged::Id); + MatterReportingAttributeChangeCallback(endpointId, EnergyEvse::Id, SessionEnergyDischarged::Id); } diff --git a/src/controller/python/ChipDeviceController-Discovery.cpp b/src/controller/python/ChipDeviceController-Discovery.cpp index 7c669603f4ff1d..e4349b31bd1f09 100644 --- a/src/controller/python/ChipDeviceController-Discovery.cpp +++ b/src/controller/python/ChipDeviceController-Discovery.cpp @@ -23,12 +23,16 @@ * */ +#include + #include #include #include #include #include #include 
+#include +#include using namespace chip; @@ -186,4 +190,31 @@ bool pychip_DeviceController_GetIPForDiscoveredDevice(Controller::DeviceCommissi } return false; } + +PyChipError pychip_CreateManualCode(uint16_t longDiscriminator, uint32_t passcode, char * manualCodeBuffer, size_t inBufSize, + size_t * outBufSize) +{ + SetupPayload payload; + SetupDiscriminator discriminator; + discriminator.SetLongValue(longDiscriminator); + payload.discriminator = discriminator; + payload.setUpPINCode = passcode; + std::string setupManualCode; + + *outBufSize = 0; + CHIP_ERROR err = ManualSetupPayloadGenerator(payload).payloadDecimalStringRepresentation(setupManualCode); + if (err == CHIP_NO_ERROR) + { + MutableCharSpan span(manualCodeBuffer, inBufSize); + // Plus 1 so we copy the null terminator + CopyCharSpanToMutableCharSpan(CharSpan(setupManualCode.c_str(), setupManualCode.length() + 1), span); + *outBufSize = span.size(); + if (*outBufSize == 0) + { + err = CHIP_ERROR_NO_MEMORY; + } + } + + return ToPyChipError(err); +} } diff --git a/src/controller/python/chip/ChipDeviceCtrl.py b/src/controller/python/chip/ChipDeviceCtrl.py index 24fe59486f78d5..011174185fd440 100644 --- a/src/controller/python/chip/ChipDeviceCtrl.py +++ b/src/controller/python/chip/ChipDeviceCtrl.py @@ -1711,6 +1711,19 @@ def InitGroupTestingData(self): self.devCtrl) ).raise_on_error() + def CreateManualCode(self, discriminator: int, passcode: int) -> str: + """ Creates a standard flow manual code from the given discriminator and passcode.""" + # 64 bytes is WAY more than required, but let's be safe + in_size = 64 + out_size = c_size_t(0) + buf = create_string_buffer(in_size) + self._ChipStack.Call( + lambda: self._dmLib.pychip_CreateManualCode(discriminator, passcode, buf, in_size, pointer(out_size)) + ).raise_on_error() + if out_size.value == 0 or out_size.value > in_size: + raise MemoryError("Invalid output size for manual code") + return buf.value.decode() + # ----- Private Members ----- def _InitLib(self): if self._dmLib is None: @@ -1938,6 +1951,9 @@ def _InitLib(self): self._dmLib.pychip_DeviceProxy_GetRemoteSessionParameters.restype = PyChipError self._dmLib.pychip_DeviceProxy_GetRemoteSessionParameters.argtypes = [c_void_p, c_char_p] + self._dmLib.pychip_CreateManualCode.restype = PyChipError + self._dmLib.pychip_CreateManualCode.argtypes = [c_uint16, c_uint32, c_char_p, c_size_t, POINTER(c_size_t)] + class ChipDeviceController(ChipDeviceControllerBase): ''' The ChipDeviceCommissioner binding, named as ChipDeviceController diff --git a/src/python_testing/TC_CCTRL_2_1.py b/src/python_testing/TC_CCTRL_2_1.py index 7822d101b16e02..a8aedb49ab3f2c 100644 --- a/src/python_testing/TC_CCTRL_2_1.py +++ b/src/python_testing/TC_CCTRL_2_1.py @@ -16,7 +16,7 @@ # import chip.clusters as Clusters -from matter_testing_support import MatterBaseTest, TestStep, default_matter_test_main, has_cluster, per_endpoint_test +from matter_testing_support import MatterBaseTest, TestStep, default_matter_test_main, has_cluster, run_if_endpoint_matches from mobly import asserts @@ -27,7 +27,7 @@ def steps_TC_CCTRL_2_1(self) -> list[TestStep]: TestStep(2, "Validate SupportedDeviceCategories is set accordingly based on MCORE.FS")] return steps - @per_endpoint_test(has_cluster(Clusters.CommissionerControl)) + @run_if_endpoint_matches(has_cluster(Clusters.CommissionerControl)) async def test_TC_CCTRL_2_1(self): self.step(1) is_fabric_sync_pics_enabled = self.check_pics("MCORE.FS") diff --git a/src/python_testing/TC_CCTRL_2_2.py 
b/src/python_testing/TC_CCTRL_2_2.py index ed4051feda3ebb..aa52703313ceaa 100644 --- a/src/python_testing/TC_CCTRL_2_2.py +++ b/src/python_testing/TC_CCTRL_2_2.py @@ -34,7 +34,7 @@ from chip import ChipDeviceCtrl from chip.interaction_model import InteractionModelError, Status from matter_testing_support import (MatterBaseTest, TestStep, async_test_body, default_matter_test_main, has_cluster, - per_endpoint_test) + run_if_endpoint_matches) from mobly import asserts @@ -119,7 +119,7 @@ def steps_TC_CCTRL_2_2(self) -> list[TestStep]: return steps - @per_endpoint_test(has_cluster(Clusters.CommissionerControl)) + @run_if_endpoint_matches(has_cluster(Clusters.CommissionerControl)) async def test_TC_CCTRL_2_2(self): self.is_ci = self.check_pics('PICS_SDK_CI_ONLY') diff --git a/src/python_testing/TC_CC_2_2.py b/src/python_testing/TC_CC_2_2.py index 86080efd358456..2cd9aef8a09f35 100644 --- a/src/python_testing/TC_CC_2_2.py +++ b/src/python_testing/TC_CC_2_2.py @@ -24,7 +24,7 @@ # test-runner-run/run1/factoryreset: True # test-runner-run/run1/quiet: True # test-runner-run/run1/app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json -# test-runner-run/run1/script-args: --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --PICS src/app/tests/suites/certification/ci-pics-values --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto +# test-runner-run/run1/script-args: --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --PICS src/app/tests/suites/certification/ci-pics-values --endpoint 1 --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto # === END CI TEST ARGUMENTS === import logging @@ -33,7 +33,7 @@ import chip.clusters as Clusters from chip.clusters import ClusterObjects as ClusterObjects from matter_testing_support import (ClusterAttributeChangeAccumulator, MatterBaseTest, TestStep, default_matter_test_main, - has_cluster, per_endpoint_test) + has_cluster, run_if_endpoint_matches) from mobly import asserts from test_plan_support import commission_if_required, if_feature_supported, read_attribute, verify_success @@ -107,7 +107,7 @@ def entry_count_verification() -> str: "The third entry in _reportedRemainingTimeValuesList_ is equal to 0") ] - @per_endpoint_test(has_cluster(Clusters.ColorControl)) + @run_if_endpoint_matches(has_cluster(Clusters.ColorControl)) async def test_TC_CC_2_2(self): gather_time = 20 diff --git a/src/python_testing/TC_DeviceBasicComposition.py b/src/python_testing/TC_DeviceBasicComposition.py index 72e6e3e2418c8c..24a336e0b001e9 100644 --- a/src/python_testing/TC_DeviceBasicComposition.py +++ b/src/python_testing/TC_DeviceBasicComposition.py @@ -19,14 +19,58 @@ # for details about the block below. 
# # === BEGIN CI TEST ARGUMENTS === -# test-runner-runs: run1 +# test-runner-runs: run1 run2 run3 run4 run5 run6 run7 # test-runner-run/run1/app: ${ALL_CLUSTERS_APP} # test-runner-run/run1/factoryreset: True # test-runner-run/run1/quiet: True # test-runner-run/run1/app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json # test-runner-run/run1/script-args: --storage-path admin_storage.json --manual-code 10054912339 --PICS src/app/tests/suites/certification/ci-pics-values --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto +# +# test-runner-run/run2/app: ${CHIP_LOCK_APP} +# test-runner-run/run2/factoryreset: True +# test-runner-run/run2/quiet: True +# test-runner-run/run2/app-args: --discriminator 1234 --KVS kvs1 +# test-runner-run/run2/script-args: --storage-path admin_storage.json --manual-code 10054912339 +# +# test-runner-run/run3/app: ${CHIP_LOCK_APP} +# test-runner-run/run3/factoryreset: True +# test-runner-run/run3/quiet: True +# test-runner-run/run3/app-args: --discriminator 1234 --KVS kvs1 +# test-runner-run/run3/script-args: --storage-path admin_storage.json --qr-code MT:-24J0Q1212-10648G00 +# +# test-runner-run/run4/app: ${CHIP_LOCK_APP} +# test-runner-run/run4/factoryreset: True +# test-runner-run/run4/quiet: True +# test-runner-run/run4/app-args: --discriminator 1234 --KVS kvs1 +# test-runner-run/run4/script-args: --storage-path admin_storage.json --discriminator 1234 --passcode 20202021 +# +# test-runner-run/run5/app: ${CHIP_LOCK_APP} +# test-runner-run/run5/factoryreset: True +# test-runner-run/run5/quiet: True +# test-runner-run/run5/app-args: --discriminator 1234 --KVS kvs1 +# test-runner-run/run5/script-args: --storage-path admin_storage.json --manual-code 10054912339 --commissioning-method on-network +# +# test-runner-run/run6/app: ${CHIP_LOCK_APP} +# test-runner-run/run6/factoryreset: True +# test-runner-run/run6/quiet: True +# test-runner-run/run6/app-args: --discriminator 1234 --KVS kvs1 +# test-runner-run/run6/script-args: --storage-path admin_storage.json --qr-code MT:-24J0Q1212-10648G00 --commissioning-method on-network +# +# test-runner-run/run7/app: ${CHIP_LOCK_APP} +# test-runner-run/run7/factoryreset: True +# test-runner-run/run7/quiet: True +# test-runner-run/run7/app-args: --discriminator 1234 --KVS kvs1 +# test-runner-run/run7/script-args: --storage-path admin_storage.json --discriminator 1234 --passcode 20202021 --commissioning-method on-network # === END CI TEST ARGUMENTS === +# Run 1: runs through all tests +# Run 2: tests PASE connection using manual code (12.1 only) +# Run 3: tests PASE connection using QR code (12.1 only) +# Run 4: tests PASE connection using discriminator and passcode (12.1 only) +# Run 5: Tests CASE connection using manual code (12.1 only) +# Run 6: Tests CASE connection using QR code (12.1 only) +# Run 7: Tests CASE connection using manual discriminator and passcode (12.1 only) + import logging from dataclasses import dataclass from typing import Any, Callable diff --git a/src/python_testing/TC_EEVSE_2_6.py b/src/python_testing/TC_EEVSE_2_6.py new file mode 100644 index 00000000000000..4567f96c3e466a --- /dev/null +++ b/src/python_testing/TC_EEVSE_2_6.py @@ -0,0 +1,293 @@ +# +# Copyright (c) 2024 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# See https://github.com/project-chip/connectedhomeip/blob/master/docs/testing/python.md#defining-the-ci-test-arguments +# for details about the block below. +# +# === BEGIN CI TEST ARGUMENTS === +# test-runner-runs: run1 +# test-runner-run/run1/app: ${ENERGY_MANAGEMENT_APP} +# test-runner-run/run1/factoryreset: True +# test-runner-run/run1/quiet: True +# test-runner-run/run1/app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json --enable-key 000102030405060708090a0b0c0d0e0f --application evse +# test-runner-run/run1/script-args: --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --hex-arg enableKey:000102030405060708090a0b0c0d0e0f --endpoint 1 --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto +# === END CI TEST ARGUMENTS === + +import logging +import time + +import chip.clusters as Clusters +from chip.clusters.Types import NullValue +from matter_testing_support import (ClusterAttributeChangeAccumulator, EventChangeCallback, MatterBaseTest, TestStep, + async_test_body, default_matter_test_main) +from mobly import asserts +from TC_EEVSE_Utils import EEVSEBaseTestHelper + +logger = logging.getLogger(__name__) + + +class TC_EEVSE_2_6(MatterBaseTest, EEVSEBaseTestHelper): + + def desc_TC_EEVSE_2_6(self) -> str: + """Returns a description of this test""" + return "5.1.6. [TC-EEVSE-2.6] Test Q quality functionality with DUT as Server" + + def pics_TC_EEVSE_2_6(self): + """ This function returns a list of PICS for this test case that must be True for the test to be run""" + + return ["EEVSE.S"] + + def steps_TC_EEVSE_2_6(self) -> list[TestStep]: + steps = [ + TestStep("1", "Commission DUT to TH (can be skipped if done in a preceding test)"), + TestStep("2", "TH reads from the DUT the FeatureMap", + "Verify that the DUT response contains the FeatureMap attribute. 
Store the value as FeatureMap."), + TestStep("3", "Set up a subscription to all EnergyEVSE cluster events"), + TestStep("4", "TH reads TestEventTriggersEnabled attribute from General Diagnostics Cluster", + "Value has to be 1 (True)"), + TestStep("5", "Set up a subscription to the EnergyEVSE cluster, with MinIntervalFloor set to 0, MaxIntervalCeiling set to 10 and KeepSubscriptions set to True", + "Subscription successfully established"), + TestStep("6", "TH sends TestEventTrigger command to General Diagnostics Cluster on Endpoint 0 with EnableKey field set to PIXIT.EEVSE.TESTEVENT_TRIGGERKEY and EventTrigger field set to PIXIT.EEVSE.TESTEVENTTRIGGER for Basic Functionality Test Event", + "Verify DUT responds w/ status SUCCESS(0x00)"), + TestStep("6a", "TH reads from the DUT the State", + "Value has to be 0x00 (NotPluggedIn)"), + TestStep("7", "TH sends TestEventTrigger command to General Diagnostics Cluster on Endpoint 0 with EnableKey field set to PIXIT.EEVSE.TESTEVENT_TRIGGERKEY and EventTrigger field set to PIXIT.EEVSE.TESTEVENTTRIGGER for EV Plugged-in Test Event", + "Verify DUT responds w/ status SUCCESS(0x00) and event EEVSE.S.E00(EVConnected) sent"), + TestStep("8", "TH sends command EnableCharging with ChargingEnabledUntil=null, minimumChargeCurrent=6000, maximumChargeCurrent=12000", + "Verify DUT responds w/ status SUCCESS(0x00)"), + TestStep("9", "TH sends TestEventTrigger command to General Diagnostics Cluster on Endpoint 0 with EnableKey field set to PIXIT.EEVSE.TESTEVENT_TRIGGERKEY and EventTrigger field set to PIXIT.EEVSE.TESTEVENTTRIGGER for EV Charge Demand Test Event", + "Verify DUT responds w/ status SUCCESS(0x00) and event EEVSE.S.E02(EnergyTransferStarted) sent"), + TestStep("9a", "TH reads from the DUT the State", + "Value has to be 0x03 (PluggedInCharging)"), + TestStep("10", "Reset all accumulated report counts, then wait 12 seconds"), + TestStep("10a", "TH counts all report transactions with an attribute report for the SessionID attribute", + "TH verifies that numberOfReportsReceived = 0"), + TestStep("10b", "TH counts all report transactions with an attribute report for the SessionDuration attribute", + "TH verifies that numberOfReportsReceived <= 2"), + TestStep("10c", "TH counts all report transactions with an attribute report for the SessionEnergyCharged attribute", + "TH verifies that numberOfReportsReceived <= 2"), + TestStep("10d", "TH counts all report transactions with an attribute report for the SessionEnergyDischarged attribute", + "TH verifies that numberOfReportsReceived <= 2"), + TestStep("11", "Reset all accumulated report counts"), + TestStep("12", "TH sends command Disable", + "Verify DUT responds w/ status SUCCESS(0x00) and Event EEVSE.S.E03(EnergyTransferStopped) sent with reason EvseStopped"), + TestStep("13", "TH sends TestEventTrigger command to General Diagnostics Cluster on Endpoint 0 with EnableKey field set to PIXIT.EEVSE.TESTEVENT_TRIGGERKEY and EventTrigger field set to PIXIT.EEVSE.TESTEVENTTRIGGER for EV Charge Demand Test Event Clear", + "Verify DUT responds w/ status SUCCESS(0x00)"), + TestStep("14", "TH sends TestEventTrigger command to General Diagnostics Cluster on Endpoint 0 with EnableKey field set to PIXIT.EEVSE.TESTEVENT_TRIGGERKEY and EventTrigger field set to PIXIT.EEVSE.TESTEVENTTRIGGER for EV Plugged-in Test Event Clear", + "Verify DUT responds w/ status SUCCESS(0x00) and event EEVSE.S.E01(EVNotDetected) sent"), + TestStep("15", "Wait 5 seconds"), + TestStep("15a", "TH counts all report transactions with an attribute report 
for the SessionID attribute", + "TH verifies that numberOfReportsReceived = 0"), + TestStep("15b", "TH counts all report transactions with an attribute report for the SessionDuration attribute", + "TH verifies that numberOfReportsReceived >= 1"), + TestStep("15c", "TH counts all report transactions with an attribute report for the SessionEnergyCharged attribute", + "TH verifies that numberOfReportsReceived >= 1"), + TestStep("15d", "If V2X feature is supported on the cluster, TH counts all report transactions with an attribute report for the SessionEnergyDischarged attribute", + "TH verifies that numberOfReportsReceived >= 1"), + TestStep("16", "Reset all accumulated report counts"), + TestStep("17", "TH sends TestEventTrigger command to General Diagnostics Cluster on Endpoint 0 with EnableKey field set to PIXIT.EEVSE.TESTEVENT_TRIGGERKEY and EventTrigger field set to PIXIT.EEVSE.TESTEVENTTRIGGER for EV Plugged-in Test Event", + "Verify DUT responds w/ status SUCCESS(0x00) and event EEVSE.S.E00(EVConnected) sent"), + TestStep("18", "Wait 5 seconds"), + TestStep("18a", "TH counts all report transactions with an attribute report for the SessionID attribute", + "TH verifies that numberOfReportsReceived = 1"), + TestStep("18b", "TH counts all report transactions with an attribute report for the SessionDuration attribute", + "TH verifies that numberOfReportsReceived >= 1"), + TestStep("18c", "TH counts all report transactions with an attribute report for the SessionEnergyCharged attribute", + "TH verifies that numberOfReportsReceived >= 1"), + TestStep("18d", "If V2X feature is supported on the cluster, TH counts all report transactions with an attribute report for the SessionEnergyDischarged attribute", + "TH verifies that numberOfReportsReceived >= 1"), + TestStep("19", "TH sends TestEventTrigger command to General Diagnostics Cluster on Endpoint 0 with EnableKey field set to PIXIT.EEVSE.TESTEVENT_TRIGGERKEY and EventTrigger field set to PIXIT.EEVSE.TESTEVENTTRIGGER for EV Plugged-in Test Event Clear", + "Verify DUT responds w/ status SUCCESS(0x00) and event EEVSE.S.E01(EVNotDetected) sent"), + TestStep("20", "TH sends TestEventTrigger command to General Diagnostics Cluster on Endpoint 0 with EnableKey field set to PIXIT.EEVSE.TESTEVENT_TRIGGERKEY and EventTrigger field set to PIXIT.EEVSE.TESTEVENTTRIGGER for Basic Functionality Test Event Clear", + "Verify DUT responds w/ status SUCCESS(0x00)"), + TestStep("21", "Cancel the subscription to the Device Energy Management cluster", + "The subscription is cancelled successfully"), + ] + + return steps + + @async_test_body + async def test_TC_EEVSE_2_6(self): + self.step("1") + # Commission DUT - already done + + self.step("2") + feature_map = await self.read_evse_attribute_expect_success(attribute="FeatureMap") + logger.info(f"FeatureMap: {feature_map}") + has_v2x = feature_map & Clusters.EnergyEvse.Bitmaps.Feature.kV2x + + # Subscribe to Events and when they are sent push them to a queue for checking later + self.step("3") + events_callback = EventChangeCallback(Clusters.EnergyEvse) + await events_callback.start(self.default_controller, + self.dut_node_id, + self.matter_test_config.endpoint) + + self.step("4") + await self.check_test_event_triggers_enabled() + + self.step("5") + sub_handler = ClusterAttributeChangeAccumulator(Clusters.EnergyEvse) + await sub_handler.start(self.default_controller, self.dut_node_id, + self.matter_test_config.endpoint, + min_interval_sec=0, + max_interval_sec=10, keepSubscriptions=True) + + def 
accumulate_reports(wait_time): + logging.info(f"Test will now wait {wait_time} seconds to accumulate reports") + time.sleep(wait_time) + + self.step("6") + await self.send_test_event_trigger_basic() + + self.step("6a") + await self.check_evse_attribute("State", Clusters.EnergyEvse.Enums.StateEnum.kNotPluggedIn) + + self.step("7") + await self.send_test_event_trigger_pluggedin() + event_data = events_callback.wait_for_event_report( + Clusters.EnergyEvse.Events.EVConnected) + + self.step("8") + charge_until = NullValue + min_charge_current = 6000 + max_charge_current = 12000 + await self.send_enable_charge_command(charge_until=charge_until, min_charge=min_charge_current, max_charge=max_charge_current) + + self.step("9") + await self.send_test_event_trigger_charge_demand() + event_data = events_callback.wait_for_event_report(Clusters.EnergyEvse.Events.EnergyTransferStarted) + + self.step("9a") + await self.check_evse_attribute("State", Clusters.EnergyEvse.Enums.StateEnum.kPluggedInCharging) + + self.step("10") + wait = 12 # Wait 12 seconds - the spec says we should only get reports every 10s at most, unless a command changes it + sub_handler.reset() + accumulate_reports(wait) + + self.step("10a") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionID] + logging.info(f"Received {count} SessionID updates in {wait} seconds") + asserts.assert_equal(count, 0, f"Expected NO SessionID updates in {wait} seconds") + + self.step("10b") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionDuration] + logging.info(f"Received {count} SessionDuration updates in {wait} seconds") + asserts.assert_less_equal(count, 2, f"Expected <= 2 SessionDuration updates in {wait} seconds") + + self.step("10c") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionEnergyCharged] + logging.info(f"Received {count} SessionEnergyCharged updates in {wait} seconds") + asserts.assert_less_equal(count, 2, f"Expected <= 2 SessionEnergyCharged updates in {wait} seconds") + + self.step("10d") + if has_v2x: + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionEnergyDischarged] + logging.info(f"Received {count} SessionEnergyDischarged updates in {wait} seconds") + asserts.assert_less_equal(count, 2, f"Expected <= 2 SessionEnergyDischarged updates in {wait} seconds") + + self.step("11") + sub_handler.reset() + + self.step("12") + await self.send_disable_command() + event_data = events_callback.wait_for_event_report( + Clusters.EnergyEvse.Events.EnergyTransferStopped) + expected_reason = Clusters.EnergyEvse.Enums.EnergyTransferStoppedReasonEnum.kEVSEStopped + asserts.assert_equal(expected_reason, event_data.reason, + f"EnergyTransferStopped event reason was {event_data.reason}, expected {expected_reason}") + + self.step("13") + await self.send_test_event_trigger_charge_demand_clear() + + self.step("14") + await self.send_test_event_trigger_pluggedin_clear() + event_data = events_callback.wait_for_event_report( + Clusters.EnergyEvse.Events.EVNotDetected) + + self.step("15") + wait = 5 # We expect a change to the Session attributes after the EV is unplugged, Wait 5 seconds - allow time for the report to come in + accumulate_reports(wait) + + self.step("15a") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionID] + logging.info(f"Received {count} SessionID updates in {wait} seconds") + asserts.assert_equal(count, 0, "Expected = 0 SessionID updates after a Unplugged operation - it 
changes on next plug-in") + + self.step("15b") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionDuration] + logging.info(f"Received {count} SessionDuration updates in {wait} seconds") + asserts.assert_greater_equal(count, 1, "Expected >= 1 SessionDuration updates after a Unplugged operation") + + self.step("15c") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionEnergyCharged] + logging.info(f"Received {count} SessionEnergyCharged updates in {wait} seconds") + asserts.assert_greater_equal(count, 1, "Expected >= 1 SessionEnergyCharged updates after a Unplugged operation") + + self.step("15d") + if has_v2x: + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionEnergyDischarged] + logging.info(f"Received {count} SessionEnergyDischarged updates in {wait} seconds") + asserts.assert_greater_equal(count, 1, "Expected >= 1 SessionEnergyDischarged updates after a Unplugged operation") + + self.step("16") + sub_handler.reset() + + self.step("17") + await self.send_test_event_trigger_pluggedin() + event_data = events_callback.wait_for_event_report( + Clusters.EnergyEvse.Events.EVConnected) + + self.step("18") + wait = 5 # We expect a change to the Session attributes after the EV is plugged in again, Wait 5 seconds - allow time for the report to come in + accumulate_reports(wait) + + self.step("18a") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionID] + logging.info(f"Received {count} SessionID updates in {wait} seconds") + asserts.assert_equal(count, 1, "Expected = 1 SessionID updates after a plug-in") + + self.step("18b") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionDuration] + logging.info(f"Received {count} SessionDuration updates in {wait} seconds") + asserts.assert_greater_equal(count, 1, "Expected >= 1 SessionDuration updates after a Unplugged operation") + + self.step("18c") + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionEnergyCharged] + logging.info(f"Received {count} SessionEnergyCharged updates in {wait} seconds") + asserts.assert_greater_equal(count, 1, "Expected >= 1 SessionEnergyCharged updates after a Unplugged operation") + + self.step("18d") + if has_v2x: + count = sub_handler.attribute_report_counts[Clusters.EnergyEvse.Attributes.SessionEnergyDischarged] + logging.info(f"Received {count} SessionEnergyDischarged updates in {wait} seconds") + asserts.assert_greater_equal(count, 1, "Expected >= 1 SessionEnergyDischarged updates after a Unplugged operation") + + self.step("19") + await self.send_test_event_trigger_pluggedin_clear() + event_data = events_callback.wait_for_event_report( + Clusters.EnergyEvse.Events.EVNotDetected) + + self.step("20") + await self.send_test_event_trigger_basic_clear() + + self.step("21") + await sub_handler.cancel() + + +if __name__ == "__main__": + default_matter_test_main() diff --git a/src/python_testing/TC_LVL_2_3.py b/src/python_testing/TC_LVL_2_3.py index d45c459f1352d3..d0ac4c8c1ff893 100644 --- a/src/python_testing/TC_LVL_2_3.py +++ b/src/python_testing/TC_LVL_2_3.py @@ -24,7 +24,7 @@ # test-runner-run/run1/factoryreset: True # test-runner-run/run1/quiet: True # test-runner-run/run1/app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json -# test-runner-run/run1/script-args: --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --PICS 
src/app/tests/suites/certification/ci-pics-values --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto +# test-runner-run/run1/script-args: --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --PICS src/app/tests/suites/certification/ci-pics-values --endpoint 1 --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto # === END CI TEST ARGUMENTS === import logging @@ -33,7 +33,7 @@ import chip.clusters as Clusters import test_plan_support from matter_testing_support import (ClusterAttributeChangeAccumulator, MatterBaseTest, TestStep, default_matter_test_main, - has_cluster, per_endpoint_test) + has_cluster, run_if_endpoint_matches) from mobly import asserts @@ -84,7 +84,7 @@ def steps_TC_LVL_2_3(self) -> list[TestStep]: "The third entry in reportedRemainingTimeValuesList is equal to 0") ] - @per_endpoint_test(has_cluster(Clusters.LevelControl)) + @run_if_endpoint_matches(has_cluster(Clusters.LevelControl)) async def test_TC_LVL_2_3(self): # Commissioning - already done self.step(1) diff --git a/src/python_testing/TC_SWTCH.py b/src/python_testing/TC_SWTCH.py index 3db2b213428b39..3d369be456962d 100644 --- a/src/python_testing/TC_SWTCH.py +++ b/src/python_testing/TC_SWTCH.py @@ -18,13 +18,35 @@ # for details about the block below. # # === BEGIN CI TEST ARGUMENTS === -# test-runner-runs: run1 +# test-runner-runs: run1 run2 run3 run4 +# # test-runner-run/run1/app: ${ALL_CLUSTERS_APP} # test-runner-run/run1/factoryreset: True # test-runner-run/run1/quiet: True # test-runner-run/run1/app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json -# test-runner-run/run1/script-args: --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto --PICS src/app/tests/suites/certification/ci-pics-values +# test-runner-run/run1/script-args: --endpoint 1 --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto --PICS src/app/tests/suites/certification/ci-pics-values +# +# test-runner-run/run2/app: ${ALL_CLUSTERS_APP} +# test-runner-run/run2/factoryreset: True +# test-runner-run/run2/quiet: True +# test-runner-run/run2/app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json +# test-runner-run/run2/script-args: --endpoint 2 --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto --PICS src/app/tests/suites/certification/ci-pics-values +# +# test-runner-run/run3/app: ${ALL_CLUSTERS_APP} +# test-runner-run/run3/factoryreset: True +# test-runner-run/run3/quiet: True +# test-runner-run/run3/app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json +# test-runner-run/run3/script-args: --endpoint 3 --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto --PICS src/app/tests/suites/certification/ci-pics-values +# +# test-runner-run/run4/app: ${ALL_CLUSTERS_APP} +# test-runner-run/run4/factoryreset: True +# test-runner-run/run4/quiet: True +# test-runner-run/run4/app-args: 
--discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json +# test-runner-run/run4/script-args: --endpoint 4 --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto --PICS src/app/tests/suites/certification/ci-pics-values +# # === END CI TEST ARGUMENTS === +# +# These tests run on every endpoint regardless of whether a switch is present because they are set up to auto-select. import json import logging @@ -39,7 +61,8 @@ from chip.clusters.Attribute import EventReadResult from chip.tlv import uint from matter_testing_support import (AttributeValue, ClusterAttributeChangeAccumulator, EventChangeCallback, MatterBaseTest, - TestStep, await_sequence_of_reports, default_matter_test_main, has_feature, per_endpoint_test) + TestStep, await_sequence_of_reports, default_matter_test_main, has_feature, + run_if_endpoint_matches) from mobly import asserts logger = logging.getLogger(__name__) @@ -277,7 +300,7 @@ def steps_TC_SWTCH_2_2(self): "Verify that the value is 0, and that a subscription report was received for that change."), ] - @per_endpoint_test(has_feature(Clusters.Switch, Clusters.Switch.Bitmaps.Feature.kLatchingSwitch)) + @run_if_endpoint_matches(has_feature(Clusters.Switch, Clusters.Switch.Bitmaps.Feature.kLatchingSwitch)) async def test_TC_SWTCH_2_2(self): post_prompt_settle_delay_seconds = 10.0 cluster = Clusters.Switch @@ -387,7 +410,7 @@ def steps_TC_SWTCH_2_3(self): TestStep(9, "TH reads the CurrentPosition attribute from the DUT", "Verify that the value is 0"), ] - @per_endpoint_test(has_feature(Clusters.Switch, Clusters.Switch.Bitmaps.Feature.kMomentarySwitch)) + @run_if_endpoint_matches(has_feature(Clusters.Switch, Clusters.Switch.Bitmaps.Feature.kMomentarySwitch)) async def test_TC_SWTCH_2_3(self): # Commissioning - already done self.step(1) @@ -465,7 +488,7 @@ def steps_TC_SWTCH_2_4(self): """) ] - @per_endpoint_test(has_feature(Clusters.Switch, Clusters.Switch.Bitmaps.Feature.kMomentarySwitch)) + @run_if_endpoint_matches(has_feature(Clusters.Switch, Clusters.Switch.Bitmaps.Feature.kMomentarySwitch)) async def test_TC_SWTCH_2_4(self): switch_pressed_position = self._default_pressed_position post_prompt_settle_delay_seconds = 10.0 @@ -639,7 +662,7 @@ def should_run_SWTCH_2_5(wildcard, endpoint): asf = has_feature(Clusters.Switch, Clusters.Switch.Bitmaps.Feature.kActionSwitch) return msm(wildcard, endpoint) and not asf(wildcard, endpoint) - @per_endpoint_test(should_run_SWTCH_2_5) + @run_if_endpoint_matches(should_run_SWTCH_2_5) async def test_TC_SWTCH_2_5(self): # Commissioning - already done self.step(1) @@ -818,7 +841,7 @@ def should_run_SWTCH_2_6(wildcard, endpoint): asf = has_feature(Clusters.Switch, 0x20) return msm(wildcard, endpoint) and asf(wildcard, endpoint) - @per_endpoint_test(should_run_SWTCH_2_6) + @run_if_endpoint_matches(should_run_SWTCH_2_6) async def test_TC_SWTCH_2_6(self): # Commissioning - already done self.step(1) diff --git a/src/python_testing/TC_TIMESYNC_2_1.py b/src/python_testing/TC_TIMESYNC_2_1.py index 1cfb22e17c7fc8..0f2fdba4d603f5 100644 --- a/src/python_testing/TC_TIMESYNC_2_1.py +++ b/src/python_testing/TC_TIMESYNC_2_1.py @@ -24,7 +24,7 @@ # test-runner-run/run1/factoryreset: True # test-runner-run/run1/quiet: True # test-runner-run/run1/app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json -# test-runner-run/run1/script-args: --storage-path admin_storage.json 
--commissioning-method on-network --discriminator 1234 --passcode 20202021 --PICS src/app/tests/suites/certification/ci-pics-values --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto +# test-runner-run/run1/script-args: --endpoint 0 --storage-path admin_storage.json --commissioning-method on-network --discriminator 1234 --passcode 20202021 --PICS src/app/tests/suites/certification/ci-pics-values --trace-to json:${TRACE_TEST_JSON}.json --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto # === END CI TEST ARGUMENTS === import ipaddress @@ -32,7 +32,7 @@ import chip.clusters as Clusters from chip.clusters.Types import NullValue -from matter_testing_support import (MatterBaseTest, default_matter_test_main, has_attribute, has_cluster, per_endpoint_test, +from matter_testing_support import (MatterBaseTest, default_matter_test_main, has_attribute, has_cluster, run_if_endpoint_matches, utc_time_in_matter_epoch) from mobly import asserts @@ -42,7 +42,7 @@ async def read_ts_attribute_expect_success(self, attribute): cluster = Clusters.Objects.TimeSynchronization return await self.read_single_attribute_check_success(endpoint=None, cluster=cluster, attribute=attribute) - @per_endpoint_test(has_cluster(Clusters.TimeSynchronization) and has_attribute(Clusters.TimeSynchronization.Attributes.TimeSource)) + @run_if_endpoint_matches(has_cluster(Clusters.TimeSynchronization) and has_attribute(Clusters.TimeSynchronization.Attributes.TimeSource)) async def test_TC_TIMESYNC_2_1(self): attributes = Clusters.TimeSynchronization.Attributes features = await self.read_ts_attribute_expect_success(attribute=attributes.FeatureMap) diff --git a/src/python_testing/basic_composition_support.py b/src/python_testing/basic_composition_support.py index e25de55c0441a9..c09d78ef3027d8 100644 --- a/src/python_testing/basic_composition_support.py +++ b/src/python_testing/basic_composition_support.py @@ -15,7 +15,7 @@ # limitations under the License. # - +import asyncio import base64 import copy import json @@ -98,12 +98,15 @@ def ConvertValue(value) -> Any: class BasicCompositionTests: - async def connect_over_pase(self, dev_ctrl): - asserts.assert_true(self.matter_test_config.qr_code_content == [] or self.matter_test_config.manual_code == [], - "Cannot have both QR and manual code specified") - setupCode = self.matter_test_config.qr_code_content + self.matter_test_config.manual_code - asserts.assert_equal(len(setupCode), 1, "Require one of either --qr-code or --manual-code.") - await dev_ctrl.FindOrEstablishPASESession(setupCode[0], self.dut_node_id) + def get_code(self, dev_ctrl): + created_codes = [] + for idx, discriminator in enumerate(self.matter_test_config.discriminators): + created_codes.append(dev_ctrl.CreateManualCode(discriminator, self.matter_test_config.setup_passcodes[idx])) + + setup_codes = self.matter_test_config.qr_code_content + self.matter_test_config.manual_code + created_codes + asserts.assert_equal(len(setup_codes), 1, + "Require exactly one of either --qr-code, --manual-code or (--discriminator and --passcode).") + return setup_codes[0] def dump_wildcard(self, dump_device_composition_path: typing.Optional[str]) -> tuple[str, str]: """ Dumps a json and a txt file of the attribute wildcard for this device if the dump_device_composition_path is supplied. 
@@ -120,19 +123,34 @@ def dump_wildcard(self, dump_device_composition_path: typing.Optional[str]) -> t pprint(self.endpoints, outfile, indent=1, width=200, compact=True) return (json_dump_string, pformat(self.endpoints, indent=1, width=200, compact=True)) - async def setup_class_helper(self, default_to_pase: bool = True): + async def setup_class_helper(self, allow_pase: bool = True): dev_ctrl = self.default_controller self.problems = [] - do_test_over_pase = self.user_params.get("use_pase_only", default_to_pase) dump_device_composition_path: Optional[str] = self.user_params.get("dump_device_composition_path", None) - if do_test_over_pase: - await self.connect_over_pase(dev_ctrl) - node_id = self.dut_node_id - else: - # Using the already commissioned node - node_id = self.dut_node_id + node_id = self.dut_node_id + + task_list = [] + if allow_pase: + setup_code = self.get_code(dev_ctrl) + pase_future = dev_ctrl.EstablishPASESession(setup_code, self.dut_node_id) + task_list.append(asyncio.create_task(pase_future)) + + case_future = dev_ctrl.GetConnectedDevice(nodeid=node_id, allowPASE=False) + task_list.append(asyncio.create_task(case_future)) + + for task in task_list: + asyncio.ensure_future(task) + + done, pending = await asyncio.wait(task_list, return_when=asyncio.FIRST_COMPLETED) + + for task in pending: + try: + task.cancel() + await task + except asyncio.CancelledError: + pass wildcard_read = (await dev_ctrl.Read(node_id, [()])) diff --git a/src/python_testing/matter_testing_support.py b/src/python_testing/matter_testing_support.py index 1e0fa26ae8d722..7c1ac60fd3910a 100644 --- a/src/python_testing/matter_testing_support.py +++ b/src/python_testing/matter_testing_support.py @@ -625,12 +625,12 @@ class MatterTestConfig: # List of explicit tests to run by name. If empty, all tests will run tests: List[str] = field(default_factory=list) timeout: typing.Union[int, None] = None - endpoint: int = 0 + endpoint: typing.Union[int, None] = 0 app_pid: int = 0 commissioning_method: Optional[str] = None - discriminators: Optional[List[int]] = None - setup_passcodes: Optional[List[int]] = None + discriminators: List[int] = field(default_factory=list) + setup_passcodes: List[int] = field(default_factory=list) commissionee_ip_address_just_for_testing: Optional[str] = None # By default, we start with maximized cert chains, as required for RR-1.1. # This allows cert tests to be run without re-commissioning for RR-1.1. 
@@ -646,7 +646,7 @@ class MatterTestConfig: pics: dict[bool, str] = field(default_factory=dict) # Node ID for basic DUT - dut_node_ids: Optional[List[int]] = None + dut_node_ids: List[int] = field(default_factory=list) # Node ID to use for controller/commissioner controller_node_id: int = _DEFAULT_CONTROLLER_NODE_ID # CAT Tags for default controller/commissioner @@ -1157,7 +1157,7 @@ async def read_single_attribute_check_success( if node_id is None: node_id = self.dut_node_id if endpoint is None: - endpoint = self.matter_test_config.endpoint + endpoint = 0 if self.matter_test_config.endpoint is None else self.matter_test_config.endpoint result = await dev_ctrl.ReadAttribute(node_id, [(endpoint, attribute)], fabricFiltered=fabric_filtered) attr_ret = result[endpoint][cluster][attribute] @@ -1189,7 +1189,7 @@ async def read_single_attribute_expect_error( if node_id is None: node_id = self.dut_node_id if endpoint is None: - endpoint = self.matter_test_config.endpoint + endpoint = 0 if self.matter_test_config.endpoint is None else self.matter_test_config.endpoint result = await dev_ctrl.ReadAttribute(node_id, [(endpoint, attribute)], fabricFiltered=fabric_filtered) attr_ret = result[endpoint][cluster][attribute] @@ -1218,12 +1218,13 @@ async def write_single_attribute(self, attribute_value: object, endpoint_id: int """ dev_ctrl = self.default_controller node_id = self.dut_node_id - endpoint = self.matter_test_config.endpoint if endpoint_id is None else endpoint_id + if endpoint_id is None: + endpoint_id = 0 if self.matter_test_config.endpoint is None else self.matter_test_config.endpoint - write_result = await dev_ctrl.WriteAttribute(node_id, [(endpoint, attribute_value)]) + write_result = await dev_ctrl.WriteAttribute(node_id, [(endpoint_id, attribute_value)]) if expect_success: asserts.assert_equal(write_result[0].Status, Status.Success, - f"Expected write success for write to attribute {attribute_value} on endpoint {endpoint}") + f"Expected write success for write to attribute {attribute_value} on endpoint {endpoint_id}") return write_result[0].Status async def send_single_cmd( @@ -1236,7 +1237,7 @@ async def send_single_cmd( if node_id is None: node_id = self.dut_node_id if endpoint is None: - endpoint = self.matter_test_config.endpoint + endpoint = 0 if self.matter_test_config.endpoint is None else self.matter_test_config.endpoint result = await dev_ctrl.SendCommand(nodeid=node_id, endpoint=endpoint, payload=cmd, timedRequestTimeoutMs=timedRequestTimeoutMs, payloadCapability=payloadCapability) @@ -1730,19 +1731,8 @@ def populate_commissioning_args(args: argparse.Namespace, config: MatterTestConf config.qr_code_content.extend(args.qr_code) config.manual_code.extend(args.manual_code) - - if args.commissioning_method is None: - return True - - if args.discriminators == [] and (args.qr_code == [] and args.manual_code == []): - print("error: Missing --discriminator when no --qr-code/--manual-code present!") - return False - config.discriminators = args.discriminators - - if args.passcodes == [] and (args.qr_code == [] and args.manual_code == []): - print("error: Missing --passcode when no --qr-code/--manual-code present!") - return False - config.setup_passcodes = args.passcodes + config.discriminators.extend(args.discriminators) + config.setup_passcodes.extend(args.passcodes) if args.qr_code != [] and args.manual_code != []: print("error: Cannot have both --qr-code and --manual-code present!") @@ -1759,9 +1749,11 @@ def populate_commissioning_args(args: argparse.Namespace, config: 
MatterTestConf return False if len(config.dut_node_ids) < len(device_descriptors): - missing = len(device_descriptors) - len(config.dut_node_ids) # We generate new node IDs sequentially from the last one seen for all # missing NodeIDs when commissioning many nodes at once. + if not config.dut_node_ids: + config.dut_node_ids = [_DEFAULT_DUT_NODE_ID] + missing = len(device_descriptors) - len(config.dut_node_ids) for i in range(missing): config.dut_node_ids.append(config.dut_node_ids[-1] + 1) @@ -1773,6 +1765,17 @@ def populate_commissioning_args(args: argparse.Namespace, config: MatterTestConf print("error: Duplicate value in discriminator list") return False + if args.commissioning_method is None: + return True + + if args.discriminators == [] and (args.qr_code == [] and args.manual_code == []): + print("error: Missing --discriminator when no --qr-code/--manual-code present!") + return False + + if args.passcodes == [] and (args.qr_code == [] and args.manual_code == []): + print("error: Missing --passcode when no --qr-code/--manual-code present!") + return False + if config.commissioning_method == "ble-wifi": if args.wifi_ssid is None: print("error: missing --wifi-ssid for --commissioning-method ble-wifi!") @@ -1819,7 +1822,7 @@ def convert_args_to_matter_config(args: argparse.Namespace) -> MatterTestConfig: config.pics = {} if args.PICS is None else read_pics_from_file(args.PICS) config.tests = [] if args.tests is None else args.tests config.timeout = args.timeout # This can be none, we pull the default from the test if it's unspecified - config.endpoint = 0 if args.endpoint is None else args.endpoint + config.endpoint = args.endpoint config.app_pid = 0 if args.app_pid is None else args.app_pid config.controller_node_id = args.controller_node_id @@ -1869,10 +1872,10 @@ def parse_matter_test_args(argv: Optional[List[str]] = None) -> MatterTestConfig default=_DEFAULT_CONTROLLER_NODE_ID, help='NodeID to use for initial/default controller (default: %d)' % _DEFAULT_CONTROLLER_NODE_ID) basic_group.add_argument('-n', '--dut-node-id', '--nodeId', type=int_decimal_or_hex, - metavar='NODE_ID', dest='dut_node_ids', default=[_DEFAULT_DUT_NODE_ID], + metavar='NODE_ID', dest='dut_node_ids', default=[], help='Node ID for primary DUT communication, ' 'and NodeID to assign if commissioning (default: %d)' % _DEFAULT_DUT_NODE_ID, nargs="+") - basic_group.add_argument('--endpoint', type=int, default=0, help="Endpoint under test") + basic_group.add_argument('--endpoint', type=int, default=None, help="Endpoint under test") basic_group.add_argument('--app-pid', type=int, default=0, help="The PID of the app against which the test is going to run") basic_group.add_argument('--timeout', type=int, help="Test timeout in seconds") basic_group.add_argument("--PICS", help="PICS file path", type=str) @@ -1980,20 +1983,6 @@ def async_runner(self: MatterBaseTest, *args, **kwargs): return async_runner -def per_node_test(body): - """ Decorator to be used for PICS-free tests that apply to the entire node. - - Use this decorator when your script needs to be run once to validate the whole node. - To use this decorator, the test must NOT have an associated pics_ method. 
- """ - - def whole_node_runner(self: MatterBaseTest, *args, **kwargs): - asserts.assert_false(self.get_test_pics(self.current_test_info.name), "pics_ method supplied for per_node_test.") - return _async_runner(body, self, *args, **kwargs) - - return whole_node_runner - - EndpointCheckFunction = typing.Callable[[Clusters.Attribute.AsyncReadTransaction.ReadResponse, int], bool] @@ -2013,9 +2002,9 @@ def _has_cluster(wildcard, endpoint, cluster: ClusterObjects.Cluster) -> bool: def has_cluster(cluster: ClusterObjects.ClusterObjectDescriptor) -> EndpointCheckFunction: - """ EndpointCheckFunction that can be passed as a parameter to the per_endpoint_test decorator. + """ EndpointCheckFunction that can be passed as a parameter to the run_if_endpoint_matches decorator. - Use this function with the per_endpoint_test decorator to run this test on all endpoints with + Use this function with the run_if_endpoint_matches decorator to run this test on all endpoints with the specified cluster. For example, given a device with the following conformance EP0: cluster A, B, C @@ -2024,13 +2013,12 @@ def has_cluster(cluster: ClusterObjects.ClusterObjectDescriptor) -> EndpointChec EP3, cluster E And the following test specification: - @per_endpoint_test(has_cluster(Clusters.D)) + @run_if_endpoint_matches(has_cluster(Clusters.D)) test_mytest(self): ... - The test would be run on endpoint 1 and on endpoint 2. - - If the cluster is not found on any endpoint the decorator will call the on_skip function to + If you run this test with --endpoint 1 or --endpoint 2, the test will be run. If you run this test + with any other --endpoint the run_if_endpoint_matches decorator will call the on_skip function to notify the test harness that the test is not applicable to this node and the test will not be run. """ return partial(_has_cluster, cluster=cluster) @@ -2049,9 +2037,9 @@ def _has_attribute(wildcard, endpoint, attribute: ClusterObjects.ClusterAttribut def has_attribute(attribute: ClusterObjects.ClusterAttributeDescriptor) -> EndpointCheckFunction: - """ EndpointCheckFunction that can be passed as a parameter to the per_endpoint_test decorator. + """ EndpointCheckFunction that can be passed as a parameter to the run_if_endpoint_matches decorator. - Use this function with the per_endpoint_test decorator to run this test on all endpoints with + Use this function with the run_if_endpoint_matches decorator to run this test on all endpoints with the specified attribute. For example, given a device with the following conformance EP0: cluster A, B, C @@ -2060,13 +2048,12 @@ def has_attribute(attribute: ClusterObjects.ClusterAttributeDescriptor) -> Endpo EP3, cluster D without attribute d And the following test specification: - @per_endpoint_test(has_attribute(Clusters.D.Attributes.d)) + @run_if_endpoint_matches(has_attribute(Clusters.D.Attributes.d)) test_mytest(self): ... - The test would be run on endpoint 1 and on endpoint 2. - - If the cluster is not found on any endpoint the decorator will call the on_skip function to + If you run this test with --endpoint 1 or --endpoint 2, the test will be run. If you run this test + with any other --endpoint the run_if_endpoint_matches decorator will call the on_skip function to notify the test harness that the test is not applicable to this node and the test will not be run. 
""" return partial(_has_attribute, attribute=attribute) @@ -2085,9 +2072,9 @@ def _has_feature(wildcard, endpoint: int, cluster: ClusterObjects.ClusterObjectD def has_feature(cluster: ClusterObjects.ClusterObjectDescriptor, feature: IntFlag) -> EndpointCheckFunction: - """ EndpointCheckFunction that can be passed as a parameter to the per_endpoint_test decorator. + """ EndpointCheckFunction that can be passed as a parameter to the run_if_endpoint_matches decorator. - Use this function with the per_endpoint_test decorator to run this test on all endpoints with + Use this function with the run_if_endpoint_matches decorator to run this test on all endpoints with the specified feature. For example, given a device with the following conformance EP0: cluster A, B, C @@ -2096,31 +2083,69 @@ def has_feature(cluster: ClusterObjects.ClusterObjectDescriptor, feature: IntFla EP3: cluster D without feature F0 And the following test specification: - @per_endpoint_test(has_feature(Clusters.D.Bitmaps.Feature.F0)) + @run_if_endpoint_matches(has_feature(Clusters.D.Bitmaps.Feature.F0)) test_mytest(self): ... - The test would be run on endpoint 1 and on endpoint 2. - - If the cluster is not found on any endpoint the decorator will call the on_skip function to + If you run this test with --endpoint 1 or --endpoint 2, the test will be run. If you run this test + with any other --endpoint the run_if_endpoint_matches decorator will call the on_skip function to notify the test harness that the test is not applicable to this node and the test will not be run. """ return partial(_has_feature, cluster=cluster, feature=feature) -async def get_accepted_endpoints_for_test(self: MatterBaseTest, accept_function: EndpointCheckFunction) -> list[uint]: - """ Helper function for the per_endpoint_test decorator. +async def _get_all_matching_endpoints(self: MatterBaseTest, accept_function: EndpointCheckFunction) -> list[uint]: + """ Returns a list of endpoints matching the accept condition. """ + wildcard = await self.default_controller.Read(self.dut_node_id, [(Clusters.Descriptor), Attribute.AttributePath(None, None, GlobalAttributeIds.ATTRIBUTE_LIST_ID), Attribute.AttributePath(None, None, GlobalAttributeIds.FEATURE_MAP_ID), Attribute.AttributePath(None, None, GlobalAttributeIds.ACCEPTED_COMMAND_LIST_ID)]) + matching = [e for e in wildcard.attributes.keys() if accept_function(wildcard, e)] + return matching + + +async def should_run_test_on_endpoint(self: MatterBaseTest, accept_function: EndpointCheckFunction) -> bool: + """ Helper function for the run_if_endpoint_matches decorator. - Returns a list of endpoints on which the test should be run given the accept_function for the test. + Returns True if self.matter_test_config.endpoint matches the accept function. """ - wildcard = await self.default_controller.Read(self.dut_node_id, [(Clusters.Descriptor), Attribute.AttributePath(None, None, GlobalAttributeIds.ATTRIBUTE_LIST_ID), Attribute.AttributePath(None, None, GlobalAttributeIds.FEATURE_MAP_ID), Attribute.AttributePath(None, None, GlobalAttributeIds.ACCEPTED_COMMAND_LIST_ID)]) - return [e for e in wildcard.attributes.keys() if accept_function(wildcard, e)] + if self.matter_test_config.endpoint is None: + msg = """ + The --endpoint flag is required for this test. 
+ """ + asserts.fail(msg) + matching = await (_get_all_matching_endpoints(self, accept_function)) + return self.matter_test_config.endpoint in matching + + +def run_on_singleton_matching_endpoint(accept_function: EndpointCheckFunction): + """ Test decorator for a test that needs to be run on the endpoint that matches the given accept function. + + This decorator should be used for tests where the endpoint is not known a-priori (dynamic endpoints). + Note that currently this test is limited to devices with a SINGLE matching endpoint. + """ + def run_on_singleton_matching_endpoint_internal(body): + def matching_runner(self: MatterBaseTest, *args, **kwargs): + runner_with_timeout = asyncio.wait_for(_get_all_matching_endpoints(self, accept_function), timeout=30) + matching = asyncio.run(runner_with_timeout) + asserts.assert_less_equal(len(matching), 1, "More than one matching endpoint found for singleton test.") + if not matching: + logging.info("Test is not applicable to any endpoint - skipping test") + asserts.skip('No endpoint matches test requirements') + return + # Exceptions should flow through, hence no except block + try: + old_endpoint = self.matter_test_config.endpoint + self.matter_test_config.endpoint = matching[0] + logging.info(f'Running test on endpoint {self.matter_test_config.endpoint}') + _async_runner(body, self, *args, **kwargs) + finally: + self.matter_test_config.endpoint = old_endpoint + return matching_runner + return run_on_singleton_matching_endpoint_internal -def per_endpoint_test(accept_function: EndpointCheckFunction): - """ Test decorator for a test that needs to be run once per endpoint that meets the accept_function criteria. +def run_if_endpoint_matches(accept_function: EndpointCheckFunction): + """ Test decorator for a test that needs to be run only if the endpoint meets the accept_function criteria. - Place this decorator above the test_ method to have the test framework run this test once per endpoint. + Place this decorator above the test_ method to have the test framework run this test only if the endpoint matches. This decorator takes an EndpointCheckFunction to assess whether a test needs to be run on a particular endpoint. @@ -2132,52 +2157,31 @@ def per_endpoint_test(accept_function: EndpointCheckFunction): EP3, cluster E And the following test specification: - @per_endpoint_test(has_cluster(Clusters.D)) + @run_if_endpoint_matches(has_cluster(Clusters.D)) test_mytest(self): ... - The test would be run on endpoint 1 and on endpoint 2. - - If the cluster is not found on any endpoint the decorator will call the on_skip function to + If you run this test with --endpoint 1 or --endpoint 2, the test will be run. If you run this test + with any other --endpoint the decorator will call the on_skip function to notify the test harness that the test is not applicable to this node and the test will not be run. - The decorator works by setting the self.matter_test_config.endpoint value and running the test function. - Therefore, tests that make use of this decorator should call controller functions against that endpoint. - Support functions in this file default to this endpoint. - Tests that use this decorator cannot use a pics_ method for test selection and should not reference any PICS values internally. 
""" - def per_endpoint_test_internal(body): + def run_if_endpoint_matches_internal(body): def per_endpoint_runner(self: MatterBaseTest, *args, **kwargs): - asserts.assert_false(self.get_test_pics(self.current_test_info.name), "pics_ method supplied for per_endpoint_test.") - runner_with_timeout = asyncio.wait_for(get_accepted_endpoints_for_test(self, accept_function), timeout=60) - endpoints = asyncio.run(runner_with_timeout) - if not endpoints: - logging.info("No matching endpoints found - skipping test") - asserts.skip('No endpoints match requirements') + asserts.assert_false(self.get_test_pics(self.current_test_info.name), + "pics_ method supplied for run_if_endpoint_matches.") + runner_with_timeout = asyncio.wait_for(should_run_test_on_endpoint(self, accept_function), timeout=60) + should_run_test = asyncio.run(runner_with_timeout) + if not should_run_test: + logging.info("Test is not applicable to this endpoint - skipping test") + asserts.skip('Endpoint does not match test requirements') return - logging.info(f"Running test on the following endpoints: {endpoints}") - # setup_class is meant to be called once, but setup_test is expected to be run before - # each iteration. Mobly will run it for us the first time, but since we're running this - # more than one time, we want to make sure we reset everything as expected. - # Ditto for teardown - we want to tear down after each iteration, and we want to notify the hook that - # the test iteration is stopped. test_stop is called by on_pass or on_fail during the last iteration or - # on failure. - original_ep = self.matter_test_config.endpoint - for e in endpoints: - logging.info(f'Running test on endpoint {e}') - if e != endpoints[0]: - self.setup_test() - self.matter_test_config.endpoint = e - _async_runner(body, self, *args, **kwargs) - if e != endpoints[-1] and not self.failed: - self.teardown_test() - test_duration = (datetime.now(timezone.utc) - self.test_start_time) / timedelta(microseconds=1) - self.runner_hook.test_stop(exception=None, duration=test_duration) - self.matter_test_config.endpoint = original_ep + logging.info(f'Running test on endpoint {self.matter_test_config.endpoint}') + _async_runner(body, self, *args, **kwargs) return per_endpoint_runner - return per_endpoint_test_internal + return run_if_endpoint_matches_internal class CommissionDeviceTest(MatterBaseTest): diff --git a/src/python_testing/test_testing/MockTestRunner.py b/src/python_testing/test_testing/MockTestRunner.py index 024228db7cd035..2d0afb8a5c2e67 100644 --- a/src/python_testing/test_testing/MockTestRunner.py +++ b/src/python_testing/test_testing/MockTestRunner.py @@ -38,13 +38,14 @@ async def __call__(self, *args, **kwargs): class MockTestRunner(): - def __init__(self, filename: str, classname: str, test: str, endpoint: int = 0, pics: dict[str, bool] = None, paa_trust_store_path=None): - self.test = test - self.endpoint = endpoint - self.pics = pics + def __init__(self, filename: str, classname: str, test: str, endpoint: int = None, pics: dict[str, bool] = None, paa_trust_store_path=None): self.kvs_storage = 'kvs_admin.json' - self.paa_path = paa_trust_store_path + self.config = MatterTestConfig(endpoint=endpoint, paa_trust_store_path=paa_trust_store_path, + pics=pics, storage_path=self.kvs_storage) self.set_test(filename, classname, test) + + self.set_test_config(self.config) + self.stack = MatterStackState(self.config) self.default_controller = self.stack.certificate_authorities[0].adminList[0].NewController( nodeId=self.config.controller_node_id, @@ 
-54,20 +55,16 @@ def __init__(self, filename: str, classname: str, test: str, endpoint: int = 0, def set_test(self, filename: str, classname: str, test: str): self.test = test - self.set_test_config() + self.config.tests = [self.test] module = importlib.import_module(Path(os.path.basename(filename)).stem) self.test_class = getattr(module, classname) def set_test_config(self, test_config: MatterTestConfig = MatterTestConfig()): self.config = test_config self.config.tests = [self.test] - self.config.endpoint = self.endpoint self.config.storage_path = self.kvs_storage - self.config.paa_trust_store_path = self.paa_path if not self.config.dut_node_ids: self.config.dut_node_ids = [1] - if self.pics: - self.config.pics = self.pics def Shutdown(self): self.stack.Shutdown() diff --git a/src/python_testing/test_testing/TestDecorators.py b/src/python_testing/test_testing/TestDecorators.py index 2ce418d6c5e43a..1ad5bc550bc237 100644 --- a/src/python_testing/test_testing/TestDecorators.py +++ b/src/python_testing/test_testing/TestDecorators.py @@ -32,12 +32,13 @@ from chip.clusters import Attribute try: - from matter_testing_support import (MatterBaseTest, async_test_body, get_accepted_endpoints_for_test, has_attribute, - has_cluster, has_feature, per_endpoint_test, per_node_test) + from matter_testing_support import (MatterBaseTest, MatterTestConfig, async_test_body, has_attribute, has_cluster, has_feature, + run_if_endpoint_matches, run_on_singleton_matching_endpoint, should_run_test_on_endpoint) except ImportError: sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) - from matter_testing_support import (MatterBaseTest, async_test_body, get_accepted_endpoints_for_test, has_attribute, - has_cluster, has_feature, per_endpoint_test, per_node_test) + from matter_testing_support import (MatterBaseTest, MatterTestConfig, async_test_body, has_attribute, + has_cluster, has_feature, run_if_endpoint_matches, run_on_singleton_matching_endpoint, + should_run_test_on_endpoint) from typing import Optional @@ -136,107 +137,101 @@ async def test_endpoints(self): all_endpoints = await self.default_controller.Read(self.dut_node_id, [()]) all_endpoints = list(all_endpoints.attributes.keys()) - msg = "Unexpected endpoint list returned" + msg = "Unexpected evaluation of should_run_test_on_endpoint" + for e in all_endpoints: + self.matter_test_config.endpoint = e + should_run = await should_run_test_on_endpoint(self, has_onoff) + asserts.assert_true(should_run, msg) - endpoints = await get_accepted_endpoints_for_test(self, has_onoff) - asserts.assert_equal(endpoints, all_endpoints, msg) + should_run = await should_run_test_on_endpoint(self, has_onoff_onoff) + asserts.assert_true(should_run, msg) - endpoints = await get_accepted_endpoints_for_test(self, has_onoff_onoff) - asserts.assert_equal(endpoints, all_endpoints, msg) + should_run = await should_run_test_on_endpoint(self, has_onoff_ontime) + asserts.assert_false(should_run, msg) - endpoints = await get_accepted_endpoints_for_test(self, has_onoff_ontime) - asserts.assert_equal(endpoints, [], msg) + should_run = await should_run_test_on_endpoint(self, has_timesync) + asserts.assert_false(should_run, msg) - endpoints = await get_accepted_endpoints_for_test(self, has_timesync) - asserts.assert_equal(endpoints, [], msg) - - endpoints = await get_accepted_endpoints_for_test(self, has_timesync_utc) - asserts.assert_equal(endpoints, [], msg) - - # This test should cause an assertion because it has pics_ method - @per_node_test - async def 
test_whole_node_with_pics(self): - pass - - # This method returns the top level pics for test_whole_node_with_pics - # It is used to test that test_whole_node_with_pics will fail since you can't have a whole node test gated on a PICS. - def pics_whole_node_with_pics(self): - return ['EXAMPLE.S'] + should_run = await should_run_test_on_endpoint(self, has_timesync_utc) + asserts.assert_false(should_run, msg) # This test should cause an assertion because it has a pics_ method - @per_endpoint_test(has_cluster(Clusters.OnOff)) - async def test_per_endpoint_with_pics(self): + @run_if_endpoint_matches(has_cluster(Clusters.OnOff)) + async def test_endpoint_with_pics(self): pass - # This method returns the top level pics for test_per_endpoint_with_pics - # It is used to test that test_per_endpoint_with_pics will fail since you can't have a per endpoint test gated on a PICS. - def pics_per_endpoint_with_pics(self): + # This method returns the top level pics for test_endpoint_with_pics + # It is used to test that test_endpoint_with_pics will fail since you can't have a per endpoint test gated on a PICS. + def pics_endpoint_with_pics(self): return ['EXAMPLE.S'] - # This test should be run once - @per_node_test - async def test_whole_node_ok(self): - pass - # This test should be run once per endpoint - @per_endpoint_test(has_cluster(Clusters.OnOff)) + @run_if_endpoint_matches(has_cluster(Clusters.OnOff)) async def test_endpoint_cluster_yes(self): pass # This test should be skipped since this cluster isn't on any endpoint - @per_endpoint_test(has_cluster(Clusters.TimeSynchronization)) + @run_if_endpoint_matches(has_cluster(Clusters.TimeSynchronization)) async def test_endpoint_cluster_no(self): pass # This test should be run once per endpoint - @per_endpoint_test(has_attribute(Clusters.OnOff.Attributes.OnOff)) + @run_if_endpoint_matches(has_attribute(Clusters.OnOff.Attributes.OnOff)) async def test_endpoint_attribute_yes(self): pass # This test should be skipped since this attribute isn't on the supported cluster - @per_endpoint_test(has_attribute(Clusters.OnOff.Attributes.OffWaitTime)) + @run_if_endpoint_matches(has_attribute(Clusters.OnOff.Attributes.OffWaitTime)) async def test_endpoint_attribute_supported_cluster_no(self): pass # This test should be skipped since this attribute is part of an unsupported cluster - @per_endpoint_test(has_attribute(Clusters.TimeSynchronization.Attributes.Granularity)) + @run_if_endpoint_matches(has_attribute(Clusters.TimeSynchronization.Attributes.Granularity)) async def test_endpoint_attribute_unsupported_cluster_no(self): pass # This test should be run once per endpoint - @per_endpoint_test(has_feature(Clusters.OnOff, Clusters.OnOff.Bitmaps.Feature.kLighting)) + @run_if_endpoint_matches(has_feature(Clusters.OnOff, Clusters.OnOff.Bitmaps.Feature.kLighting)) async def test_endpoint_feature_yes(self): pass # This test should be skipped since this attribute is part of an unsupported cluster - @per_endpoint_test(has_feature(Clusters.TimeSynchronization, Clusters.TimeSynchronization.Bitmaps.Feature.kNTPClient)) + @run_if_endpoint_matches(has_feature(Clusters.TimeSynchronization, Clusters.TimeSynchronization.Bitmaps.Feature.kNTPClient)) async def test_endpoint_feature_unsupported_cluster_no(self): pass # This test should be run since both are present - @per_endpoint_test(has_attribute(Clusters.OnOff.Attributes.OnOff) and has_cluster(Clusters.OnOff)) + @run_if_endpoint_matches(has_attribute(Clusters.OnOff.Attributes.OnOff) and has_cluster(Clusters.OnOff)) async def 
test_endpoint_boolean_yes(self): pass # This test should be skipped since we have an OnOff cluster, but no Time sync - @per_endpoint_test(has_cluster(Clusters.OnOff) and has_cluster(Clusters.TimeSynchronization)) + @run_if_endpoint_matches(has_cluster(Clusters.OnOff) and has_cluster(Clusters.TimeSynchronization)) async def test_endpoint_boolean_no(self): pass - @per_endpoint_test(has_cluster(Clusters.OnOff)) + @run_if_endpoint_matches(has_cluster(Clusters.OnOff)) async def test_fail_on_ep0(self): if self.matter_test_config.endpoint == 0: asserts.fail("Expected failure") - @per_endpoint_test(has_cluster(Clusters.OnOff)) + @run_if_endpoint_matches(has_cluster(Clusters.OnOff)) async def test_fail_on_ep1(self): if self.matter_test_config.endpoint == 1: asserts.fail("Expected failure") - @per_node_test - async def test_fail_on_whole_node(self): + @run_on_singleton_matching_endpoint(has_cluster(Clusters.OnOff)) + async def test_run_on_singleton_matching_endpoint(self): + pass + + @run_on_singleton_matching_endpoint(has_cluster(Clusters.OnOff)) + async def test_run_on_singleton_matching_endpoint_failure(self): asserts.fail("Expected failure") + @run_on_singleton_matching_endpoint(has_attribute(Clusters.OnOff.Attributes.OffWaitTime)) + async def test_no_run_on_singleton_matching_endpoint(self): + pass + def main(): failures = [] @@ -258,82 +253,129 @@ def main(): if not ok: failures.append("Test case failure: test_endpoints") - test_name = 'test_whole_node_with_pics' - test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name) - ok = test_runner.run_test_with_mock_read(read_resp, hooks) - if ok: - failures.append(f"Did not get expected test assertion on {test_name}") - - test_name = 'test_per_endpoint_with_pics' + test_name = 'test_endpoint_with_pics' test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name) ok = test_runner.run_test_with_mock_read(read_resp, hooks) if ok: failures.append(f"Did not get expected test assertion on {test_name}") # Test should run once for the whole node, regardless of the number of endpoints - def run_check(test_name: str, read_response: Attribute.AsyncReadTransaction.ReadResponse, expected_runs: int, expect_skip: bool) -> None: + def run_check(test_name: str, read_response: Attribute.AsyncReadTransaction.ReadResponse, expect_skip: bool) -> None: nonlocal failures test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name) hooks = DecoratorTestRunnerHooks() - ok = test_runner.run_test_with_mock_read(read_response, hooks) - started_ok = len(hooks.started) == expected_runs - skipped_ok = (hooks.skipped != []) == expect_skip - stopped_ok = hooks.stopped == expected_runs + num_endpoints = 2 + for e in [0, 1]: + test_runner.set_test_config(MatterTestConfig(endpoint=e)) + ok = test_runner.run_test_with_mock_read(read_response, hooks) + started_ok = len(hooks.started) == num_endpoints + expected_num_skips = 2 if expect_skip else 0 + skipped_ok = len(hooks.skipped) == expected_num_skips + stopped_ok = hooks.stopped == num_endpoints if not ok or not started_ok or not skipped_ok or not stopped_ok: failures.append( - f'Expected {expected_runs} run of {test_name}, skips expected: {expect_skip}. Runs: {hooks.started}, skips: {hooks.skipped} stops: {hooks.stopped}') - - def check_once_per_node(test_name: str): - run_check(test_name, get_clusters([0]), 1, False) - run_check(test_name, get_clusters([0, 1]), 1, False) + f'Expected {num_endpoints} run of {test_name}, skips expected: {expect_skip}. 
Runs: {hooks.started}, skips: {hooks.skipped} stops: {hooks.stopped}') def check_once_per_endpoint(test_name: str): - run_check(test_name, get_clusters([0]), 1, False) - run_check(test_name, get_clusters([0, 1]), 2, False) + run_check(test_name, get_clusters([0, 1]), False) - def check_skipped(test_name: str): - run_check(test_name, get_clusters([0]), 1, True) - run_check(test_name, get_clusters([0, 1]), 1, True) + def check_all_skipped(test_name: str): + run_check(test_name, get_clusters([0, 1]), True) - check_once_per_node('test_whole_node_ok') check_once_per_endpoint('test_endpoint_cluster_yes') - check_skipped('test_endpoint_cluster_no') + check_all_skipped('test_endpoint_cluster_no') check_once_per_endpoint('test_endpoint_attribute_yes') - check_skipped('test_endpoint_attribute_supported_cluster_no') - check_skipped('test_endpoint_attribute_unsupported_cluster_no') + check_all_skipped('test_endpoint_attribute_supported_cluster_no') + check_all_skipped('test_endpoint_attribute_unsupported_cluster_no') check_once_per_endpoint('test_endpoint_feature_yes') - check_skipped('test_endpoint_feature_unsupported_cluster_no') + check_all_skipped('test_endpoint_feature_unsupported_cluster_no') check_once_per_endpoint('test_endpoint_boolean_yes') - check_skipped('test_endpoint_boolean_no') + check_all_skipped('test_endpoint_boolean_no') test_name = 'test_fail_on_ep0' test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name) read_resp = get_clusters([0, 1]) + # fail on EP0, pass on EP1 + test_runner.set_test_config(MatterTestConfig(endpoint=0)) ok = test_runner.run_test_with_mock_read(read_resp, hooks) if ok: failures.append(f"Did not get expected test assertion on {test_name}") - - test_name = 'test_fail_on_ep1' - test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name) - read_resp = get_clusters([0, 1]) + test_runner.set_test_config(MatterTestConfig(endpoint=1)) ok = test_runner.run_test_with_mock_read(read_resp, hooks) - if ok: - failures.append(f"Did not get expected test assertion on {test_name}") + if not ok: + failures.append(f"Unexpected failure on {test_name}") test_name = 'test_fail_on_ep1' test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name) - read_resp = get_clusters([0]) + read_resp = get_clusters([0, 1]) + # pass on EP0, fail on EP1 + test_runner.set_test_config(MatterTestConfig(endpoint=0)) ok = test_runner.run_test_with_mock_read(read_resp, hooks) if not ok: failures.append(f"Unexpected failure on {test_name}") - - test_name = 'test_fail_on_whole_node' - test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name) - read_resp = get_clusters([0, 1]) + test_runner.set_test_config(MatterTestConfig(endpoint=1)) ok = test_runner.run_test_with_mock_read(read_resp, hooks) if ok: failures.append(f"Did not get expected test assertion on {test_name}") + def run_singleton_dynamic(test_name: str, cluster_list: list[int]) -> tuple[bool, DecoratorTestRunnerHooks]: + nonlocal failures + read_resp = get_clusters(cluster_list) + test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name) + test_runner.set_test_config(MatterTestConfig(endpoint=2)) + hooks = DecoratorTestRunnerHooks() + ok = test_runner.run_test_with_mock_read(read_resp, hooks) + # for all tests, we need to ensure the endpoint was set back to the prior values + if test_runner.config.endpoint != 2: + failures.append(f"Dynamic tests {test_name} with clusters {cluster_list} did not set endpoint back to prior") + # All tests should have a start and a stop + started_ok = 
len(hooks.started) == 1 +        stopped_ok = hooks.stopped == 1 +        if not started_ok or not stopped_ok: +            failures.append( +                f'Hooks failure on {test_name}, Runs: {hooks.started}, skips: {hooks.skipped} stops: {hooks.stopped}') +        return ok, hooks + +    def expect_success_dynamic(test_name: str, cluster_list: list[int]): +        ok, hooks = run_singleton_dynamic(test_name, cluster_list) +        if not ok: +            failures.append(f"Unexpected failure on {test_name} with cluster list {cluster_list}") +        if hooks.skipped: +            failures.append(f'Unexpected skip call on {test_name} with cluster list {cluster_list}') + +    def expect_failure_dynamic(test_name: str, cluster_list: list[int]): +        ok, hooks = run_singleton_dynamic(test_name, cluster_list) +        if ok: +            failures.append(f"Unexpected success on {test_name} with cluster list {cluster_list}") +        if hooks.skipped: +            # We don't expect a skip call because the test actually failed. +            failures.append(f'Skip called for {test_name} with cluster list {cluster_list}') + +    def expect_skip_dynamic(test_name: str, cluster_list: list[int]): +        ok, hooks = run_singleton_dynamic(test_name, cluster_list) +        if not ok: +            failures.append(f"Unexpected failure on {test_name} with cluster list {cluster_list}") +        if not hooks.skipped: +            # We do expect a skip call here because no endpoint matches the test requirements. +            failures.append(f'Skip not called for {test_name} with cluster list {cluster_list}') + +    test_name = 'test_run_on_singleton_matching_endpoint' +    expect_success_dynamic(test_name, [0]) +    expect_success_dynamic(test_name, [1]) +    # expect failure because there is more than 1 endpoint +    expect_failure_dynamic(test_name, [0, 1]) + +    test_name = 'test_run_on_singleton_matching_endpoint_failure' +    expect_failure_dynamic(test_name, [0]) +    expect_failure_dynamic(test_name, [1]) +    expect_failure_dynamic(test_name, [0, 1]) + +    test_name = 'test_no_run_on_singleton_matching_endpoint' +    # no failure, no matches, expect skips on all endpoints +    expect_skip_dynamic(test_name, [0]) +    expect_skip_dynamic(test_name, [1]) +    expect_skip_dynamic(test_name, [0, 1]) +     test_runner.Shutdown()     print(         f"Test of Decorators: test response incorrect: {len(failures)}") diff --git a/src/python_testing/test_testing/test_TC_CCNTL_2_2.py b/src/python_testing/test_testing/test_TC_CCNTL_2_2.py index 77971779fecbfc..d528b5652e1b45 100644 --- a/src/python_testing/test_testing/test_TC_CCNTL_2_2.py +++ b/src/python_testing/test_testing/test_TC_CCNTL_2_2.py @@ -178,7 +178,7 @@ def main(th_server_app: str):     print(f'paa = {paa_path}')     pics = {"PICS_SDK_CI_ONLY": True} -    test_runner = MyMock('TC_CCTRL_2_2', 'TC_CCTRL_2_2', 'test_TC_CCTRL_2_2', 1, paa_trust_store_path=paa_path, pics=pics) +    test_runner = MyMock('TC_CCTRL_2_2', 'TC_CCTRL_2_2', 'test_TC_CCTRL_2_2', paa_trust_store_path=paa_path, pics=pics)     config = MatterTestConfig()     config.global_test_params = {'th_server_app_path': th_server_app}     test_runner.set_test_config(config) diff --git a/src/python_testing/test_testing/test_TC_MCORE_FS_1_1.py b/src/python_testing/test_testing/test_TC_MCORE_FS_1_1.py index ec91db72551523..5b2e29b71915b1 100644 --- a/src/python_testing/test_testing/test_TC_MCORE_FS_1_1.py +++ b/src/python_testing/test_testing/test_TC_MCORE_FS_1_1.py @@ -149,7 +149,7 @@ def main(th_server_app: str):     print(f'paa = {paa_path}')     pics = {"PICS_SDK_CI_ONLY": True} -    test_runner = MyMock('TC_MCORE_FS_1_1', 'TC_MCORE_FS_1_1', 'test_TC_MCORE_FS_1_1', 1, paa_trust_store_path=paa_path, pics=pics) +    test_runner = MyMock('TC_MCORE_FS_1_1', 'TC_MCORE_FS_1_1', 'test_TC_MCORE_FS_1_1', 
paa_trust_store_path=paa_path, pics=pics) config = MatterTestConfig() config.user_params = {'th_server_app_path': th_server_app} test_runner.set_test_config(config)
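
For illustration only (not part of the patch): a minimal sketch of how a test script might use the decorators introduced above. The test class name, test names, and the choice of the OnOff cluster are assumptions for the example; the decorator and helper names come from this change.

import chip.clusters as Clusters
from matter_testing_support import (MatterBaseTest, default_matter_test_main, has_cluster,
                                    run_if_endpoint_matches, run_on_singleton_matching_endpoint)


class TC_EndpointGatingExample(MatterBaseTest):
    # Runs only when the endpoint supplied via --endpoint has the OnOff cluster;
    # for any other --endpoint the decorator reports a skip to the test harness,
    # and with no --endpoint at all it fails ("The --endpoint flag is required for this test.").
    @run_if_endpoint_matches(has_cluster(Clusters.OnOff))
    async def test_onoff_on_selected_endpoint(self):
        # Support helpers default to self.matter_test_config.endpoint, so this read
        # lands on the endpoint under test without passing it explicitly.
        await self.read_single_attribute_check_success(
            cluster=Clusters.OnOff, attribute=Clusters.OnOff.Attributes.OnOff)

    # For dynamic endpoint numbering, the decorator discovers the single matching
    # endpoint itself and temporarily points matter_test_config.endpoint at it.
    @run_on_singleton_matching_endpoint(has_cluster(Clusters.OnOff))
    async def test_onoff_on_dynamic_endpoint(self):
        pass


if __name__ == "__main__":
    default_matter_test_main()

Because --endpoint now defaults to None instead of 0, the gating is explicit: an endpoint-scoped test either runs against the endpoint the operator named, is skipped when that endpoint does not match, or fails fast when the flag is missing, rather than iterating endpoints or silently defaulting to endpoint 0.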
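Similarly, a sketch of driving such a test against a chosen endpoint with the reworked MockTestRunner, where the endpoint now travels in MatterTestConfig instead of being a constructor argument. This follows the TestDecorators.py harness above under stated assumptions: the import path mirrors the sibling scripts in test_testing, and read_resp and hooks are placeholders for the mock read response and runner hooks built in that harness's main().

from MockTestRunner import MockTestRunner
from matter_testing_support import MatterTestConfig

test_runner = MockTestRunner('TestDecorators.py', 'TestDecorators', 'test_endpoint_cluster_yes')
test_runner.set_test_config(MatterTestConfig(endpoint=1))   # endpoint selected via config, not the constructor
ok = test_runner.run_test_with_mock_read(read_resp, hooks)  # read_resp/hooks as constructed in the harness
test_runner.Shutdown()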