diff --git a/.gitignore b/.gitignore
index 25683d481..3c66a2e75 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,9 @@ ci.log
 tests/e2e/reports/report.xml
 tests/kind/kind
 tests/log.txt
+# e2e tests results
+tests/e2e-test-framework/logs
+tests/e2e-test-framework/report
 # vscode
 .vscode
 # python
diff --git a/tests/e2e-test-framework/.pylintrc b/tests/e2e-test-framework/.pylintrc
index 53e8dc925..dce8a6579 100644
--- a/tests/e2e-test-framework/.pylintrc
+++ b/tests/e2e-test-framework/.pylintrc
@@ -340,7 +340,7 @@ indent-string='    '
 max-line-length=210
 
 # Maximum number of lines in a module.
-max-module-lines=1000
+max-module-lines=1500
 
 # Allow the body of a class to be on the same line as the declaration if body
 # contains single statement.
diff --git a/tests/e2e-test-framework/framework/drive.py b/tests/e2e-test-framework/framework/drive.py
index f8e1f30f0..3a1e8f717 100644
--- a/tests/e2e-test-framework/framework/drive.py
+++ b/tests/e2e-test-framework/framework/drive.py
@@ -1,5 +1,6 @@
 import json
 import logging
+import time
 from typing import Any, Dict, List, TypedDict
 
 from framework.ssh import SSHCommandExecutor
@@ -251,6 +252,8 @@ def wipe_drives(self) -> None:
             else:
                 raise ValueError(f"Unknown drive type: {children['type']}")
 
+            time.sleep(1)
+
     def _get_device_name(self, device_path_or_name: str) -> str:
         return (
             device_path_or_name[5:]
diff --git a/tests/e2e-test-framework/framework/sts.py b/tests/e2e-test-framework/framework/sts.py
index c868fb0ba..bcc3802bb 100644
--- a/tests/e2e-test-framework/framework/sts.py
+++ b/tests/e2e-test-framework/framework/sts.py
@@ -96,12 +96,13 @@ def create(self, storage_classes: List[str]) -> None:
             assert (
                 response is not None
             ), f"Failed to create StatefulSet: {self.name}"
+            logging.info(f"StatefulSet created: {self.name}")
         except ApiException as exc:
             pytest.fail(
                 f"Failed to create StatefulSet: {self.name}. Reason: {str(exc)}"
             )
 
-    def delete(self) -> None:
+    def delete(self) -> str:
         try:
             response = self.apps_v1_api.delete_namespaced_stateful_set(
                 self.name, self.namespace
@@ -113,6 +114,7 @@ def delete(self) -> None:
             logging.warning(
                 f"Failed to delete StatefulSet: {self.name}. Reason: {str(exc)}"
             )
+        return self.name
 
     def verify(self, timeout: int) -> bool:
         start_time = time.time()
diff --git a/tests/e2e-test-framework/framework/utils.py b/tests/e2e-test-framework/framework/utils.py
index 009557aee..afd5267e2 100644
--- a/tests/e2e-test-framework/framework/utils.py
+++ b/tests/e2e-test-framework/framework/utils.py
@@ -1,11 +1,13 @@
 import time
 import logging
+import threading
 from typing import Any, Callable, Dict, List, Optional
 from kubernetes.client.rest import ApiException
 from kubernetes import watch
 from kubernetes.client.models import (
     V1Pod,
+    V1PersistentVolume,
     V1PersistentVolumeClaim,
     CoreV1Event,
 )
 
@@ -20,6 +22,7 @@ def __init__(self, vm_user: str, vm_cred: str, namespace: str):
         self.vm_user = vm_user
         self.vm_cred = vm_cred
         self.namespace = namespace
+        self.storage_class_prefix = "csi-baremetal-sc"
         (
             self.core_v1_api,
             self.custom_objects_api,
@@ -148,6 +151,42 @@ def is_pod_ready(self, pod_name: str, timeout=5):
         )
         return False
 
+    def is_pod_exists(self, pod_name: str) -> bool:
+        """
+        Checks if a Pod with the specified name exists in the namespace.
+
+        Args:
+            pod_name (str): The name of the Pod to check.
+
+        Returns:
+            bool: True if the Pod exists, False otherwise.
+ """ + pods = self.list_pods(name_prefix=pod_name, namespace=self.namespace) + if pods: + logging.info(f"Pod '{pods[0].metadata.name}' exists in namespace '{self.namespace}'.") + return True + return False + + def wait_for_pod_removing(self, pod_name: str, timeout: int = 30) -> None: + """ + Waits for a Pod to be in the removing state. + + Args: + pod_name (str): The name of the Pod to wait for. + timeout (int, optional): The maximum time in seconds to wait for the Pod to be in the removing state. Defaults to 30. + + Returns: + None: This function does not return anything. + """ + start_time = time.time() + while time.time() - start_time < timeout: + if not self.is_pod_exists(pod_name): + return + time.sleep(1) + + logging.info("Waiting for pods resources to be in the removing state...") + time.sleep(3) + def list_pods( self, name_prefix: Optional[str] = None, @@ -372,6 +411,25 @@ def event_in(self, resource_name: str, reason: str) -> bool: logging.warning(f"event {reason} not found") return False + def wait_event_in(self, resource_name: str, reason: str, timeout: int = 30) -> bool: + """ + Waits for an event with the given resource name and reason to exist in the Kubernetes API. + + Args: + resource_name (str): The name of the resource. + reason (str): The reason for the event. + timeout (int): The maximum time to wait for the event in seconds. Defaults to 90. + + Returns: + bool: True if the event exists within the given timeout, False otherwise. + """ + end_time = time.time() + timeout + while time.time() < end_time: + if self.event_in(resource_name, reason): + return True + time.sleep(1) + return False + def wait_volume( self, name: str, @@ -454,7 +512,7 @@ def _wait_cr( self, expected: Dict[str, str], get_cr_fn: Callable[[None], Any], - timeout: int = 90, + timeout: int = 120, ) -> bool: """ Waits for the custom resource (CR) to reach the expected state. @@ -470,6 +528,7 @@ def _wait_cr( assertions = {key: False for key, _ in expected.items()} end_time = time.time() + timeout retry_count = 0 + cr = None while time.time() < end_time: if retry_count > 0: logging.warning( @@ -491,6 +550,8 @@ def _wait_cr( if not v: logging.error(f"CR is not in expected state: {k} != {expected[k]}") + logging.error(f"CR details: {cr}") + return False def annotate_custom_resource( @@ -600,66 +661,19 @@ def clear_csi_resources(self, namespace: str) -> None: None: This function does not return anything. 
""" try: - self.custom_objects_api.delete_collection_namespaced_custom_object( - group=const.CR_GROUP, - version=const.CR_VERSION, - namespace=namespace, - plural=const.VOLUMES_PLURAL, - grace_period_seconds=0, - propagation_policy="Foreground", - ) - logging.info("CR volumes: delete request sent") - for plural in [ - const.DRIVES_PLURAL, - const.AC_PLURAL, - const.ACR_PLURAL, - const.LVG_PLURAL, - ]: - self.custom_objects_api.delete_collection_cluster_custom_object( - group=const.CR_GROUP, - version=const.CR_VERSION, - plural=plural, - grace_period_seconds=0, - propagation_policy="Foreground", - ) - logging.info(f"CR {plural}: delete request sent") - self.core_v1_api.delete_collection_namespaced_persistent_volume_claim( - namespace=namespace - ) - logging.info("waiting for resources to be in the removing state") - time.sleep(10) - lvg_list = self.custom_objects_api.list_cluster_custom_object( - group=const.CR_GROUP, - version=const.CR_VERSION, - plural="logicalvolumegroups", - )["items"] - for lvg in lvg_list: - if "finalizers" in lvg.get("metadata", {}): - lvg["metadata"]["finalizers"] = [] - self.custom_objects_api.replace_cluster_custom_object( - group=const.CR_GROUP, - version=const.CR_VERSION, - namespace=namespace, - plural=const.LVG_PLURAL, - name=lvg["metadata"]["name"], - body=lvg, - ) - for v in self.list_volumes(): - if "finalizers" in v.get("metadata", {}): - v["metadata"]["finalizers"] = [] - self.custom_objects_api.replace_namespaced_custom_object( - const.CR_GROUP, - const.CR_VERSION, - namespace, - plural=const.VOLUMES_PLURAL, - name=v["metadata"]["name"], - body=v, - ) + self.delete_volumes() + self.delete_custom_objects([const.DRIVES_PLURAL, const.AC_PLURAL, const.ACR_PLURAL, const.LVG_PLURAL]) + self.delete_persistent_volumes() + self.delete_persistent_volume_claims() + self.clear_lvg_finalizers(namespace) except ApiException as e: print( f"Exception when calling CustomObjectsApi->delete_namespaced_custom_object: {e}" ) + logging.info("Waiting for resources to be in the removing state...") + time.sleep(5) + def recreate_pod(self, name: str, namespace: str) -> V1Pod: """ Recreates a Kubernetes Pod by deleting the existing Pod and waiting for a new Pod to be created. @@ -796,3 +810,228 @@ def check_drive_cr_exist_or_not( raise time.sleep(2) return False + + def delete_persistent_volumes(self) -> None: + """ + Deletes all PersistentVolumes. + + Args: + None + + Returns: + None: This function does not return anything. + """ + pvs = self.core_v1_api.list_persistent_volume(pretty=True) + + threads = [] + for pv in pvs.items: + if self.storage_class_prefix in pv.spec.storage_class_name: + # Create a thread for each persistent volume + thread = threading.Thread(target=self.delete_pv, args=(pv,)) + thread.start() + threads.append(thread) + + for thread in threads: + thread.join() + + logging.info("Waiting for PV resources to be in the removing state...") + time.sleep(5) + + def delete_pv(self, pv: V1PersistentVolume) -> None: + """ + Deletes a PersistentVolume. + + Args: + pv (V1PersistentVolume): The PersistentVolume object to be deleted. + + Returns: + None: This function does not return anything. 
+ """ + logging.info(f"Deleting PV {pv.metadata.name} ...") + + try: + if pv.metadata.finalizers is not None: + pv.metadata.finalizers = [] + self.core_v1_api.replace_persistent_volume( + name=pv.metadata.name, + body=pv + ) + self.core_v1_api.delete_persistent_volume(pv.metadata.name) + logging.info(f"PV {pv.metadata.name} deleted.") + except ApiException as exc: + logging.warning(f"Failed to delete PV {pv.metadata.name}. Reason: {str(exc)}") + + def delete_persistent_volume_claims(self) -> None: + """ + Deletes all PersistentVolumeClaims. + + Args: + None + + Returns: + None: This function does not return anything. + """ + pvcs = self.list_persistent_volume_claims( + namespace=self.namespace + ) + + threads = [] + for pvc in pvcs: + if self.storage_class_prefix in pvc.spec.storage_class_name: + thread = threading.Thread(target=self.delete_pvc, args=(pvc,)) + thread.start() + threads.append(thread) + + for thread in threads: + thread.join() + + logging.info("Waiting for PVC resources to be in the removing state...") + time.sleep(5) + + def delete_pvc(self, pvc: V1PersistentVolumeClaim) -> None: + """ + Deletes a PersistentVolumeClaim. + + Args: + pvc (V1PersistentVolumeClaim): The PersistentVolumeClaim object to be deleted. + + Returns: + None: This function does not return anything. + """ + logging.info(f"Deleting PVC {pvc.metadata.name} ...") + + try: + if pvc.metadata.finalizers is not None: + response = self.core_v1_api.read_namespaced_persistent_volume_claim( + name=pvc.metadata.name, + namespace=self.namespace + ) + response.metadata.finalizers = [] + self.core_v1_api.patch_namespaced_persistent_volume_claim( + name=response.metadata.name, + namespace=self.namespace, + body=response + ) + time.sleep(0.1) + self.core_v1_api.delete_namespaced_persistent_volume_claim( + name=pvc.metadata.name, + namespace=self.namespace, + grace_period_seconds=0, + propagation_policy="Foreground" + ) + logging.info(f"PVC {pvc.metadata.name} deleted.") + except ApiException as exc: + logging.warning(f"Failed to delete PVC {pvc.metadata.name}. Reason: {str(exc)}") + + def delete_volumes(self) -> None: + """ + Deletes all volumes. + + Args: + None + + Returns: + None: This function does not return anything. + """ + volumes = self.list_volumes() + + threads = [] + for volume in volumes: + thread = threading.Thread(target=self.delete_volume, args=(volume,)) + thread.start() + threads.append(thread) + + for thread in threads: + thread.join() + + logging.info("Waiting for volumes resources to be in the removing state...") + time.sleep(2) + + def delete_volume(self, volume: dict) -> None: + """ + Deletes a volume. + + Args: + volume (dict): The volume object to be deleted. + + Returns: + None: This function does not return anything. + """ + try: + logging.info(f"Deleting volume {volume['metadata']['name']} ...") + + if "metadata" in volume and "finalizers" in volume["metadata"]: + volume["metadata"]["finalizers"] = [] + self.custom_objects_api.replace_namespaced_custom_object( + const.CR_GROUP, + const.CR_VERSION, + self.namespace, + plural=const.VOLUMES_PLURAL, + name=volume["metadata"]["name"], + body=volume + ) + time.sleep(0.1) + self.custom_objects_api.delete_namespaced_custom_object( + const.CR_GROUP, + const.CR_VERSION, + self.namespace, + plural=const.VOLUMES_PLURAL, + name=volume["metadata"]["name"] + ) + logging.info(f"Volume {volume['metadata']['name']} deleted") + except ApiException as exc: + logging.warning(f"Failed to delete volume {volume['metadata']['name']}. 
Reason: {str(exc)}") + + def clear_lvg_finalizers(self, namespace: str) -> None: + """ + Clears the finalizers of Logical Volume Groups (LVGs) in the specified namespace. + + Args: + namespace (str): The namespace of the LVGs. + + Returns: + None: This function does not return anything. + """ + lvg_list = self.custom_objects_api.list_cluster_custom_object( + group=const.CR_GROUP, + version=const.CR_VERSION, + plural="logicalvolumegroups", + )["items"] + for lvg in lvg_list: + if "finalizers" in lvg.get("metadata", {}): + lvg["metadata"]["finalizers"] = [] + self.custom_objects_api.replace_cluster_custom_object( + group=const.CR_GROUP, + version=const.CR_VERSION, + namespace=namespace, + plural=const.LVG_PLURAL, + name=lvg["metadata"]["name"], + body=lvg, + ) + logging.info(f"LVG {lvg['metadata']['name']}: finalizers cleared") + + logging.info("Waiting for LVG resources to be in the removing state...") + time.sleep(1) + + def delete_custom_objects(self, plurals: List[str]) -> None: + """ + Deletes the custom objects with the specified plurals. + + Args: + plurals (List[str]): The list of plurals of the custom objects to be deleted. + + Returns: + None: This function does not return anything. + """ + for plural in plurals: + self.custom_objects_api.delete_collection_cluster_custom_object( + group=const.CR_GROUP, + version=const.CR_VERSION, + plural=plural, + grace_period_seconds=0, + propagation_policy="Foreground", + ) + logging.info(f"CR {plural}: delete request sent") + + logging.info("Waiting for custom resources to be in the removing state...") + time.sleep(2) diff --git a/tests/e2e-test-framework/tests/test_drive_replacement_multi_volumes.py b/tests/e2e-test-framework/tests/test_drive_replacement_multi_volumes.py index aa2195e82..0cb4f8ee2 100644 --- a/tests/e2e-test-framework/tests/test_drive_replacement_multi_volumes.py +++ b/tests/e2e-test-framework/tests/test_drive_replacement_multi_volumes.py @@ -32,7 +32,8 @@ def setup_class( yield - cls.sts.delete() + cls.utils.wait_for_pod_removing(cls.sts.delete()) + cls.utils.clear_csi_resources(namespace=cls.namespace) @pytest.mark.hal def test_5921_auto_drive_replacement_with_multiple_volumes_per_pod(self): @@ -92,7 +93,7 @@ def test_5921_auto_drive_replacement_with_multiple_volumes_per_pod(self): # 5. check events and locate event related to DriveHealthFailure for drive in drives: drive_name = drive["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_HEALTH_FAILURE, ), f"event {const.DRIVE_HEALTH_FAILURE} for drive {drive_name} not found" @@ -122,14 +123,14 @@ def test_5921_auto_drive_replacement_with_multiple_volumes_per_pod(self): # 9. check event DriveReadyForRemoval is generated for drive in drives: drive_name = drive["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_READY_FOR_REMOVAL, ), f"event {const.DRIVE_READY_FOR_REMOVAL} for drive {drive_name} not found" # 10. check events and locate event related to VolumeBadHealth for volume in volumes: volume_name = volume["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=volume_name, reason=const.VOLUME_BAD_HEALTH, ), f"event {const.VOLUME_BAD_HEALTH} for volume {volume_name} not found" @@ -152,7 +153,7 @@ def test_5921_auto_drive_replacement_with_multiple_volumes_per_pod(self): # 13. 
check for events: DriveReadyForPhysicalRemoval for drive in drives: drive_name = drive["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_READY_FOR_PHYSICAL_REMOVAL, ), f"event {const.DRIVE_READY_FOR_PHYSICAL_REMOVAL} for drive {drive_name} not found" @@ -178,7 +179,7 @@ def test_5921_auto_drive_replacement_with_multiple_volumes_per_pod(self): # 16. check for events DriveSuccessfullyRemoved in kubernetes events for drive in drives: drive_name = drive["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_SUCCESSFULLY_REMOVED, ), f"event {const.DRIVE_SUCCESSFULLY_REMOVED} for drive {drive_name} not found" diff --git a/tests/e2e-test-framework/tests/test_drive_replacement_multi_volumes_single_fail.py b/tests/e2e-test-framework/tests/test_drive_replacement_multi_volumes_single_fail.py index 8574c10af..47bc56b8d 100644 --- a/tests/e2e-test-framework/tests/test_drive_replacement_multi_volumes_single_fail.py +++ b/tests/e2e-test-framework/tests/test_drive_replacement_multi_volumes_single_fail.py @@ -32,7 +32,8 @@ def setup_class( yield - cls.sts.delete() + cls.utils.wait_for_pod_removing(cls.sts.delete()) + cls.utils.clear_csi_resources(namespace=cls.namespace) @pytest.mark.hal def test_5955_auto_drive_replacement_with_multiple_volumes_per_pod_single_failure( @@ -91,7 +92,7 @@ def test_5955_auto_drive_replacement_with_multiple_volumes_per_pod_single_failur f"volume {failed_volume_name} went in OperationalStatus: {const.STATUS_OPERATIVE}, Health: {const.HEALTH_BAD}, Usage: {const.USAGE_RELEASING}" ) # 5. check events and locate event related to DriveHealthFailure - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=failed_drive_name, reason=const.DRIVE_HEALTH_FAILURE, ), f"event {const.DRIVE_HEALTH_FAILURE} for drive {failed_drive_name} not found" @@ -117,12 +118,12 @@ def test_5955_auto_drive_replacement_with_multiple_volumes_per_pod_single_failur f"volume {failed_volume_name} went in Usage: {const.USAGE_RELEASED}" ) # 9. check event DriveReadyForRemoval is generated - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=failed_drive_name, reason=const.DRIVE_READY_FOR_REMOVAL, ), f"event {const.DRIVE_READY_FOR_REMOVAL} for drive {failed_drive_name} not found" # 10. check events and locate event related to VolumeBadHealth - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=failed_volume_name, reason=const.VOLUME_BAD_HEALTH, ), f"event {const.VOLUME_BAD_HEALTH} for volume {failed_volume_name} not found" @@ -142,7 +143,7 @@ def test_5955_auto_drive_replacement_with_multiple_volumes_per_pod_single_failur f"drive {failed_drive_name} went in Status: {const.STATUS_ONLINE}, Health: {const.HEALTH_BAD}, Usage: {const.USAGE_REMOVED}, LEDState: {drive["spec"]["LEDState"]}" ) # 13. check for events: DriveReadyForPhysicalRemoval - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=failed_drive_name, reason=const.DRIVE_READY_FOR_PHYSICAL_REMOVAL, ), f"event {const.DRIVE_READY_FOR_PHYSICAL_REMOVAL} for drive {failed_drive_name} not found" @@ -168,7 +169,7 @@ def test_5955_auto_drive_replacement_with_multiple_volumes_per_pod_single_failur cr_existence=True, ), f"Drive CR {healthy_drive_name} does not exist" # 16. 
check for events DriveSuccessfullyRemoved in kubernetes events - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=failed_drive_name, reason=const.DRIVE_SUCCESSFULLY_REMOVED, ), f"event {const.DRIVE_SUCCESSFULLY_REMOVED} for drive {failed_drive_name} not found" diff --git a/tests/e2e-test-framework/tests/test_drive_replacement_one_volume.py b/tests/e2e-test-framework/tests/test_drive_replacement_one_volume.py index 63ee48986..03cc99dba 100644 --- a/tests/e2e-test-framework/tests/test_drive_replacement_one_volume.py +++ b/tests/e2e-test-framework/tests/test_drive_replacement_one_volume.py @@ -32,7 +32,8 @@ def setup_class( yield - cls.sts.delete() + cls.utils.wait_for_pod_removing(cls.sts.delete()) + cls.utils.clear_csi_resources(namespace=cls.namespace) @pytest.mark.hal def test_5771_auto_drive_replacement_with_one_volume_per_pod(self): @@ -87,7 +88,7 @@ def test_5771_auto_drive_replacement_with_one_volume_per_pod(self): # 3. check events and locate event related to DriveHealthFailure drive_name = drive["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_HEALTH_FAILURE, ), f"event {const.DRIVE_HEALTH_FAILURE} for drive {drive_name} not found" @@ -119,13 +120,13 @@ def test_5771_auto_drive_replacement_with_one_volume_per_pod(self): # 7.check event DriveReadyForRemoval is generated, check events and locate event related to VolumeBadHealth drive_name = drive["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_READY_FOR_REMOVAL, ), f"event {const.DRIVE_READY_FOR_REMOVAL} for drive {drive_name} not found" volume_name = volume["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=volume_name, reason=const.VOLUME_BAD_HEALTH, ), f"event {const.VOLUME_BAD_HEALTH} for volume {volume_name} not found" @@ -147,7 +148,7 @@ def test_5771_auto_drive_replacement_with_one_volume_per_pod(self): # 12. check for events: DriveReadyForPhysicalRemoval drive_name = drive["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_READY_FOR_PHYSICAL_REMOVAL, ), f"event {const.DRIVE_READY_FOR_PHYSICAL_REMOVAL} for drive {drive_name} not found" @@ -174,7 +175,7 @@ def test_5771_auto_drive_replacement_with_one_volume_per_pod(self): # 16. 
check for events DriveSuccessfullyRemoved in kubernetes events drive_name = drive["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_SUCCESSFULLY_REMOVED, ), f"event {const.DRIVE_SUCCESSFULLY_REMOVED} for drive {drive_name} not found" diff --git a/tests/e2e-test-framework/tests/test_fake_attach.py b/tests/e2e-test-framework/tests/test_fake_attach.py index ba7471de3..df1bbc135 100644 --- a/tests/e2e-test-framework/tests/test_fake_attach.py +++ b/tests/e2e-test-framework/tests/test_fake_attach.py @@ -32,7 +32,8 @@ def setup_class( yield - cls.sts.delete() + cls.utils.wait_for_pod_removing(cls.sts.delete()) + cls.utils.clear_csi_resources(namespace=cls.namespace) @pytest.mark.hal def test_5808_fake_attach_without_dr(self): @@ -80,7 +81,7 @@ def test_5808_fake_attach_without_dr(self): pod = self.utils.recreate_pod(name=pod.metadata.name, namespace=self.namespace) volume_name = volume["metadata"]["name"] - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=volume_name, reason=const.FAKE_ATTACH_INVOLVED ), f"event {const.FAKE_ATTACH_INVOLVED} not found" @@ -93,7 +94,7 @@ def test_5808_fake_attach_without_dr(self): ) self.utils.recreate_pod(name=pod.metadata.name, namespace=self.namespace) - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=volume_name, reason=const.FAKE_ATTACH_CLEARED, ), f"event {const.FAKE_ATTACH_CLEARED} not found" diff --git a/tests/e2e-test-framework/tests/test_fake_attach_dr.py b/tests/e2e-test-framework/tests/test_fake_attach_dr.py index 4a103a422..bc690bd2d 100644 --- a/tests/e2e-test-framework/tests/test_fake_attach_dr.py +++ b/tests/e2e-test-framework/tests/test_fake_attach_dr.py @@ -33,7 +33,8 @@ def setup_class( yield - cls.sts.delete() + cls.utils.wait_for_pod_removing(cls.sts.delete()) + cls.utils.clear_csi_resources(namespace=cls.namespace) @pytest.mark.hal def test_6281_multiple_volumes_per_pod_fake_attach(self): @@ -77,7 +78,7 @@ def test_6281_multiple_volumes_per_pod_fake_attach(self): expected_usage=const.USAGE_RELEASING, ), f"Drive: {drive_name} failed to reach expected health: {const.HEALTH_BAD}" - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_HEALTH_FAILURE_EVENT, ) @@ -109,8 +110,7 @@ def test_6281_multiple_volumes_per_pod_fake_attach(self): pod = self.utils.recreate_pod( name=pod.metadata.name, namespace=self.namespace ) - - assert self.utils.event_in( + assert self.utils.wait_event_in( resource_name=drive_name, reason=const.DRIVE_READY_FOR_PHYSICAL_REMOVAL_EVENT, ) diff --git a/tests/e2e-test-framework/tests/test_sts_hdd.py b/tests/e2e-test-framework/tests/test_sts_hdd.py index 3267cbec0..d2ace7ad5 100644 --- a/tests/e2e-test-framework/tests/test_sts_hdd.py +++ b/tests/e2e-test-framework/tests/test_sts_hdd.py @@ -1,12 +1,18 @@ import pytest from framework.sts import STS +from framework.utils import Utils class TestStsHdd: @classmethod @pytest.fixture(autouse=True) - def setup_class(cls, namespace): + def setup_class( + cls, + namespace, + utils: Utils, + ): cls.namespace = namespace + cls.utils = utils cls.name = "test-sts-hdd" cls.timeout = 120 cls.replicas = 1 @@ -17,7 +23,8 @@ def setup_class(cls, namespace): yield - cls.sts.delete() + cls.utils.wait_for_pod_removing(cls.sts.delete()) + cls.utils.clear_csi_resources(namespace=cls.namespace) @pytest.mark.hal def test_6105_create_sts_with_hdd_volume(self): diff --git 
a/tests/e2e-test-framework/tests/test_sts_loopback.py b/tests/e2e-test-framework/tests/test_sts_loopback.py index 4245e7545..3d75d52ad 100644 --- a/tests/e2e-test-framework/tests/test_sts_loopback.py +++ b/tests/e2e-test-framework/tests/test_sts_loopback.py @@ -1,12 +1,18 @@ import pytest from framework.sts import STS +from framework.utils import Utils class TestStsLoopback: @classmethod @pytest.fixture(autouse=True) - def setup_class(cls, namespace): + def setup_class( + cls, + namespace, + utils: Utils + ): cls.namespace = namespace + cls.utils = utils cls.name = "test-sts-loopback" cls.timeout = 120 cls.replicas = 1 @@ -17,7 +23,8 @@ def setup_class(cls, namespace): yield - cls.sts.delete() + cls.utils.wait_for_pod_removing(cls.sts.delete()) + cls.utils.clear_csi_resources(namespace=cls.namespace) @pytest.mark.loopback def test_6106_create_sts_with_loopback_volume(self):
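
For context, a minimal sketch (not part of the diff) of how the teardown pieces introduced above fit together in a test class; all names are taken from the changes in this patch:

    # Illustrative teardown flow only; STS.delete() now returns the StatefulSet name (framework/sts.py),
    # which feeds the new Utils waiters added in framework/utils.py.
    sts_name = cls.sts.delete()                              # delete the StatefulSet, get its name back
    cls.utils.wait_for_pod_removing(sts_name)                # poll is_pod_exists() until the pod is gone or the timeout expires
    cls.utils.clear_csi_resources(namespace=cls.namespace)   # delete volumes, cluster CRs, PVs, PVCs and clear LVG finalizers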