test: remove io-engine restart wa
Dataplane has fixed several issues where the usage cache was not getting discarded and yielded incorrect allocated sizes for lvols, so we can now remove the workarounds (WAs).

Signed-off-by: Tiago Castro <[email protected]>
tiagolobocastro committed Aug 24, 2023
1 parent ac15c13 commit b765869
Showing 1 changed file with 0 additions and 8 deletions.
8 changes: 0 additions & 8 deletions tests/bdd/features/snapshot/restore/test_delete.py
@@ -140,9 +140,6 @@ def the_pool_space_usage_should_be_zero():
def the_pool_space_usage_should_reflect_the_original_volume(original_volume):
    """the pool space usage should reflect the original volume."""
    pool = ApiClient.pools_api().get_pool(POOL)
-    # Bug, dataplane caches allocated, requires a restart until fixed
-    Docker.restart_container(NODE)
-    wait_node_online(NODE)
    volume = Volume.update(original_volume, cached=False)
    assert pool.state.used == volume.state.usage.allocated

@@ -264,8 +261,6 @@ def the_restored_volume_1_snapshot_1_allocation_size_should_be_12mib(
    restored_1_snapshot_1,
):
    """the restored volume 1 snapshot 1 allocation size should be 12MiB."""
-    Docker.restart_container(NODE)
-    wait_node_online(NODE)
    Cluster.wait_cache_update()
    snapshot = Snapshot.update(restored_1_snapshot_1)
    assert snapshot.state.allocated_size == 12 * 1024 * 1024
@@ -438,9 +433,6 @@ def the_pool_space_usage_should_reflect_the_snapshot_2_restored_volume_2_and_del
    restored_1_snapshot_2,
):
    """the pool space usage should reflect the snapshot 2, restored volume 2, and deleted snapshot and deleted restored volume 1 (16MiB)."""
-    # Bug, dataplane caches allocated, requires a restart until fixed
-    Docker.restart_container(NODE)
-    wait_node_online(NODE)
    Cluster.wait_cache_update()

    pool = ApiClient.pools_api().get_pool(POOL)
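For context, a minimal before/after sketch of the pattern these hunks change. The helper names (Docker, wait_node_online, Cluster, Volume, ApiClient, NODE, POOL) are the ones already used in tests/bdd/features/snapshot/restore/test_delete.py; their imports are assumed to come from the test suite's shared fixtures.

# Before: the io-engine container had to be bounced so its stale usage
# cache was rebuilt before checking pool/volume allocation.
# Docker.restart_container(NODE)
# wait_node_online(NODE)

# After: the dataplane now discards the stale usage cache itself, so
# refreshing the control-plane cache and re-reading state is enough.
Cluster.wait_cache_update()
pool = ApiClient.pools_api().get_pool(POOL)
volume = Volume.update(original_volume, cached=False)
assert pool.state.used == volume.state.usage.allocated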
