chore(bors): merge pull request #667
667: Cherry pick #666 #664 #665 r=tiagolobocastro a=Abhinandan-Purkait



Co-authored-by: Abhinandan Purkait <[email protected]>
Co-authored-by: Tiago Castro <[email protected]>
3 people committed Aug 24, 2023
2 parents 872616c + b765869 commit 7291b84
Showing 6 changed files with 28 additions and 25 deletions.
@@ -2,7 +2,7 @@ use super::translation::{rpc_replica_to_agent, AgentToIoEngine};
use crate::controller::io_engine::translation::TryIoEngineToAgent;
use agents::errors::{GrpcRequest as GrpcRequestError, SvcError};
use rpc::v1::{
replica::{ListReplicaOptions, ReplicaType},
replica::{list_replica_options, ListReplicaOptions},
snapshot::{destroy_snapshot_request, DestroySnapshotRequest},
};
use stor_port::{
@@ -19,14 +19,19 @@ use snafu::ResultExt;
#[async_trait::async_trait]
impl crate::controller::io_engine::ReplicaListApi for super::RpcClient {
async fn list_replicas(&self) -> Result<Vec<Replica>, SvcError> {
let replicas_except_snapshots_query = list_replica_options::Query {
replica: true,
snapshot: false,
clone: true,
};
let rpc_replicas = self
.replica()
.list_replicas(ListReplicaOptions {
name: None,
poolname: None,
uuid: None,
pooluuid: None,
replicatype: ReplicaType::AllReplicasExceptSnapshots as i32,
query: Some(replicas_except_snapshots_query),
})
.await
.context(GrpcRequestError {
@@ -58,7 +63,7 @@ impl crate::controller::io_engine::ReplicaListApi for super::RpcClient {
poolname: None,
uuid: Some(replica_id.to_string()),
pooluuid: None,
replicatype: ReplicaType::AllReplicas as i32,
query: None,
})
.await
.context(GrpcRequestError {
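The hunks above replace the removed ReplicaType enum filter with the new list_replica_options::Query struct, where each boolean selects a replica kind. As a minimal sketch (not part of the commit), assuming ListReplicaOptions and list_replica_options::Query carry exactly the fields shown in this diff, the two request shapes boil down to:

use rpc::v1::replica::{list_replica_options, ListReplicaOptions};

// List ordinary replicas and clones but exclude snapshots, mirroring the
// `replicas_except_snapshots_query` built in the hunk above.
fn all_replicas_except_snapshots() -> ListReplicaOptions {
    ListReplicaOptions {
        name: None,
        poolname: None,
        uuid: None,
        pooluuid: None,
        query: Some(list_replica_options::Query {
            replica: true,
            snapshot: false,
            clone: true,
        }),
    }
}

// Look up one replica by uuid with no kind filter (`query: None`), as in the
// second hunk above.
fn replica_by_uuid(uuid: &str) -> ListReplicaOptions {
    ListReplicaOptions {
        name: None,
        poolname: None,
        uuid: Some(uuid.to_string()),
        pooluuid: None,
        query: None,
    }
}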
@@ -574,11 +574,17 @@ impl AgentToIoEngine for transport::ListReplicaSnapshots {
transport::ListReplicaSnapshots::ReplicaSnapshots(id) => (Some(id), None),
transport::ListReplicaSnapshots::Snapshot(id) => (None, Some(id)),
};

// All snapshots except the discarded ones.
let non_discarded_snaps = v1::snapshot::list_snapshots_request::Query {
invalid: None,
discarded: Some(false),
};

v1::snapshot::ListSnapshotsRequest {
source_uuid: source.map(ToString::to_string),
snapshot_uuid: snapshot.map(ToString::to_string),
snapshot_query_type:
v1::snapshot::SnapshotQueryType::AllSnapshotsExceptDiscardedSnapshots as i32,
query: Some(non_discarded_snaps),
}
}
}
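Snapshot listing gets the same treatment: the removed SnapshotQueryType enum gives way to an optional list_snapshots_request::Query, with discarded: Some(false) filtering out discarded snapshots. A minimal sketch, under the same assumption that the generated v1::snapshot types carry only the fields visible in this hunk:

use rpc::v1::snapshot::{list_snapshots_request, ListSnapshotsRequest};

// List all snapshots of a given replica except the discarded ones, mirroring
// the `non_discarded_snaps` query built above (illustrative only).
fn non_discarded_snapshots_of(replica_uuid: &str) -> ListSnapshotsRequest {
    ListSnapshotsRequest {
        source_uuid: Some(replica_uuid.to_string()),
        snapshot_uuid: None,
        query: Some(list_snapshots_request::Query {
            invalid: None,          // no filtering on validity
            discarded: Some(false), // drop discarded snapshots
        }),
    }
}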
1 change: 1 addition & 0 deletions deployer/src/infra/io_engine.rs
@@ -42,6 +42,7 @@ impl ComponentAction for IoEngine {
.with_env("MAYASTOR_NVMF_HOSTID", Uuid::new_v4().to_string().as_str())
.with_env("NEXUS_NVMF_RESV_ENABLE", "1")
.with_env("NEXUS_NVMF_ANA_ENABLE", "1")
.with_env("NVMF_TGT_CRDT", "0")
.with_bind("/tmp", "/host/tmp")
.with_bind("/var/run/dpdk", "/var/run/dpdk");

2 changes: 1 addition & 1 deletion rpc/api
Submodule api updated from a17cf9 to 0bbc1f
21 changes: 10 additions & 11 deletions rpc/src/lib.rs
@@ -18,7 +18,6 @@ pub mod io_engine {

use crate::v1::pb::{
CreateReplicaSnapshotResponse, ListSnapshotsResponse, NexusCreateSnapshotResponse,
SnapshotQueryType,
};
/// AutoGenerated Io Engine Client V0.
pub use mayastor_client::MayastorClient as IoEngineClientV0;
@@ -386,7 +385,7 @@ pub mod io_engine {
.list_snapshot(super::v1::pb::ListSnapshotsRequest {
source_uuid: source_uuid.map(|uuid| uuid.to_string()),
snapshot_uuid: snapshot_uuid.map(|uuid| uuid.to_string()),
snapshot_query_type: SnapshotQueryType::AllSnapshots as i32,
query: None,
})
.await?
.into_inner())
@@ -615,9 +614,9 @@ pub mod v1 {
/// V1 Replica autogenerated grpc code.
pub mod replica {
pub use super::pb::{
destroy_replica_request, replica_rpc_client, CreateReplicaRequest,
DestroyReplicaRequest, ListReplicaOptions, ListReplicasResponse, Replica,
ReplicaSpaceUsage, ReplicaType, ShareReplicaRequest, UnshareReplicaRequest,
destroy_replica_request, list_replica_options, replica_rpc_client,
CreateReplicaRequest, DestroyReplicaRequest, ListReplicaOptions, ListReplicasResponse,
Replica, ReplicaSpaceUsage, ShareReplicaRequest, UnshareReplicaRequest,
};
}

@@ -638,12 +637,12 @@

pub mod snapshot {
pub use super::pb::{
destroy_snapshot_request, snapshot_rpc_client, CreateReplicaSnapshotRequest,
CreateReplicaSnapshotResponse, CreateSnapshotCloneRequest, DestroySnapshotRequest,
ListSnapshotCloneRequest, ListSnapshotCloneResponse, ListSnapshotsRequest,
ListSnapshotsResponse, Nexus, NexusCreateSnapshotReplicaDescriptor,
NexusCreateSnapshotReplicaStatus, NexusCreateSnapshotRequest,
NexusCreateSnapshotResponse, SnapshotInfo, SnapshotQueryType,
destroy_snapshot_request, list_snapshots_request, snapshot_rpc_client,
CreateReplicaSnapshotRequest, CreateReplicaSnapshotResponse,
CreateSnapshotCloneRequest, DestroySnapshotRequest, ListSnapshotCloneRequest,
ListSnapshotCloneResponse, ListSnapshotsRequest, ListSnapshotsResponse, Nexus,
NexusCreateSnapshotReplicaDescriptor, NexusCreateSnapshotReplicaStatus,
NexusCreateSnapshotRequest, NexusCreateSnapshotResponse, SnapshotInfo,
};
}

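For consumers of the rpc crate, the net effect of these re-export changes is that ReplicaType and SnapshotQueryType are no longer importable from rpc::v1, while the query submodules now are. A hedged sketch of how the import lines change for downstream code:

// Before this commit:
// use rpc::v1::replica::{ListReplicaOptions, ReplicaType};
// use rpc::v1::snapshot::SnapshotQueryType;

// After this commit, the query submodules are re-exported instead:
use rpc::v1::{
    replica::{list_replica_options, ListReplicaOptions},
    snapshot::{list_snapshots_request, ListSnapshotsRequest},
};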
8 changes: 0 additions & 8 deletions tests/bdd/features/snapshot/restore/test_delete.py
@@ -140,9 +140,6 @@ def the_pool_space_usage_should_be_zero():
def the_pool_space_usage_should_reflect_the_original_volume(original_volume):
"""the pool space usage should reflect the original volume."""
pool = ApiClient.pools_api().get_pool(POOL)
# Bug, dataplane caches allocated, requires a restart until fixed
Docker.restart_container(NODE)
wait_node_online(NODE)
volume = Volume.update(original_volume, cached=False)
assert pool.state.used == volume.state.usage.allocated

@@ -264,8 +261,6 @@ def the_restored_volume_1_snapshot_1_allocation_size_should_be_12mib(
restored_1_snapshot_1,
):
"""the restored volume 1 snapshot 1 allocation size should be 12MiB."""
Docker.restart_container(NODE)
wait_node_online(NODE)
Cluster.wait_cache_update()
snapshot = Snapshot.update(restored_1_snapshot_1)
assert snapshot.state.allocated_size == 12 * 1024 * 1024
@@ -438,9 +433,6 @@ def the_pool_space_usage_should_reflect_the_snapshot_2_restored_volume_2_and_del
restored_1_snapshot_2,
):
"""the pool space usage should reflect the snapshot 2, restored volume 2, and deleted snapshot and deleted restored volume 1 (16MiB)."""
# Bug, dataplane caches allocated, requires a restart until fixed
Docker.restart_container(NODE)
wait_node_online(NODE)
Cluster.wait_cache_update()

pool = ApiClient.pools_api().get_pool(POOL)
