diff --git a/control-plane/agents/src/bin/core/controller/resources/operations.rs b/control-plane/agents/src/bin/core/controller/resources/operations.rs index a3b4263b9..58b667115 100644 --- a/control-plane/agents/src/bin/core/controller/resources/operations.rs +++ b/control-plane/agents/src/bin/core/controller/resources/operations.rs @@ -1,5 +1,6 @@ -use crate::controller::registry::Registry; +use crate::controller::{registry::Registry, resources::OperationGuardArc}; use agents::errors::SvcError; +use stor_port::types::v0::store::volume::VolumeSpec; /// Resource Cordon Operations. #[async_trait::async_trait] @@ -265,6 +266,7 @@ pub(crate) trait ResourcePruning { pub(crate) trait ResourceCloning { type Create: Sync + Send; type CreateOutput: Sync + Send + Sized; + type Destroy: Sync + Send; /// Create a clone for the `Self` resource. async fn create_clone( @@ -272,4 +274,12 @@ pub(crate) trait ResourceCloning { registry: &Registry, request: &Self::Create, ) -> Result; + + /// Destroy a clone for the `Self` resource, using the volume life cycle op. 
+ async fn destroy_clone( + &mut self, + registry: &Registry, + request: &Self::Destroy, + volume: OperationGuardArc, + ) -> Result<(), SvcError>; } diff --git a/control-plane/agents/src/bin/core/controller/resources/operations_helper.rs b/control-plane/agents/src/bin/core/controller/resources/operations_helper.rs index 3a6f482d7..e1d57990b 100644 --- a/control-plane/agents/src/bin/core/controller/resources/operations_helper.rs +++ b/control-plane/agents/src/bin/core/controller/resources/operations_helper.rs @@ -33,7 +33,10 @@ use std::{fmt::Debug, ops::Deref, sync::Arc}; use stor_port::{ pstor::{product_v1_key_prefix, API_VERSION}, transport_api::ErrorChain, - types::v0::{store::snapshots::volume::VolumeSnapshot, transport::SnapshotId}, + types::v0::{ + store::{snapshots::volume::VolumeSnapshot, volume::VolumeContentSource}, + transport::SnapshotId, + }, }; #[derive(Debug, Snafu)] @@ -939,6 +942,18 @@ impl ResourceSpecsLocked { } } + // add runtime information for volume restores + for volume in self.read().volumes.values() { + match volume.immutable_ref().content_source.as_ref() { + None => continue, + Some(VolumeContentSource::Snapshot(snap_uuid, _)) => { + if let Some(snapshot) = self.read().volume_snapshots.get(snap_uuid) { + snapshot.lock().insert_restore(volume.uuid()) + } + } + } + } + // Remove all entries of v1 key prefix. 
store .delete_values_prefix(&product_v1_key_prefix()) diff --git a/control-plane/agents/src/bin/core/tests/pool/mod.rs b/control-plane/agents/src/bin/core/tests/pool/mod.rs index 1e7278e0d..6e2f8680b 100644 --- a/control-plane/agents/src/bin/core/tests/pool/mod.rs +++ b/control-plane/agents/src/bin/core/tests/pool/mod.rs @@ -550,8 +550,8 @@ async fn reconciler_missing_pool_state() { .with_agents(vec!["core"]) .with_io_engines(1) .with_pool(0, disk.uri()) - .with_cache_period("1s") - .with_reconcile_period(Duration::from_secs(1), Duration::from_secs(1)) + .with_cache_period("500ms") + .with_reconcile_period(Duration::from_millis(500), Duration::from_millis(500)) .build() .await .unwrap(); diff --git a/control-plane/agents/src/bin/core/volume/clone_operations.rs b/control-plane/agents/src/bin/core/volume/clone_operations.rs index a78beee8f..335c09af9 100644 --- a/control-plane/agents/src/bin/core/volume/clone_operations.rs +++ b/control-plane/agents/src/bin/core/volume/clone_operations.rs @@ -2,8 +2,8 @@ use crate::{ controller::{ registry::Registry, resources::{ - operations::{ResourceCloning, ResourceLifecycleExt}, - operations_helper::SpecOperationsHelper, + operations::{ResourceCloning, ResourceLifecycle, ResourceLifecycleExt}, + operations_helper::{GuardedOperationsHelper, SpecOperationsHelper}, OperationGuardArc, TraceStrLog, }, scheduling::{volume::CloneVolumeSnapshot, ResourceFilter}, @@ -16,11 +16,13 @@ use stor_port::{ types::v0::{ store::{ replica::ReplicaSpec, - snapshots::volume::VolumeSnapshot, + snapshots::volume::{ + CreateRestoreInfo, DestroyRestoreInfo, VolumeSnapshot, VolumeSnapshotOperation, + }, volume::{VolumeContentSource, VolumeSpec}, }, transport::{ - CreateSnapshotVolume, Replica, SnapshotCloneId, SnapshotCloneParameters, + CreateSnapshotVolume, DestroyVolume, Replica, SnapshotCloneId, SnapshotCloneParameters, SnapshotCloneSpecParams, }, }, @@ -45,14 +47,52 @@ impl SnapshotCloneOp<'_> { impl ResourceCloning for OperationGuardArc { type 
Create = CreateSnapshotVolume; type CreateOutput = OperationGuardArc; + type Destroy = DestroyVolume; async fn create_clone( &mut self, registry: &Registry, request: &Self::Create, ) -> Result { + let spec_clone = self + .start_update( + registry, + self.as_ref(), + VolumeSnapshotOperation::CreateRestore(CreateRestoreInfo::new( + request.params().uuid.clone(), + )), + ) + .await?; let request = CreateVolumeSource::Snapshot(SnapshotCloneOp(request, self)); - OperationGuardArc::::create_ext(registry, &request).await + + // Create the restore using the volume op guard. + let create_result = OperationGuardArc::::create_ext(registry, &request).await; + + self.complete_update(registry, create_result, spec_clone) + .await + } + + async fn destroy_clone( + &mut self, + registry: &Registry, + request: &Self::Destroy, + mut volume: OperationGuardArc, + ) -> Result<(), SvcError> { + let spec_clone = self + .start_update( + registry, + self.as_ref(), + VolumeSnapshotOperation::DestroyRestore(DestroyRestoreInfo::new( + request.uuid.clone(), + )), + ) + .await?; + + // Destroy the restore using the volume op guard. 
+ let destroy_result = volume.destroy(registry, request).await; + + self.complete_update(registry, destroy_result, spec_clone) + .await } } diff --git a/control-plane/agents/src/bin/core/volume/service.rs b/control-plane/agents/src/bin/core/volume/service.rs index 801a47914..9ff24fac2 100644 --- a/control-plane/agents/src/bin/core/volume/service.rs +++ b/control-plane/agents/src/bin/core/volume/service.rs @@ -30,7 +30,10 @@ use grpc::{ use stor_port::{ transport_api::{v0::Volumes, ReplyError, ResourceKind}, types::v0::{ - store::{snapshots::volume::VolumeSnapshotUserSpec, volume::VolumeSpec}, + store::{ + snapshots::volume::VolumeSnapshotUserSpec, + volume::{VolumeContentSource, VolumeSpec}, + }, transport::{ CreateSnapshotVolume, CreateVolume, DestroyShutdownTargets, DestroyVolume, Filter, PublishVolume, RepublishVolume, SetVolumeReplica, ShareVolume, UnpublishVolume, @@ -298,8 +301,26 @@ impl Service { #[tracing::instrument(level = "info", skip(self), err, fields(volume.uuid = %request.uuid))] pub(super) async fn destroy_volume(&self, request: &DestroyVolume) -> Result<(), SvcError> { let mut volume = self.specs().volume(&request.uuid).await?; - volume.destroy(&self.registry, request).await?; - Ok(()) + let content_source = volume.as_ref().content_source.as_ref(); + let snap_guard = match content_source { + None => None, + Some(VolumeContentSource::Snapshot(snap_uuid, _)) => { + match self.specs().volume_snapshot(snap_uuid).await { + Ok(snap_guard) => Some(snap_guard), + Err(SvcError::VolSnapshotNotFound { .. }) => None, + Err(error) => return Err(error), + } + } + }; + + match snap_guard { + None => volume.destroy(&self.registry, request).await, + Some(mut snap_guard) => { + snap_guard + .destroy_clone(&self.registry, request, volume) + .await + } + } } /// Destroy the shutdown targets associate with the volume. 
diff --git a/control-plane/grpc/proto/v1/volume/volume.proto b/control-plane/grpc/proto/v1/volume/volume.proto index ac3985b30..3e272c4f7 100644 --- a/control-plane/grpc/proto/v1/volume/volume.proto +++ b/control-plane/grpc/proto/v1/volume/volume.proto @@ -59,6 +59,8 @@ message VolumeSpec { optional AffinityGroup affinity_group = 10; // Volume Content Source i.e the snapshot or a volume. optional VolumeContentSource content_source = 11; + // Number of snapshots taken on this volume. + uint32 num_snapshots = 12; // Volume Content Source i.e the snapshot or a volume. message VolumeContentSource { @@ -507,6 +509,8 @@ message VolumeSnapshotMeta { uint64 spec_size = 6; // Size taken by the snapshot and its predecessors. uint64 total_allocated_size = 7; + // Number of restores done from this snapshot. + uint32 num_restores = 8; message ReplicaSnapshots { repeated ReplicaSnapshot snapshots = 1; diff --git a/control-plane/grpc/src/operations/volume/traits.rs b/control-plane/grpc/src/operations/volume/traits.rs index 9cd2ea542..84be6da2e 100644 --- a/control-plane/grpc/src/operations/volume/traits.rs +++ b/control-plane/grpc/src/operations/volume/traits.rs @@ -156,6 +156,7 @@ impl From for volume::VolumeDefinition { thin: volume_spec.thin, affinity_group: volume_spec.affinity_group.into_opt(), content_source: volume_spec.content_source.into_opt(), + num_snapshots: volume_spec.metadata.num_snapshots() as u32, }), metadata: Some(volume::Metadata { spec_status: spec_status as i32, @@ -318,6 +319,7 @@ impl TryFrom for VolumeSpec { affinity_group: volume_spec.affinity_group.into_opt(), metadata: VolumeMetadata::new(volume_meta.as_thin), content_source: volume_spec.content_source.try_into_opt()?, + num_snapshots: volume_spec.num_snapshots, }; Ok(volume_spec) } diff --git a/control-plane/grpc/src/operations/volume/traits_snapshots.rs b/control-plane/grpc/src/operations/volume/traits_snapshots.rs index 0c85e8968..91ccf5fdf 100644 --- 
a/control-plane/grpc/src/operations/volume/traits_snapshots.rs +++ b/control-plane/grpc/src/operations/volume/traits_snapshots.rs @@ -107,6 +107,7 @@ impl From<&stor_port::types::v0::store::snapshots::volume::VolumeSnapshot> for V total_allocated_size: value.metadata().total_allocated_size(), txn_id: value.metadata().txn_id().clone(), transactions, + num_restores: value.num_restores(), }, } } @@ -137,7 +138,10 @@ pub struct VolumeSnapshotMeta { /// The "actual" snapshots can be accessed by the key `txn_id`. /// Failed transactions are any other key. transactions: HashMap>, + /// The number of restores done from this snapshot. + num_restores: u32, } + impl VolumeSnapshotMeta { /// Get the volume snapshot status. pub fn status(&self) -> &SpecStatus<()> { @@ -167,6 +171,10 @@ impl VolumeSnapshotMeta { pub fn total_allocated_size(&self) -> u64 { self.total_allocated_size } + /// The number of restores done from this snapshot. + pub fn num_restores(&self) -> u32 { + self.num_restores + } } /// Volume replica snapshot information. 
@@ -468,6 +476,7 @@ impl TryFrom for VolumeSnapshot { snapshots.map(|s| (k, s)) }) .collect::, _>>()?, + num_restores: meta.num_restores, }, state: VolumeSnapshotState { info, @@ -624,6 +633,7 @@ impl TryFrom for volume::VolumeSnapshot { size: value.meta.size, spec_size: value.meta.spec_size, total_allocated_size: value.meta.total_allocated_size, + num_restores: value.meta.num_restores, }), state: Some(volume::VolumeSnapshotState { state: Some(snapshot::SnapshotState { diff --git a/control-plane/plugin/src/bin/rest-plugin/main.rs b/control-plane/plugin/src/bin/rest-plugin/main.rs index ce42389aa..8790bb4cd 100644 --- a/control-plane/plugin/src/bin/rest-plugin/main.rs +++ b/control-plane/plugin/src/bin/rest-plugin/main.rs @@ -2,8 +2,8 @@ use clap::Parser; use openapi::tower::client::Url; use plugin::{ operations::{ - Cordoning, Drain, Get, GetBlockDevices, GetSnapshots, List, Operations, RebuildHistory, - ReplicaTopology, Scale, + Cordoning, Drain, Get, GetBlockDevices, GetSnapshots, List, ListExt, Operations, + RebuildHistory, ReplicaTopology, Scale, }, resources::{ blockdevice, cordon, drain, node, pool, snapshot, volume, CordonResources, DrainResources, @@ -83,13 +83,15 @@ async fn execute(cli_args: CliArgs) { } GetDrainArgs::Nodes => drain::NodeDrains::list(&cli_args.output).await, }, - GetResources::Volumes => volume::Volumes::list(&cli_args.output).await, + GetResources::Volumes(vol_args) => { + volume::Volumes::list(&cli_args.output, vol_args).await + } GetResources::Volume { id } => volume::Volume::get(id, &cli_args.output).await, GetResources::RebuildHistory { id } => { volume::Volume::rebuild_history(id, &cli_args.output).await } - GetResources::VolumeReplicaTopologies => { - volume::Volume::topologies(&cli_args.output).await + GetResources::VolumeReplicaTopologies(vol_args) => { + volume::Volume::topologies(&cli_args.output, vol_args).await } GetResources::VolumeReplicaTopology { id } => { volume::Volume::topology(id, &cli_args.output).await diff --git 
a/control-plane/plugin/src/operations.rs b/control-plane/plugin/src/operations.rs index ba39aee17..4e73586bd 100644 --- a/control-plane/plugin/src/operations.rs +++ b/control-plane/plugin/src/operations.rs @@ -41,6 +41,14 @@ pub trait List { async fn list(output: &utils::OutputFormat); } +/// List trait. +/// To be implemented by resources which support the 'list' operation, with context. +#[async_trait(?Send)] +pub trait ListExt { + type Context; + async fn list(output: &utils::OutputFormat, context: &Self::Context); +} + /// Get trait. /// To be implemented by resources which support the 'get' operation. #[async_trait(?Send)] @@ -62,7 +70,8 @@ pub trait Scale { #[async_trait(?Send)] pub trait ReplicaTopology { type ID; - async fn topologies(output: &utils::OutputFormat); + type Context; + async fn topologies(output: &utils::OutputFormat, context: &Self::Context); async fn topology(id: &Self::ID, output: &utils::OutputFormat); } diff --git a/control-plane/plugin/src/resources/mod.rs b/control-plane/plugin/src/resources/mod.rs index c79a05f32..5784629dd 100644 --- a/control-plane/plugin/src/resources/mod.rs +++ b/control-plane/plugin/src/resources/mod.rs @@ -2,6 +2,7 @@ use crate::resources::{ blockdevice::BlockDeviceArgs, node::{DrainNodeArgs, GetNodeArgs}, snapshot::VolumeSnapshotArgs, + volume::VolumesArgs, }; pub mod blockdevice; @@ -29,13 +30,13 @@ pub enum GetResources { #[clap(subcommand)] Drain(GetDrainArgs), /// Get all volumes. - Volumes, + Volumes(VolumesArgs), /// Get volume with the given ID. Volume { id: VolumeId }, /// Get Rebuild history for the volume with the given ID. RebuildHistory { id: VolumeId }, /// Get the replica topology for all volumes. - VolumeReplicaTopologies, + VolumeReplicaTopologies(VolumesArgs), /// Get the replica topology for the volume with the given ID. VolumeReplicaTopology { id: VolumeId }, /// Get volume snapshots based on input args. 
diff --git a/control-plane/plugin/src/resources/snapshot.rs b/control-plane/plugin/src/resources/snapshot.rs index 847363e93..328b6d0c8 100644 --- a/control-plane/plugin/src/resources/snapshot.rs +++ b/control-plane/plugin/src/resources/snapshot.rs @@ -56,7 +56,8 @@ impl CreateRow for openapi::models::VolumeSnapshot { ::utils::bytes::into_human(meta.spec_size), ::utils::bytes::into_human(state.allocated_size), ::utils::bytes::into_human(meta.total_allocated_size), - state.source_volume + state.source_volume, + self.definition.metadata.num_restores ] } } diff --git a/control-plane/plugin/src/resources/utils.rs b/control-plane/plugin/src/resources/utils.rs index e18a8a327..5811aeebb 100644 --- a/control-plane/plugin/src/resources/utils.rs +++ b/control-plane/plugin/src/resources/utils.rs @@ -20,7 +20,9 @@ lazy_static! { "STATUS", "SIZE", "THIN-PROVISIONED", - "ALLOCATED" + "ALLOCATED", + "SNAPSHOTS", + "SOURCE" ]; pub static ref SNAPSHOT_HEADERS: Row = row![ "ID", @@ -28,7 +30,8 @@ lazy_static! 
{ "SOURCE-SIZE", "ALLOCATED-SIZE", "TOTAL-ALLOCATED-SIZE", - "SOURCE-VOL" + "SOURCE-VOL", + "RESTORES" ]; pub static ref POOLS_HEADERS: Row = row![ "ID", diff --git a/control-plane/plugin/src/resources/volume.rs b/control-plane/plugin/src/resources/volume.rs index 335510bfa..790c02f90 100644 --- a/control-plane/plugin/src/resources/volume.rs +++ b/control-plane/plugin/src/resources/volume.rs @@ -1,16 +1,16 @@ use crate::{ - operations::{Get, List, Scale}, - resources::{utils, VolumeId}, + operations::{Get, ListExt, RebuildHistory, ReplicaTopology, Scale}, + resources::{ + utils, + utils::{optional_cell, CreateRow, CreateRows, GetHeaderRow, OutputFormat}, + VolumeId, + }, rest_wrapper::RestClient, }; -use async_trait::async_trait; +use openapi::{models::VolumeContentSource, tower::client::Url}; -use crate::{ - operations::{RebuildHistory, ReplicaTopology}, - resources::utils::{optional_cell, CreateRow, CreateRows, GetHeaderRow, OutputFormat}, -}; +use async_trait::async_trait; use chrono::prelude::*; -use openapi::tower::client::Url; use prettytable::Row; use std::{collections::HashMap, str::FromStr}; @@ -18,6 +18,21 @@ use std::{collections::HashMap, str::FromStr}; #[derive(clap::Args, Debug)] pub struct Volumes {} +#[derive(Debug, Clone, strum_macros::EnumString, strum_macros::AsRefStr, PartialEq)] +#[strum(serialize_all = "lowercase")] +enum VolumeSource { + None, + Snapshot, +} + +#[derive(Debug, Clone, clap::Args)] +/// Volume args. 
+pub struct VolumesArgs { + #[clap(long)] + /// Shows only volumes created from specific source, viz none, snapshot + source: Option, +} + impl CreateRow for openapi::models::Volume { fn row(&self) -> Row { let state = &self.state; @@ -38,7 +53,13 @@ impl CreateRow for openapi::models::Volume { .usage .as_ref() .map(|u| ::utils::bytes::into_human(u.allocated)) - ) + ), + self.spec.num_snapshots, + optional_cell(self.spec.content_source.as_ref().map(|source| { + match source { + VolumeContentSource::snapshot(_) => "Snapshot", + } + })), ] } } @@ -58,9 +79,10 @@ impl GetHeaderRow for openapi::models::Volume { } #[async_trait(?Send)] -impl List for Volumes { - async fn list(output: &utils::OutputFormat) { - if let Some(volumes) = get_paginated_volumes().await { +impl ListExt for Volumes { + type Context = VolumesArgs; + async fn list(output: &OutputFormat, context: &Self::Context) { + if let Some(volumes) = get_paginated_volumes(context).await { // Print table, json or yaml based on output format. utils::print_table(output, volumes); } @@ -70,7 +92,7 @@ impl List for Volumes { /// Get the list of volumes over multiple paginated requests if necessary. /// If any `get_volumes` request fails, `None` will be returned. This prevents the user from getting /// a partial list when they expect a complete list. -async fn get_paginated_volumes() -> Option> { +async fn get_paginated_volumes(volume_args: &VolumesArgs) -> Option> { // The number of volumes to get per request. 
let max_entries = 200; let mut starting_token = Some(0); @@ -95,6 +117,22 @@ async fn get_paginated_volumes() -> Option> { } } + match volume_args.source.as_ref() { + None => {} + Some(VolumeSource::None) => { + volumes.retain(|vol| vol.spec.content_source.is_none()); + } + Some(VolumeSource::Snapshot) => { + volumes.retain(|vol| { + vol.spec.content_source.is_some() + && matches!( + vol.spec.content_source, + Some(VolumeContentSource::snapshot(_)) + ) + }); + } + } + Some(volumes) } @@ -147,8 +185,9 @@ impl Scale for Volume { #[async_trait(?Send)] impl ReplicaTopology for Volume { type ID = VolumeId; - async fn topologies(output: &OutputFormat) { - let volumes = VolumeTopologies(get_paginated_volumes().await.unwrap_or_default()); + type Context = VolumesArgs; + async fn topologies(output: &OutputFormat, context: &Self::Context) { + let volumes = VolumeTopologies(get_paginated_volumes(context).await.unwrap_or_default()); utils::print_table(output, volumes); } async fn topology(id: &Self::ID, output: &OutputFormat) { diff --git a/control-plane/rest/openapi-specs/v0_api_spec.yaml b/control-plane/rest/openapi-specs/v0_api_spec.yaml index 6de048734..afec5bad8 100644 --- a/control-plane/rest/openapi-specs/v0_api_spec.yaml +++ b/control-plane/rest/openapi-specs/v0_api_spec.yaml @@ -3209,6 +3209,11 @@ components: $ref: '#/components/schemas/AffinityGroup' content_source: $ref: '#/components/schemas/VolumeContentSource' + num_snapshots: + description: Number of snapshots taken on this volume. + type: integer + format: int32 + minimum: 0 required: - num_paths - num_replicas @@ -3218,6 +3223,7 @@ components: - uuid - policy - thin + - num_snapshots VolumeTarget: example: node: io-engine-1 @@ -3499,6 +3505,11 @@ components: type: array items: $ref: '#/components/schemas/ReplicaSnapshot' + num_restores: + description: Number of restores done from this snapshot. 
+ type: integer + format: int32 + minimum: 0 required: - status - size @@ -3506,6 +3517,7 @@ components: - total_allocated_size - txn_id - transactions + - num_restores VolumeSnapshotSpec: description: |- Volume Snapshot Spec information. diff --git a/control-plane/rest/service/src/v0/snapshots.rs b/control-plane/rest/service/src/v0/snapshots.rs index f13723bac..ea619f777 100644 --- a/control-plane/rest/service/src/v0/snapshots.rs +++ b/control-plane/rest/service/src/v0/snapshots.rs @@ -184,6 +184,7 @@ fn to_models_volume_snapshot(snap: &VolumeSnapshot) -> models::VolumeSnapshot { ) }) .collect::>(), + snap.meta().num_restores(), ), models::VolumeSnapshotSpec::new_all(snap.spec().snap_id(), snap.spec().source_id()), ), diff --git a/control-plane/stor-port/src/types/v0/store/snapshots/volume.rs b/control-plane/stor-port/src/types/v0/store/snapshots/volume.rs index f653e7e19..4283bd9d3 100644 --- a/control-plane/stor-port/src/types/v0/store/snapshots/volume.rs +++ b/control-plane/stor-port/src/types/v0/store/snapshots/volume.rs @@ -82,6 +82,21 @@ impl VolumeSnapshot { .retain(|key, _| key == &self.metadata.txn_id); self.metadata.transactions.extend(transactions) } + /// Insert a restore to runtime meta restores. + pub fn insert_restore(&mut self, restored_volume: &VolumeId) { + self.metadata + .runtime_meta + .restores + .insert(restored_volume.clone()) + } + /// Remove a restore from runtime meta restores. + pub fn remove_restore(&mut self, restored_volume: &VolumeId) { + self.metadata.runtime_meta.restores.remove(restored_volume) + } + /// Number of restores done from this snapshot. + pub fn num_restores(&self) -> u32 { + self.metadata.runtime_meta.restores.len() + } } impl From<&VolumeSnapshotUserSpec> for VolumeSnapshot { fn from(value: &VolumeSnapshotUserSpec) -> Self { @@ -115,6 +130,9 @@ pub struct VolumeSnapshotMeta { /// The "actual" snapshots can be accessed by the key `txn_id`. /// Failed transactions are any other key. 
transactions: HashMap>, + /// VolumeSnapshot runtime metadata information. + #[serde(skip)] + runtime_meta: VolumeSnapshotRuntimeMetadata, } impl VolumeSnapshotMeta { /// Get the snapshot operation state. @@ -170,6 +188,38 @@ impl VolumeSnapshotMeta { } } +/// VolumeSnapshot runtime metadata information. +#[derive(Debug, Clone, PartialEq, Default)] +struct VolumeSnapshotRuntimeMetadata { + /// Runtime list of all restores done from this snapshot. + restores: VolumeRestoreList, +} + +/// List of all volume snapshot restores and related information. +#[derive(Debug, Clone, PartialEq, Default)] +pub struct VolumeRestoreList { + /// Runtime list of all volumes restored from this snapshot. + pub(crate) restores: HashSet, +} +impl VolumeRestoreList { + /// Insert restored volume id into the list. + pub fn insert(&mut self, restored_volume: VolumeId) { + self.restores.insert(restored_volume); + } + /// Remove restored volume id from the list. + pub fn remove(&mut self, restored_volume: &VolumeId) { + self.restores.remove(restored_volume); + } + /// Check if restores are empty. + pub fn is_empty(&self) -> bool { + self.restores.is_empty() + } + /// Number of restores. + pub fn len(&self) -> u32 { + self.restores.len() as u32 + } +} + /// Operation State for a VolumeSnapshot resource. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct VolumeSnapshotOperationState { @@ -186,6 +236,8 @@ pub enum VolumeSnapshotOperation { Create(VolumeSnapshotCreateInfo), Destroy, CleanupStaleTransactions, + CreateRestore(CreateRestoreInfo), + DestroyRestore(DestroyRestoreInfo), } /// Completion info for volume snapshot create operation. @@ -234,6 +286,40 @@ impl PartialEq for VolumeSnapshot { } } +/// CreateRestore Operation info. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct CreateRestoreInfo { + volume_uuid: VolumeId, +} + +impl CreateRestoreInfo { + /// Create a new CreateRestoreInfo. 
+ pub fn new(volume_uuid: VolumeId) -> Self { + Self { volume_uuid } + } + /// Get the restore volume uuid. + pub fn volume_uuid(&self) -> &VolumeId { + &self.volume_uuid + } +} + +/// Destroy Operation info. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct DestroyRestoreInfo { + volume_uuid: VolumeId, +} + +impl DestroyRestoreInfo { + /// Create a new DestroyRestoreInfo. + pub fn new(volume_uuid: VolumeId) -> Self { + Self { volume_uuid } + } + /// Get the restore volume uuid. + pub fn volume_uuid(&self) -> &VolumeId { + &self.volume_uuid + } +} + +/// The replica snapshot created from the creation operation. #[derive(Debug, Clone, PartialEq)] pub struct VolumeSnapshotCreateResult { @@ -318,6 +404,10 @@ impl SpecTransaction for VolumeSnapshot { } } VolumeSnapshotOperation::CleanupStaleTransactions => {} + VolumeSnapshotOperation::CreateRestore(info) => self.insert_restore(info.volume_uuid()), + VolumeSnapshotOperation::DestroyRestore(info) => { + self.remove_restore(info.volume_uuid()) + } } } @@ -344,6 +434,8 @@ impl SpecTransaction for VolumeSnapshot { } VolumeSnapshotOperation::Destroy => {} VolumeSnapshotOperation::CleanupStaleTransactions => {} + VolumeSnapshotOperation::CreateRestore(_) => {} + VolumeSnapshotOperation::DestroyRestore(_) => {} } } @@ -426,6 +518,10 @@ impl VolumeSnapshotList { pub fn is_empty(&self) -> bool { self.snapshots.is_empty() } + /// Number of snapshots. + pub fn len(&self) -> usize { + self.snapshots.len() + } } impl PartialEq<()> for VolumeSnapshot { diff --git a/control-plane/stor-port/src/types/v0/store/volume.rs b/control-plane/stor-port/src/types/v0/store/volume.rs index 57cb7ff92..75284d898 100644 --- a/control-plane/stor-port/src/types/v0/store/volume.rs +++ b/control-plane/stor-port/src/types/v0/store/volume.rs @@ -175,6 +175,9 @@ pub struct VolumeSpec { /// Affinity Group related information. #[serde(default)] pub affinity_group: Option, + /// Number of snapshots taken on this volume. 
+ #[serde(skip)] + pub num_snapshots: u32, /// Volume metadata information. #[serde(default, skip_serializing_if = "super::is_default")] pub metadata: VolumeMetadata, @@ -219,6 +222,10 @@ impl VolumeMetadata { // we become thin provisioned! self.persisted.snapshot_as_thin = Some(true); } + /// Number of snapshots taken on this volume. + pub fn num_snapshots(&self) -> usize { + self.runtime.snapshots.len() + } } /// Volume meta information. @@ -744,6 +751,7 @@ impl From for models::VolumeSpec { src.metadata.persisted.snapshot_as_thin, src.affinity_group.into_opt(), src.content_source.into_opt(), + src.num_snapshots, ) } } diff --git a/tests/bdd/features/volume/create/test_feature.py b/tests/bdd/features/volume/create/test_feature.py index 1739aecf0..6c92a2139 100644 --- a/tests/bdd/features/volume/create/test_feature.py +++ b/tests/bdd/features/volume/create/test_feature.py @@ -258,6 +258,7 @@ def volume_creation_should_succeed_with_a_returned_volume_object(create_request) VOLUME_UUID, VolumePolicy(False), False, + 0, ) # Check the volume object returned is as expected diff --git a/tests/bdd/features/volume/observability/test_feature.py b/tests/bdd/features/volume/observability/test_feature.py index 15760b98e..1c68f53ec 100644 --- a/tests/bdd/features/volume/observability/test_feature.py +++ b/tests/bdd/features/volume/observability/test_feature.py @@ -37,7 +37,9 @@ def init(): Deployer.start(1) ApiClient.pools_api().put_node_pool( - NODE_NAME, POOL_UUID, CreatePoolBody(["malloc:///disk?size_mb=50"]) + NODE_NAME, + POOL_UUID, + CreatePoolBody(["malloc:///disk?size_mb=50"]), ) ApiClient.volumes_api().put_volume( VOLUME_UUID, CreateVolumeBody(VolumePolicy(False), 1, VOLUME_SIZE, False) @@ -80,6 +82,7 @@ def a_volume_object_representing_the_volume_should_be_returned(volume_ctx): VOLUME_UUID, VolumePolicy(False), False, + 0, ) volume = volume_ctx[VOLUME_CTX_KEY] diff --git a/tests/bdd/features/volume/topology/test_feature.py 
b/tests/bdd/features/volume/topology/test_feature.py index 3c9fe8221..80d46ac62 100644 --- a/tests/bdd/features/volume/topology/test_feature.py +++ b/tests/bdd/features/volume/topology/test_feature.py @@ -474,6 +474,7 @@ def volume_creation_should_succeed_with_a_returned_volume_object_with_topology( VOLUME_UUID, VolumePolicy(False), False, + 0, topology=Topology( pool_topology=PoolTopology( labelled=LabelledTopology( @@ -499,7 +500,13 @@ def volume_creation_should_succeed_with_a_returned_volume_object_without_pool_to ): """volume creation should succeed with a returned volume object without pool topology.""" expected_spec = VolumeSpec( - 1, VOLUME_SIZE, SpecStatus("Created"), VOLUME_UUID, VolumePolicy(False), False + 1, + VOLUME_SIZE, + SpecStatus("Created"), + VOLUME_UUID, + VolumePolicy(False), + False, + 0, ) # Check the volume object returned is as expected