From 0a6eca65fc9790866aa7f32264424d0d18b9b8c2 Mon Sep 17 00:00:00 2001
From: shubham
Date: Fri, 20 Aug 2021 11:00:37 +0530
Subject: [PATCH 1/8] feat(lvm): add lvm lib

Signed-off-by: shubham
---
 mayastor/src/lib.rs       |   1 +
 mayastor/src/lvm/error.rs |  22 +++
 mayastor/src/lvm/mod.rs   |   4 +
 mayastor/src/lvm/pool.rs  | 334 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 361 insertions(+)
 create mode 100644 mayastor/src/lvm/error.rs
 create mode 100644 mayastor/src/lvm/mod.rs
 create mode 100644 mayastor/src/lvm/pool.rs

diff --git a/mayastor/src/lib.rs b/mayastor/src/lib.rs
index 17b8a14a8..ab48953e2 100644
--- a/mayastor/src/lib.rs
+++ b/mayastor/src/lib.rs
@@ -18,6 +18,7 @@ pub mod grpc;
 pub mod host;
 pub mod jsonrpc;
 pub mod logger;
+pub mod lvm;
 pub mod lvs;
 pub mod nexus_uri;
 pub mod persistent_store;
diff --git a/mayastor/src/lvm/error.rs b/mayastor/src/lvm/error.rs
new file mode 100644
index 000000000..cb823a2f1
--- /dev/null
+++ b/mayastor/src/lvm/error.rs
@@ -0,0 +1,22 @@
+use snafu::Snafu;
+
+#[derive(Debug, Snafu)]
+#[snafu(visibility = "pub(crate)")]
+pub enum Error {
+    #[snafu(display("Failed to parse {}", err))]
+    FailedParsing { err: String },
+    #[snafu(display("Failed to execute command {}", err))]
+    FailedExec { err: String },
+    #[snafu(display("I/O error: {}", err))]
+    Io { err: std::io::Error },
+    #[snafu(display("Invalid PoolType {}", value))]
+    InvalidPoolType { value: i32 },
+}
+
+impl From<std::io::Error> for Error {
+    fn from(e: std::io::Error) -> Self {
+        Error::FailedExec {
+            err: e.to_string(),
+        }
+    }
+}
diff --git a/mayastor/src/lvm/mod.rs b/mayastor/src/lvm/mod.rs
new file mode 100644
index 000000000..34fad8178
--- /dev/null
+++ b/mayastor/src/lvm/mod.rs
@@ -0,0 +1,4 @@
+pub use error::Error;
+
+mod error;
+pub mod pool;
diff --git a/mayastor/src/lvm/pool.rs b/mayastor/src/lvm/pool.rs
new file mode 100644
index 000000000..0bdab7dc5
--- /dev/null
+++ b/mayastor/src/lvm/pool.rs
@@ -0,0 +1,334 @@
+//! Logical Volume Manager (LVM) is a device mapper framework that provides
+//! logical volume management for the Linux kernel.
+//! - PV (Physical Volume) is any block device that is configured to be used by
+//!   lvm i.e. formatted with the lvm2_member filesystem. Commands available
+//!     - pvcreate -> to create a physical volume out of any block device
+//!     - pvchange -> to make any change like adding tags
+//!     - pvs -> to list the physical volumes with their attributes
+//!     - pvremove -> to delete a PV which removes the lvm specific filesystem
+//!       from the block device
+//! - VG (Volume Group) is a collection of PVs that is used as a store to
+//!   provision volumes. Commands available
+//!     - vgcreate -> to create a volume group with a specific name and
+//!       mentioned physical volumes
+//!     - vgchange -> to make any change like adding tags, activate/deactivate
+//!       volume group
+//!     - vgs -> to list the VGs with their attributes
+//!     - vgremove -> removes the volume group
+//! - LV (Logical Volume) is a block device carved out of VG. Commands
+//!   available
+//!     - lvcreate -> to create a logical volume with a specific name on
+//!       mentioned volume group
+//!     - lvchange -> to make any change like adding tags, activate/deactivate
+//!       logical volume
+//!     - lvs -> to list the logical volumes with their attributes
+//!     - lvremove -> removes the logical volume
+use crate::lvm::error::Error;
+use serde::de::{self, Deserialize, Deserializer};
+use std::{
+    fmt::Display,
+    io::{Error as ioError, ErrorKind},
+    str::FromStr,
+};
+use tokio::process::Command;
+
+use rpc::mayastor::CreatePoolRequest;
+
+const PVS_COMMAND: &str = "pvs";
+const VGCHANGE_COMMAND: &str = "vgchange";
+const VGS_COMMAND: &str = "vgs";
+pub const MAYASTOR_LABEL: &str = "@mayastor";
+
+fn deserialize_number_from_string<'de, T, D>(
+    deserializer: D,
+) -> Result<T, D::Error>
+where
+    T: FromStr,
+    T::Err: Display,
+    D: Deserializer<'de>,
+{
+    let s = String::deserialize(deserializer)?;
+    T::from_str(&s).map_err(de::Error::custom)
+}
+
+/// used to decode the json output for vgs command to get
+/// the capacity and free size of a given vol group
+/// sudo vgs --options=vg_size,vg_free --units=b --nosuffix --reportformat=json
+/// {
+///     "report": [
+///         {
+///             "vg": [
+///                 {"vg_name": "pool", "vg_size":"15372124160",
+///                  "vg_free":"15372124160"} ]
+///         }
+///     ]
+/// }
+#[derive(Debug, Serialize, Deserialize)]
+struct VolGroupList {
+    report: Vec<VolGroups>,
+}
+#[derive(Debug, Serialize, Deserialize)]
+struct VolGroups {
+    /// corresponds to the vg field in json output
+    vg: Vec<VolGroup>,
+}
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct VolGroup {
+    /// corresponds to the vg_name field in json output, the name of the
+    /// vol group
+    vg_name: String,
+    /// corresponds to the vg_size field in json output, the total capacity of
+    /// vol group in bytes
+    #[serde(deserialize_with = "deserialize_number_from_string")]
+    vg_size: u64,
+    /// corresponds to the vg_free field in json output, the free space on
+    /// vol group in bytes
+    #[serde(deserialize_with = "deserialize_number_from_string")]
+    vg_free: u64,
+    /// the physical vol disks used by the vol group
+    #[serde(skip_deserializing)]
+    disks: Vec<String>,
+}
+
+/// used to decode the json output for pvs command to get
+/// all the physical vols and their corresponding vol group
+/// sudo pvs --options=vg_name,pv_name --reportformat=json
+/// {
+///     "report": [
+///         {
+///             "pv": [
+///                 {"vg_name":"pool", "pv_name":"/dev/sdb"}
+///             ]
+///         }
+///     ]
+/// }
+#[derive(Debug, Serialize, Deserialize)]
+struct PhysicalVolsReport {
+    /// corresponds to the report field in json output
+    report: Vec<PhysicalVol>,
+}
+#[derive(Debug, Serialize, Deserialize)]
+struct PhysicalVol {
+    /// corresponds to the pv field in json output
+    pv: Vec<VolGroupPhysicalVolMap>,
+}
+#[derive(Debug, Serialize, Deserialize)]
+struct VolGroupPhysicalVolMap {
+    /// corresponds to the vg_name field in json output
+    vg_name: String,
+    /// corresponds to the pv_name field in json output
+    pv_name: String,
+}
+
+impl VolGroup {
+    /// lookup a vol group by its name
+    pub async fn lookup_by_name(name: &str, label: &str) -> Option<Self> {
+        Self::list(label)
+            .await
+            .ok()?
+            .iter()
+            .find(|p| p.vg_name == name)
+            .cloned()
+    }
+
+    /// check if the given disk is already in use by some other vol group
+    /// and returns the vol group which is using it
+    pub async fn lookup_by_disk(name: &str) -> Option<Self> {
+        Self::list("")
+            .await
+            .ok()?
+ .iter() + .find(|p| p.disks.iter().any(|disk| disk.as_str() == name)) + .cloned() + } + + /// list all the vol group having the specified label tag + pub async fn list(label: &str) -> Result, Error> { + let mut args = vec![ + "--units=b", + "--nosuffix", + "--options=vg_name,vg_size,vg_free", + "--reportformat=json", + ]; + if !label.is_empty() { + args.push(label); + } + + let output = Command::new(VGS_COMMAND) + .args(args.as_slice()) + .output() + .await?; + + if !output.status.success() { + let msg = String::from_utf8(output.stderr).map_or_else( + |e: std::string::FromUtf8Error| { + format!("failed to parse stderr for vgs: {}", e.to_string()) + }, + |s| s, + ); + return Err(Error::FailedExec { + err: msg, + }); + } + + let json_result: VolGroupList = + serde_json::from_slice(output.stdout.as_slice()).map_err(|e| { + Error::FailedParsing { + err: e.to_string(), + } + })?; + + let mut pools = json_result.report[0].vg.clone(); + for p in &mut pools { + p.disks = p.clone().get_disks().await? + } + Ok(pools) + } + + /// import a vol group with the name provided or create one with the name + /// and disks provided currently only import is supported + pub async fn import_or_create( + req: CreatePoolRequest, + ) -> Result { + let pool = Self::import(req.name.as_str()).await?; + info!("The lvm pool '{}' has been created.", pool.name(),); + Ok(pool) + } + + /// import a vol group by its name, match the disks on the vol group + /// and if true add the tag mayastor to the vol group to make it available + /// as a Pool. + pub async fn import(name: &str) -> Result { + if let Some(pool) = Self::lookup_by_name(name, "").await { + let output = Command::new(VGCHANGE_COMMAND) + .arg(name) + .arg("--addtag=mayastor") + .output() + .await?; + if !output.status.success() { + let msg = String::from_utf8(output.stderr).map_or_else( + |e: std::string::FromUtf8Error| { + format!( + "failed to parse stderr for vg_change: {}", + e.to_string() + ) + }, + |s| s, + ); + return Err(Error::FailedExec { + err: msg, + }); + } + + Ok(pool) + } else { + Err(Error::Io { + err: ioError::new( + ErrorKind::NotFound, + format!("vol group {} not found", name), + ), + }) + } + } + + /// fetch the physical vols for the vol group + pub async fn get_disks(self) -> Result, Error> { + let output = Command::new(PVS_COMMAND) + .args(&["--options=vg_name,pv_name", "--reportformat=json"]) + .output() + .await?; + if !output.status.success() { + let msg = String::from_utf8(output.stderr).map_or_else( + |e: std::string::FromUtf8Error| { + format!("failed to parse stderr for pvs: {}", e.to_string()) + }, + |s| s, + ); + return Err(Error::FailedExec { + err: msg, + }); + } + + let json_output: PhysicalVolsReport = + serde_json::from_slice(output.stdout.as_slice()).map_err(|e| { + Error::FailedParsing { + err: e.to_string(), + } + })?; + + let mut disks: Vec = vec![]; + + for p in json_output + .report + .get(0) + .ok_or(Error::FailedParsing { + err: "failed to get pvs report".to_string(), + })? 
+ .pv + .as_slice() + { + if p.vg_name == self.vg_name { + disks.push(p.pv_name.as_str().to_string()) + } + } + Ok(disks) + } + + /// return the name of the current vol group + pub fn name(&self) -> &str { + self.vg_name.as_str() + } + + /// return the disks of the current vol group + pub fn disks(&self) -> Vec { + self.disks.clone() + } + + /// returns the total capacity of the vol group + pub fn capacity(&self) -> u64 { + self.vg_size + } + + /// returns the available capacity + pub fn available(&self) -> u64 { + self.vg_free + } + + /// returns the used capacity + pub fn used(&self) -> u64 { + self.capacity() - self.available() + } + + /// delete a given vol group and its corresponding physical vols + pub async fn destroy(self) -> Result<(), Error> { + // As currently only import of vol group is supported + // exporting the vol group on destroy. + self.clone().export().await?; + info!("pool '{}' has been destroyed successfully", self.name()); + Ok(()) + } + + /// exports a given vol group by removing the mayastor tag + pub async fn export(self) -> Result<(), Error> { + let output = Command::new(VGCHANGE_COMMAND) + .arg(self.name()) + .arg("--deltag=mayastor") + .output() + .await?; + if !output.status.success() { + let msg = String::from_utf8(output.stderr).map_or_else( + |e: std::string::FromUtf8Error| { + format!( + "failed to parse stderr for vg_change: {}", + e.to_string() + ) + }, + |s| s, + ); + return Err(Error::FailedExec { + err: msg, + }); + } + Ok(()) + } +} From f0b6b6ff13c9d9dbd8e87ab770fa35b03458553f Mon Sep 17 00:00:00 2001 From: shubham Date: Fri, 20 Aug 2021 11:01:45 +0530 Subject: [PATCH 2/8] feat(lvm): add lvm support to grpc requests Signed-off-by: shubham --- Cargo.lock | 2 - mayastor/src/bin/mayastor-client/pool_cli.rs | 46 +++++- mayastor/src/core/env.rs | 3 + mayastor/src/core/mod.rs | 2 + mayastor/src/grpc/mayastor_grpc.rs | 146 ++++++++++++++++--- mayastor/src/pool.rs | 1 + mayastor/src/subsys/config/pool.rs | 27 +++- mayastor/src/subsys/mod.rs | 2 +- rpc/mayastor-api | 2 +- 9 files changed, 193 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c978b31f0..859c1bdcc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,7 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 - [[package]] name = "addr2line" version = "0.16.0" diff --git a/mayastor/src/bin/mayastor-client/pool_cli.rs b/mayastor/src/bin/mayastor-client/pool_cli.rs index c788b676a..b974c756d 100644 --- a/mayastor/src/bin/mayastor-client/pool_cli.rs +++ b/mayastor/src/bin/mayastor-client/pool_cli.rs @@ -8,7 +8,7 @@ use byte_unit::Byte; use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; use colored_json::ToColoredJson; use snafu::ResultExt; -use tonic::Status; +use tonic::{Code, Status}; pub fn subcommands<'a, 'b>() -> App<'a, 'b> { let create = SubCommand::with_name("create") @@ -21,10 +21,18 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { ) .arg( Arg::with_name("disk") - .required(true) + .required(false) .multiple(true) .index(2) .help("Disk device files"), + ) + .arg( + Arg::with_name("pooltype") + .short("t") + .help("the type of the pool") + .required(false) + .possible_values(&["lvs", "lvm"]) + .default_value("lvs"), ); let destroy = SubCommand::with_name("destroy") .about("Destroy storage pool") @@ -73,17 +81,20 @@ async fn create( .to_owned(); let disks = matches .values_of("disk") - .ok_or_else(|| Error::MissingValue { - field: "disk".to_string(), - })? 
- .map(|dev| dev.to_owned()) + .map(|values| values.collect::>()) + .unwrap_or_default() + .iter() + .map(|disk| disk.to_string()) .collect(); + let pooltype = + parse_pooltype(matches.value_of("pooltype")).context(GrpcStatus)?; let response = ctx .client .create_pool(rpc::CreatePoolRequest { name: name.clone(), disks, + pooltype, }) .await .context(GrpcStatus)?; @@ -178,8 +189,10 @@ async fn list( let cap = Byte::from_bytes(p.capacity.into()); let used = Byte::from_bytes(p.used.into()); let state = pool_state_to_str(p.state); + let pooltype = pooltype_to_str(p.pooltype); vec![ p.name.clone(), + pooltype.to_string(), state.to_string(), ctx.units(cap), ctx.units(used), @@ -188,7 +201,7 @@ async fn list( }) .collect(); ctx.print_list( - vec!["NAME", "STATE", ">CAPACITY", ">USED", "DISKS"], + vec!["NAME", "TYPE", "STATE", ">CAPACITY", ">USED", "DISKS"], table, ); } @@ -205,3 +218,22 @@ fn pool_state_to_str(idx: i32) -> &'static str { rpc::PoolState::PoolFaulted => "faulted", } } + +fn pooltype_to_str(idx: i32) -> &'static str { + match rpc::PoolType::from_i32(idx).unwrap() { + rpc::PoolType::Lvs => "lvs", + rpc::PoolType::Lvm => "lvm", + } +} + +fn parse_pooltype(ptype: Option<&str>) -> Result { + match ptype { + None => Ok(rpc::PoolType::Lvs as i32), + Some("lvs") => Ok(rpc::PoolType::Lvs as i32), + Some("lvm") => Ok(rpc::PoolType::Lvm as i32), + Some(_) => Err(Status::new( + Code::Internal, + "Invalid value of pool type".to_owned(), + )), + } +} diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 8a5c2c60d..990667c0d 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -142,8 +142,11 @@ impl MayastorFeatures { Err(_) => false, }; + let lvm = matches!(std::env::var("LVM_ENABLE"), Ok(s) if s == "1"); + MayastorFeatures { asymmetric_namespace_access: ana, + lvm, } } diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs index 25ee3af9b..d9ba3a2a7 100644 --- a/mayastor/src/core/mod.rs +++ b/mayastor/src/core/mod.rs @@ -299,4 +299,6 @@ pub static MWQ: once_cell::sync::Lazy> = #[derive(Debug, Clone)] pub struct MayastorFeatures { pub asymmetric_namespace_access: bool, + /// when set to true, support for lvm pools and volumes is enabled + pub lvm: bool, } diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index 67dc57278..490635354 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -38,9 +38,13 @@ use crate::{ Serializer, }, host::{blk_device, resource}, + lvm::{ + pool::{VolGroup, MAYASTOR_LABEL}, + Error as LvmError, + }, lvs::{Error as LvsError, Lvol, Lvs}, nexus_uri::NexusBdevError, - subsys::PoolConfig, + subsys::{PoolBackend, PoolConfig}, }; use futures::FutureExt; use nix::errno::Errno; @@ -158,6 +162,20 @@ impl From for Status { } } +impl From for Status { + fn from(e: LvmError) -> Self { + match e { + LvmError::InvalidPoolType { + .. + } + | LvmError::Io { + .. 
+ } => Status::invalid_argument(e.to_string()), + _ => Status::internal(e.to_string()), + } + } +} + impl From for i32 { fn from(p: Protocol) -> Self { match p { @@ -176,6 +194,20 @@ impl From for Pool { state: PoolState::PoolOnline.into(), capacity: l.capacity(), used: l.used(), + pooltype: PoolType::Lvs as i32, + } + } +} + +impl From for Pool { + fn from(v: VolGroup) -> Self { + Self { + name: v.name().into(), + disks: v.disks(), + state: PoolState::PoolOnline.into(), + capacity: v.capacity(), + used: v.used(), + pooltype: PoolType::Lvm as i32, } } } @@ -222,6 +254,7 @@ impl From for rpc::mayastor::MayastorFeatures { fn from(f: MayastorFeatures) -> Self { Self { asymmetric_namespace_access: f.asymmetric_namespace_access, + lvm: f.lvm, } } } @@ -238,21 +271,49 @@ impl mayastor_server::Mayastor for MayastorSvc { async move { let args = request.into_inner(); - if args.disks.is_empty() { - return Err(Status::invalid_argument("Missing devices")); - } - - let rx = rpc_submit::<_, _, LvsError>(async move { - let pool = Lvs::create_or_import(args).await?; - // Capture current pool config and export to file. - PoolConfig::capture().export().await; - Ok(Pool::from(pool)) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) + let resp = match PoolBackend::try_from(args.pooltype)? { + PoolBackend::Lvs => { + if MayastorFeatures::get_features().lvm { + // check if a lvm pool already exists with the same name + if let Some(pool) = VolGroup::lookup_by_name(args.name.as_str(), MAYASTOR_LABEL).await { + return Err(Status::invalid_argument(format!("lvm pool with the name '{}' already exists", pool.name()))) + }; + // check if the disks are used by existing lvm pool + if let Some(pool) = VolGroup::lookup_by_disk(args.disks[0].as_str()).await { + return Err(Status::invalid_argument(format!("a lvm pool {} already uses the disks {:?}", pool.name(), pool.disks()))) + }; + } + let rx = rpc_submit::<_, _, LvsError>(async move { + let pool = Lvs::create_or_import(args).await?; + // Capture current pool config and export to file. + PoolConfig::capture().export().await; + Ok(Pool::from(pool)) + })?; + rx.await + .map_err(|_| Status::cancelled("cancelled"))? 
+ .map_err(Status::from) + .map(Response::new) + }, + PoolBackend::Lvm => { + if !MayastorFeatures::get_features().lvm { + return Err(Status::failed_precondition("lvm support not available")) + } + // check if a lvs pool already exists with the same name + if let Some(_pool) = Lvs::lookup(args.name.as_str()) { + return Err(Status::invalid_argument("lvs pool with the same name already exists")) + }; + // check if the disks are used by existing lvs pool + if Lvs::iter() + .map(|l| l.base_bdev().name()).any(|d| args.disks.contains(&d)){ + return Err(Status::invalid_argument("a lvs pool already uses the disk")) + }; + VolGroup::import_or_create(args).await + .map_err(Status::from) + .map(Pool::from) + .map(Response::new) + }, + }; + resp }, ) .await @@ -268,6 +329,23 @@ impl mayastor_server::Mayastor for MayastorSvc { async move { let args = request.into_inner(); info!("{:?}", args); + let mut lvm_pool_found = false; + if MayastorFeatures::get_features().lvm { + let res: Result<_, LvmError> = { + if let Some(pool) = + VolGroup::lookup_by_name(&args.name, MAYASTOR_LABEL) + .await + { + lvm_pool_found = true; + pool.destroy().await?; + } + Ok(Null {}) + }; + if lvm_pool_found { + return res.map_err(Status::from).map(Response::new); + } + } + let rx = rpc_submit::<_, _, LvsError>(async move { if let Some(pool) = Lvs::lookup(&args.name) { // Remove pool from current config and export to file. @@ -299,17 +377,37 @@ impl mayastor_server::Mayastor for MayastorSvc { GrpcClientContext::new(&request, function_name!()), async move { let rx = rpc_submit::<_, _, LvsError>(async move { - Ok(ListPoolsReply { - pools: Lvs::iter() - .map(|l| l.into()) - .collect::>(), - }) + Ok(Lvs::iter().map(|l| l.into()).collect::>()) })?; - rx.await + let rec = rx + .await .map_err(|_| Status::cancelled("cancelled"))? 
- .map_err(Status::from) - .map(Response::new) + .map_err(Status::from); + + let mut lvs_pools = match rec { + Ok(pools) => pools, + Err(e) => return Err(e), + }; + + if MayastorFeatures::get_features().lvm { + let mut lvm_pools = + match VolGroup::list(MAYASTOR_LABEL).await { + Ok(pools) => pools + .iter() + .map(|v| v.clone().into()) + .collect::>(), + Err(e) => { + error!("failed to fetch lvm pools {}", e); + vec![] + } + }; + lvs_pools.append(&mut lvm_pools); + } + + Ok(Response::new(ListPoolsReply { + pools: lvs_pools, + })) }, ) .await diff --git a/mayastor/src/pool.rs b/mayastor/src/pool.rs index 3c99c65bc..c8ba57a3e 100644 --- a/mayastor/src/pool.rs +++ b/mayastor/src/pool.rs @@ -128,6 +128,7 @@ impl From for rpc::Pool { state: rpc::PoolState::PoolOnline as i32, capacity: pool.get_capacity(), used: pool.get_capacity() - pool.get_free(), + pooltype: rpc::PoolType::Lvs as i32, } } } diff --git a/mayastor/src/subsys/config/pool.rs b/mayastor/src/subsys/config/pool.rs index 25f41c8af..98600bd2b 100644 --- a/mayastor/src/subsys/config/pool.rs +++ b/mayastor/src/subsys/config/pool.rs @@ -1,20 +1,20 @@ -use std::{fmt::Display, fs, path::Path, sync::Mutex}; - use futures::channel::oneshot; use once_cell::sync::{Lazy, OnceCell}; use serde::{Deserialize, Serialize}; +use std::{convert::TryFrom, fmt::Display, fs, path::Path, sync::Mutex}; use tonic::Status; use crate::{ bdev::VerboseError, core::{runtime, Cores, Mthread, Reactor, Share}, grpc::rpc_submit, + lvm::Error as LvmError, lvs::{Error as LvsError, Lvs}, pool::{Pool as SpdkPool, PoolsIter}, replica::ShareType, }; -use rpc::mayastor::CreatePoolRequest; +use rpc::mayastor::{CreatePoolRequest, PoolType}; static CONFIG_FILE: OnceCell = OnceCell::new(); @@ -177,6 +177,27 @@ impl From<&Pool> for CreatePoolRequest { Self { name: pool.name.clone(), disks: pool.disks.clone(), + pooltype: PoolType::Lvs as i32, + } + } +} + +#[derive(Debug, PartialOrd, PartialEq)] +pub enum PoolBackend { + Lvs, + Lvm, +} + +impl TryFrom for PoolBackend { + type Error = LvmError; + + fn try_from(value: i32) -> Result { + match value { + 0 => Ok(Self::Lvs), + 1 => Ok(Self::Lvm), + _ => Err(LvmError::InvalidPoolType { + value, + }), } } } diff --git a/mayastor/src/subsys/mod.rs b/mayastor/src/subsys/mod.rs index 34e34064e..ce2a2cce9 100644 --- a/mayastor/src/subsys/mod.rs +++ b/mayastor/src/subsys/mod.rs @@ -3,7 +3,7 @@ pub use config::{ opts::{NexusOpts, NvmeBdevOpts}, - pool::PoolConfig, + pool::{PoolBackend, PoolConfig}, Config, ConfigSubsystem, }; diff --git a/rpc/mayastor-api b/rpc/mayastor-api index cca648058..3cc0fede9 160000 --- a/rpc/mayastor-api +++ b/rpc/mayastor-api @@ -1 +1 @@ -Subproject commit cca648058810fb9ca0059e957163e687b9a11548 +Subproject commit 3cc0fede9e40f880884d60f1a18da5f7e1d8e6e8 From fcf7856fd5fa04325f974e20264d22890a36854c Mon Sep 17 00:00:00 2001 From: shubham Date: Fri, 20 Aug 2021 11:02:04 +0530 Subject: [PATCH 3/8] test(python): add lvm bdd tests Signed-off-by: shubham --- test/python/common/hdl.py | 4 +- test/python/docker-compose.yml | 9 ++ test/python/features/lvm.feature | 28 ++++++ test/python/test_bdd_lvm.py | 156 +++++++++++++++++++++++++++++++ test/python/test_bdd_pool.py | 14 +-- test/python/test_bdd_replica.py | 2 +- 6 files changed, 203 insertions(+), 10 deletions(-) create mode 100644 test/python/features/lvm.feature create mode 100644 test/python/test_bdd_lvm.py diff --git a/test/python/common/hdl.py b/test/python/common/hdl.py index 4fe0b8336..9f42e0f02 100644 --- a/test/python/common/hdl.py +++ 
b/test/python/common/hdl.py @@ -80,13 +80,13 @@ def bdev_unshare(self, name): def bdev_destroy(self, uri): return self.bdev.Destroy(pb.BdevUri(uri=str(uri))) - def pool_create(self, name, bdev): + def pool_create(self, name, bdev, pooltype): """Create a pool with given name on this node using the bdev as the backend device. The bdev is implicitly created.""" disks = [] disks.append(bdev) - return self.ms.CreatePool(pb.CreatePoolRequest(name=name, disks=disks)) + return self.ms.CreatePool(pb.CreatePoolRequest(name=name, disks=disks, pooltype=pooltype)) def pool_destroy(self, name): """Destroy the pool.""" diff --git a/test/python/docker-compose.yml b/test/python/docker-compose.yml index 40d20f1da..8e1ef51dd 100644 --- a/test/python/docker-compose.yml +++ b/test/python/docker-compose.yml @@ -11,6 +11,7 @@ services: - MY_POD_IP=10.0.0.2 - NEXUS_NVMF_ANA_ENABLE=1 - NEXUS_NVMF_RESV_ENABLE=1 + - LVM_ENABLE=1 command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock networks: mayastor_net: @@ -24,12 +25,20 @@ services: security_opt: # we can set this to a JSON file to allow per syscall access - seccomp:unconfined + privileged: true volumes: - ${SRCDIR}:${SRCDIR} - /nix:/nix - /dev/hugepages:/dev/hugepages - /tmp:/tmp - /var/tmp:/var/tmp + - /run/udev:/run/udev + - /dev:/dev + - /sbin:/sbin + - /usr/lib:/usr/lib + - /lib:/lib + - /lib64:/lib64 + - /etc/lvm:/etc/lvm ms1: container_name: "ms1" image: rust:latest diff --git a/test/python/features/lvm.feature b/test/python/features/lvm.feature new file mode 100644 index 000000000..78f78b00f --- /dev/null +++ b/test/python/features/lvm.feature @@ -0,0 +1,28 @@ +Feature: LVM pool and replica support + + Background: + Given a mayastor instance "ms0" + + Scenario: Determine if the mayastor instance supports LVM pools + When the user calls the gRPC mayastor info request + Then the instance shall report if it supports the LVM feature + + Scenario: creating a pool with an losetup disk + When the user creates a pool specifying a URI representing an loop disk + Then the lvm pool should be created + + Scenario: creating a lvs pool with an AIO disk + When the user creates a lvs pool specifying a URI representing an aio disk + Then the lvs pool should be created + + Scenario: Listing LVM pools and LVS pools + When a user calls the listPool() gRPC method + Then the pool should be listed reporting the correct pooltype field + + Scenario: destroying a lvm pool with an losetup disk + When the user destroys a pool specifying type as lvm + Then the lvm pool should be removed + + Scenario: destroying a lvs pool with an AIO disk + When the user destroys a pool specifying type as lvs + Then the lvs pool should be removed diff --git a/test/python/test_bdd_lvm.py b/test/python/test_bdd_lvm.py new file mode 100644 index 000000000..6e6015fc6 --- /dev/null +++ b/test/python/test_bdd_lvm.py @@ -0,0 +1,156 @@ +"""LVM pool and replica support feature tests.""" + +import subprocess +import pytest +from pytest_bdd import ( + given, + scenario, + then, + when, + parsers, +) + +from common.command import run_cmd +from common.mayastor import mayastor_mod +import grpc +import mayastor_pb2 as pb + + +@scenario('features/lvm.feature', 'Determine if the mayastor instance supports LVM pools') +def test_determine_if_the_mayastor_instance_supports_lvm_pools(): + """Determine if the mayastor instance supports LVM pools.""" + +@scenario('features/lvm.feature', 'creating a pool with an losetup disk') +def test_creating_a_pool_with_an_losetup_disk(): + """creating a pool with 
an losetup disk.""" + +@scenario('features/lvm.feature', 'creating a lvs pool with an AIO disk') +def test_creating_a_pool_with_an_aio_disk(): + """creating a pool with an AIO disk.""" + +@scenario('features/lvm.feature', 'Listing LVM pools and LVS pools') +def test_listing_lvm_pools_and_lvs_pools(): + """Listing LVM pools and LVS pools.""" + +@scenario('features/lvm.feature', 'destroying a lvm pool with an losetup disk') +def test_destroying_a_lvm_pool_with_an_losetup_disk(): + """destroying a lvm pool with an losetup disk.""" + +@scenario('features/lvm.feature', 'destroying a lvs pool with an AIO disk') +def test_destroying_a_lvs_pool_with_an_aio_disk(): + """destroying a lvs pool with an AIO disk.""" + +@pytest.fixture +def get_mayastor_info(get_mayastor_instance): + return get_mayastor_instance.mayastor_info() + +@given( + parsers.parse('a mayastor instance "{name}"'), + target_fixture="get_mayastor_instance", +) +def get_mayastor_instance(mayastor_mod, name): + return mayastor_mod[f"{name}"] + + +@pytest.fixture +def volgrp_with_losetup_disk(): + file = "/tmp/ms0-disk0.img" + name = "lvmpool" + run_cmd(f"rm -f '{file}'", True) + run_cmd(f"truncate -s 64M '{file}'", True) + out = subprocess.run(f"sudo losetup -f '{file}' --show", shell=True, check=True, capture_output=True) + disk = out.stdout.decode('ascii').strip('\n') + run_cmd(f"sudo pvcreate '{disk}'", True) + run_cmd(f"sudo vgcreate '{name}' '{disk}'", True) + yield name + run_cmd(f"rm -f '{file}'", True) + +@pytest.fixture +def image_file(): + name = "/tmp/ms0-disk1.img" + run_cmd(f"rm -f '{name}'", True) + run_cmd(f"truncate -s 64M '{name}'", True) + yield name + run_cmd(f"rm -f '{name}'", True) + +@pytest.fixture +def create_pool(get_mayastor_instance): + def create(name, disks, pooltype): + get_mayastor_instance.ms.CreatePool( + pb.CreatePoolRequest(name=name, disks=disks, pooltype=pooltype) + ) + + yield create + +@pytest.fixture +def find_pool(get_mayastor_instance): + def find(name): + for pool in get_mayastor_instance.ms.ListPools(pb.Null()).pools: + if pool.name == name: + return pool + return None + + yield find + + +@when('the user calls the gRPC mayastor info request', target_fixture="get_lvm_feature") +def get_lvm_feature(get_mayastor_instance, get_mayastor_info): + return get_mayastor_info.supportedFeatures.lvm + +@then('the instance shall report if it supports the LVM feature') +def the_instance_shall_report_if_it_supports_the_lvm_feature(get_mayastor_instance, get_mayastor_info, get_lvm_feature): + assert get_lvm_feature == True + + +@when('the user creates a pool specifying a URI representing an loop disk') +def the_user_creates_a_pool_specifying_a_uri_representing_an_loop_disk(get_mayastor_instance, volgrp_with_losetup_disk, create_pool): + create_pool(f"{volgrp_with_losetup_disk}", [], pb.Lvm) + + +@then('the lvm pool should be created') +def the_lvm_pool_should_be_created(find_pool): + assert find_pool("lvmpool") != None + + +@when('the user destroys a pool specifying type as lvm') +def the_user_destroys_a_pool_specifying_type_as_lvm(get_mayastor_instance): + get_mayastor_instance.ms.DestroyPool(pb.DestroyPoolRequest(name="lvmpool")) + + +@then('the lvm pool should be removed') +def the_lvm_pool_should_be_removed(find_pool): + assert find_pool("lvmpool") == None + + +@when('the user creates a lvs pool specifying a URI representing an aio disk') +def create_pool_from_aio_disk(get_mayastor_instance, create_pool, image_file): + create_pool("lvspool", [f"aio://{image_file}"], pb.Lvs) + +@then('the lvs pool should 
be created') +def the_lvm_pool_should_be_created(find_pool): + assert find_pool("lvspool") != None + +@when('the user destroys a pool specifying type as lvs') +def the_user_destroys_a_pool_specifying_type_as_lvs(get_mayastor_instance): + get_mayastor_instance.ms.DestroyPool(pb.DestroyPoolRequest(name="lvspool")) + +@then('the lvs pool should be removed') +def the_lvs_pool_should_be_removed(find_pool): + assert find_pool("lvspool") == None + + +@when('a user calls the listPool() gRPC method', target_fixture="list_pools") +def list_pools(get_mayastor_instance): + return get_mayastor_instance.ms.ListPools(pb.Null()).pools + +@then('the pool should be listed reporting the correct pooltype field') +def the_pool_should_be_listed_reporting_the_correct_pooltype_field(list_pools): + for pool in list_pools: + assert pool.capacity == 62914560 + assert pool.used == 0 + assert pool.state == pb.POOL_ONLINE + if pool.name == "lvmpool": + assert pool.pooltype == pb.Lvm + if pool.name == "lvspool": + assert pool.pooltype == pb.Lvs + diff --git a/test/python/test_bdd_pool.py b/test/python/test_bdd_pool.py index 312027620..5d766b332 100644 --- a/test/python/test_bdd_pool.py +++ b/test/python/test_bdd_pool.py @@ -73,9 +73,9 @@ def replica_pools(get_mayastor_instance): @pytest.fixture def create_pool(get_mayastor_instance, replica_pools): - def create(name, disks): + def create(name, disks, pooltype): pool = get_mayastor_instance.ms.CreatePool( - pb.CreatePoolRequest(name=name, disks=disks) + pb.CreatePoolRequest(name=name, disks=disks, pooltype=pooltype) ) replica_pools[name] = pool @@ -92,13 +92,13 @@ def get_mayastor_instance(mayastor_mod, name): @given(parsers.parse('a pool "{name}"'), target_fixture="get_pool_name") def get_pool_name(get_mayastor_instance, create_pool, name): - create_pool(name, ["malloc:///disk0?size_mb=100"]) + create_pool(name, ["malloc:///disk0?size_mb=100"], pb.Lvs) return name @when("the user creates a pool specifying a URI representing an aio disk") def create_pool_from_aio_disk(get_mayastor_instance, create_pool, image_file): - create_pool("p0", [f"aio://{image_file}"]) + create_pool("p0", [f"aio://{image_file}"], pb.Lvs) @when("the user attempts to create a pool specifying a disk with an invalid block size") @@ -106,7 +106,7 @@ def attempt_to_create_pool_from_disk_with_invalid_block_size( get_mayastor_instance, create_pool ): with pytest.raises(grpc.RpcError) as error: - create_pool("p0", "malloc:///disk0?size_mb=100&blk_size=1024") + create_pool("p0", "malloc:///disk0?size_mb=100&blk_size=1024", pb.Lvs) assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT @@ -114,14 +114,14 @@ def attempt_to_create_pool_from_disk_with_invalid_block_size( def attempt_to_create_pool_from_multiple_disks(get_mayastor_instance, create_pool): with pytest.raises(grpc.RpcError) as error: create_pool( - "p0", ["malloc:///disk0?size_mb=100", "malloc:///disk1?size_mb=100"] + "p0", ["malloc:///disk0?size_mb=100", "malloc:///disk1?size_mb=100"], pb.Lvs ) assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT @when("the user creates a pool with the name of an existing pool") def create_pool_that_already_exists(get_mayastor_instance, create_pool, get_pool_name): - create_pool(get_pool_name, ["malloc:///disk0?size_mb=100"]) + create_pool(get_pool_name, ["malloc:///disk0?size_mb=100"], pb.Lvs) @when("the user destroys a pool that does not exist") diff --git a/test/python/test_bdd_replica.py b/test/python/test_bdd_replica.py index c0f2cabc4..c9b4261ca 100644 --- a/test/python/test_bdd_replica.py 
+++ b/test/python/test_bdd_replica.py @@ -109,7 +109,7 @@ def mayastor_instance(mayastor_mod): @pytest.fixture(scope="module") def mayastor_pool(mayastor_instance): pool = mayastor_instance.ms.CreatePool( - pb.CreatePoolRequest(name="p0", disks=["malloc:///disk0?size_mb=512"]) + pb.CreatePoolRequest(name="p0", disks=["malloc:///disk0?size_mb=512"], pooltype=pb.Lvs) ) yield pool.name mayastor_instance.ms.DestroyPool(pb.DestroyPoolRequest(name=pool.name)) From 902a72fb4e35cc5cad3405523fc18ac7915fdbf2 Mon Sep 17 00:00:00 2001 From: shubham Date: Tue, 31 Aug 2021 12:25:06 +0530 Subject: [PATCH 4/8] test(rust): add pooltype to grpc requests Signed-off-by: shubham --- mayastor/tests/lvs_pool.rs | 9 ++++++++- mayastor/tests/lvs_pool_rpc.rs | 3 +++ mayastor/tests/nexus_multipath.rs | 3 +++ mayastor/tests/nexus_with_local.rs | 2 ++ mayastor/tests/replica_snapshot.rs | 3 +++ mayastor/tests/replica_uri.rs | 2 ++ 6 files changed, 21 insertions(+), 1 deletion(-) diff --git a/mayastor/tests/lvs_pool.rs b/mayastor/tests/lvs_pool.rs index 3506a5694..b0e1b4d7b 100644 --- a/mayastor/tests/lvs_pool.rs +++ b/mayastor/tests/lvs_pool.rs @@ -5,7 +5,7 @@ use mayastor::{ nexus_uri::bdev_create, subsys::NvmfSubsystem, }; -use rpc::mayastor::CreatePoolRequest; +use rpc::mayastor::{CreatePoolRequest, PoolType}; pub mod common; @@ -34,6 +34,7 @@ async fn lvs_pool_test() { Lvs::create_or_import(CreatePoolRequest { name: "tpool".into(), disks: vec!["aio:///tmp/disk1.img".into()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); @@ -46,6 +47,7 @@ async fn lvs_pool_test() { assert!(Lvs::create_or_import(CreatePoolRequest { name: "tpool".into(), disks: vec!["aio:///tmp/disk1.img".into()], + pooltype: PoolType::Lvs as i32, }) .await .is_ok()) @@ -142,6 +144,7 @@ async fn lvs_pool_test() { let pool2 = Lvs::create_or_import(CreatePoolRequest { name: "tpool2".to_string(), disks: vec!["malloc:///malloc0?size_mb=64".to_string()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); @@ -175,6 +178,7 @@ async fn lvs_pool_test() { let pool = Lvs::create_or_import(CreatePoolRequest { name: "tpool".to_string(), disks: vec!["aio:///tmp/disk1.img".to_string()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); @@ -306,6 +310,7 @@ async fn lvs_pool_test() { let pool = Lvs::create_or_import(CreatePoolRequest { name: "tpool".into(), disks: vec!["aio:///tmp/disk1.img".into()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); @@ -334,6 +339,7 @@ async fn lvs_pool_test() { Lvs::create_or_import(CreatePoolRequest { name: "jpool".into(), disks: vec!["aio:///tmp/disk1.img".into()], + pooltype: PoolType::Lvs as i32, }) .await .err() @@ -348,6 +354,7 @@ async fn lvs_pool_test() { let pool = Lvs::create_or_import(CreatePoolRequest { name: "tpool2".into(), disks: vec!["/tmp/disk2.img".into()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); diff --git a/mayastor/tests/lvs_pool_rpc.rs b/mayastor/tests/lvs_pool_rpc.rs index 39bed762f..ec57f038e 100644 --- a/mayastor/tests/lvs_pool_rpc.rs +++ b/mayastor/tests/lvs_pool_rpc.rs @@ -4,6 +4,7 @@ use rpc::mayastor::{ DestroyPoolRequest, DestroyReplicaRequest, Null, + PoolType, ShareReplicaRequest, }; @@ -30,6 +31,7 @@ async fn lvs_pool_rpc() { .create_pool(CreatePoolRequest { name: "tpool".to_string(), disks: vec!["malloc:///disk0?size_mb=64".into()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); @@ -38,6 +40,7 @@ async fn lvs_pool_rpc() { .create_pool(CreatePoolRequest { name: "tpool".to_string(), disks: vec!["malloc:///disk0?size_mb=64".into()], + pooltype: PoolType::Lvs 
as i32, }) .await .unwrap(); diff --git a/mayastor/tests/nexus_multipath.rs b/mayastor/tests/nexus_multipath.rs index 5dc38fc24..cf76421ee 100644 --- a/mayastor/tests/nexus_multipath.rs +++ b/mayastor/tests/nexus_multipath.rs @@ -12,6 +12,7 @@ use rpc::mayastor::{ DestroyNexusRequest, Null, NvmeAnaState, + PoolType, PublishNexusRequest, ShareProtocolNexus, }; @@ -120,6 +121,7 @@ async fn nexus_multipath() { .create_pool(CreatePoolRequest { name: POOL_NAME.to_string(), disks: vec!["malloc:///disk0?size_mb=64".into()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); @@ -346,6 +348,7 @@ async fn nexus_resv_acquire() { .create_pool(CreatePoolRequest { name: POOL_NAME.to_string(), disks: vec!["malloc:///disk0?size_mb=64".into()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); diff --git a/mayastor/tests/nexus_with_local.rs b/mayastor/tests/nexus_with_local.rs index 43c266d8b..defc823f6 100644 --- a/mayastor/tests/nexus_with_local.rs +++ b/mayastor/tests/nexus_with_local.rs @@ -9,6 +9,7 @@ use rpc::mayastor::{ CreatePoolRequest, CreateReplicaRequest, Null, + PoolType, RemoveChildNexusRequest, }; @@ -25,6 +26,7 @@ async fn create_replicas(h: &mut RpcHandle) { .create_pool(CreatePoolRequest { name: pool(), disks: vec!["malloc:///disk0?size_mb=64".into()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); diff --git a/mayastor/tests/replica_snapshot.rs b/mayastor/tests/replica_snapshot.rs index 86358b1fa..c2b44d354 100644 --- a/mayastor/tests/replica_snapshot.rs +++ b/mayastor/tests/replica_snapshot.rs @@ -7,6 +7,7 @@ use mayastor::{ use rpc::mayastor::{ CreatePoolRequest, CreateReplicaRequest, + PoolType, ShareProtocolReplica, ShareReplicaRequest, }; @@ -50,6 +51,7 @@ async fn replica_snapshot() { .create_pool(CreatePoolRequest { name: POOL2_NAME.to_string(), disks: vec!["malloc:///disk0?size_mb=96".into()], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); @@ -75,6 +77,7 @@ async fn replica_snapshot() { Lvs::create_or_import(CreatePoolRequest { name: POOL1_NAME.to_string(), disks: vec![format!("aio://{}", DISKNAME1)], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); diff --git a/mayastor/tests/replica_uri.rs b/mayastor/tests/replica_uri.rs index c82495719..53e7367df 100644 --- a/mayastor/tests/replica_uri.rs +++ b/mayastor/tests/replica_uri.rs @@ -5,6 +5,7 @@ use rpc::mayastor::{ CreatePoolRequest, CreateReplicaRequest, Null, + PoolType, Replica, ShareProtocolReplica, ShareReplicaRequest, @@ -53,6 +54,7 @@ async fn replica_uri() { "malloc:///disk0?size_mb={}", DISKSIZE_KB / 1024 )], + pooltype: PoolType::Lvs as i32, }) .await .unwrap(); From 2d70507292ec4735503afa1d020ea3c6fddf4b55 Mon Sep 17 00:00:00 2001 From: Akhil Mohan Date: Wed, 8 Sep 2021 12:19:03 +0530 Subject: [PATCH 5/8] feat(lvm): add volume support to lvm module - add support to create/remove/list/change lvm volumes Signed-off-by: Akhil Mohan --- mayastor/src/lvm/mod.rs | 1 + mayastor/src/lvm/pool.rs | 3 +- mayastor/src/lvm/volume.rs | 258 +++++++++++++++++++++++++++++++++++++ 3 files changed, 261 insertions(+), 1 deletion(-) create mode 100644 mayastor/src/lvm/volume.rs diff --git a/mayastor/src/lvm/mod.rs b/mayastor/src/lvm/mod.rs index 34fad8178..2ccf62141 100644 --- a/mayastor/src/lvm/mod.rs +++ b/mayastor/src/lvm/mod.rs @@ -2,3 +2,4 @@ pub use error::Error; mod error; pub mod pool; +pub mod volume; diff --git a/mayastor/src/lvm/pool.rs b/mayastor/src/lvm/pool.rs index 0bdab7dc5..3ba87124d 100644 --- a/mayastor/src/lvm/pool.rs +++ b/mayastor/src/lvm/pool.rs @@ -37,9 +37,10 @@ use 
rpc::mayastor::CreatePoolRequest;
 const PVS_COMMAND: &str = "pvs";
 const VGCHANGE_COMMAND: &str = "vgchange";
 const VGS_COMMAND: &str = "vgs";
+pub const MAYASTOR_TAG: &str = "mayastor";
 pub const MAYASTOR_LABEL: &str = "@mayastor";
 
-fn deserialize_number_from_string<'de, T, D>(
+pub fn deserialize_number_from_string<'de, T, D>(
     deserializer: D,
 ) -> Result<T, D::Error>
 where
diff --git a/mayastor/src/lvm/volume.rs b/mayastor/src/lvm/volume.rs
new file mode 100644
index 000000000..42d7baf5e
--- /dev/null
+++ b/mayastor/src/lvm/volume.rs
@@ -0,0 +1,258 @@
+use crate::lvm::error::Error;
+use serde::de::{Deserialize, Deserializer};
+use tokio::process::Command;
+use rpc::mayastor::{CreateReplicaRequest};
+use crate::lvm::pool::{
+    MAYASTOR_TAG,
+    MAYASTOR_LABEL,
+    deserialize_number_from_string,
+};
+
+
+const LVCREATE_COMMAND: &str = "lvcreate";
+const LVS_COMMAND: &str = "lvs";
+const LVREMOVE_COMMAND: &str = "lvremove";
+const LVCHANGE_COMMAND: &str = "lvchange";
+
+fn deserialize_vec_from_string_sequence<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
+    where
+        D: Deserializer<'de>,
+{
+    let sequence = String::deserialize(deserializer)?;
+    Ok(
+        sequence
+            .split(",")
+            .map(|item| item.to_owned())
+            .collect()
+    )
+}
+
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LogicalVolume {
+    lv_uuid: String,
+    lv_name: String,
+    vg_name: String,
+    lv_path: String,
+    #[serde(rename = "lv_size")]
+    #[serde(deserialize_with = "deserialize_number_from_string")]
+    size: u64,
+    #[serde(rename = "lv_tags")]
+    #[serde(deserialize_with = "deserialize_vec_from_string_sequence")]
+    tags: Vec<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct LogicalVolumeList {
+    lv: Vec<LogicalVolume>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct LogicalVolsReport {
+    report: Vec<LogicalVolumeList>,
+}
+
+impl LogicalVolume {
+
+    pub async fn create(req: CreateReplicaRequest) -> Result<Self, Error> {
+        let lv_name = req.uuid.as_str();
+        let vg_name = req.pool.as_str();
+        let mut size = req.size.to_string();
+        // need to append the units as bytes
+        size.push_str("b");
+
+        // add the necessary tags for the lvm volume
+        let mut add_tag_command: Vec<&str> = Vec::new();
+        let share_tag = format!("--addtag={}", req.share);
+        let mayastor_tag = format!("--addtag={}", MAYASTOR_TAG);
+        add_tag_command.push(mayastor_tag.as_str());
+        add_tag_command.push(share_tag.as_str());
+
+        let output = Command::new(LVCREATE_COMMAND)
+            .args(&["-L", size.as_str()])
+            .args(&["-n", lv_name])
+            .args(add_tag_command)
+            .arg(vg_name)
+            .output()
+            .await?;
+
+        if !output.status.success() {
+            let msg = String::from_utf8(output.stderr).map_or_else(
+                |e: std::string::FromUtf8Error| {
+                    format!("failed to parse stderr for lvcreate: {}", e.to_string())
+                },
+                |s| s,
+            );
+            return Err(Error::FailedExec{
+                err: msg,
+            });
+        }
+
+        let lv_path = format!("/dev/{}/{}", vg_name, lv_name);
+
+        info!("lvm volume {} created", lv_path);
+
+        Ok(
+            LogicalVolume {
+                lv_path,
+                lv_name: req.uuid,
+                vg_name: req.pool,
+                size: req.size,
+                lv_uuid: String::new(),
+                tags: vec![format!("{}", req.share), MAYASTOR_TAG.to_string()],
+            }
+        )
+    }
+
+    pub async fn lookup_by_lv_name(lv_name: String) -> Option<Self> {
+        Self::list("")
+            .await
+            .ok()?
+            .iter()
+            .find(|v| v.lv_name == lv_name)
+            .cloned()
+    }
+
+    pub async fn list(_vg_name: &str) -> Result<Vec<Self>, Error> {
+        let args = vec![
+            "--reportformat=json",
+            "--options=lv_name,vg_name,lv_size,lv_uuid,lv_tags,lv_path",
+            "--units=b",
+            "--nosuffix",
+            MAYASTOR_LABEL
+        ];
+
+        /*if !vg_name.is_empty() {
+            let select_option = "select vg_name=".to_owned() + vg_name;
+            args.push(select_option.as_str());
+        }*/
+
+        let output = Command::new(LVS_COMMAND)
+            .args(args.as_slice())
+            .output()
+            .await?;
+
+        if !output.status.success() {
+            let msg = String::from_utf8(output.stderr).map_or_else(
+                |e: std::string::FromUtf8Error| {
+                    format!("failed to parse stderr for lvs: {}", e.to_string())
+                },
+                |s| s,
+            );
+            return Err(Error::FailedExec{
+                err: msg,
+            });
+        }
+
+        let json_result: LogicalVolsReport = serde_json::from_slice(output.stdout.as_slice())
+            .map_err(|e| Error::FailedParsing {
+                err: e.to_string()
+            })?;
+
+        let volumes = json_result.report[0].lv.clone();
+
+        Ok(volumes)
+    }
+
+    pub async fn remove(self) -> Result<(), Error> {
+
+        let output = Command::new(LVREMOVE_COMMAND)
+            .arg(self.lv_path)
+            .arg("-y")
+            .output()
+            .await?;
+
+        if !output.status.success() {
+            let msg = String::from_utf8(output.stderr).map_or_else(
+                |e: std::string::FromUtf8Error| {
+                    format!("failed to parse stderr for lvremove: {}", e.to_string())
+                },
+                |s| s,
+            );
+            return Err(Error::FailedExec{
+                err: msg,
+            });
+        }
+
+        info!("lvm volume {} deleted", self.lv_name);
+
+        Ok(())
+
+    }
+
+    pub async fn change_share_tag(self, share_protocol: i32) -> Result<(), Error> {
+        // remove the first tag, which corresponds to the share protocol
+        Self::delete_tag(&self, self.tags[0].as_str()).await?;
+        Self::add_tag(&self, format!("{}", share_protocol).as_str()).await?;
+        info!("share tag changed to {} for {}", share_protocol, self.lv_name);
+        Ok(())
+    }
+
+    async fn add_tag(&self, tag: &str) -> Result<(), Error> {
+        let output = Command::new(LVCHANGE_COMMAND)
+            .args(&["--addtag", tag])
+            .arg(&self.lv_path)
+            .output()
+            .await?;
+        if !output.status.success() {
+            let msg = String::from_utf8(output.stderr).map_or_else(
+                |e: std::string::FromUtf8Error| {
+                    format!(
+                        "failed to parse stderr for lvchange addtag: {}",
+                        e.to_string()
+                    )
+                },
+                |s| s,
+            );
+            return Err(Error::FailedExec {
+                err: msg,
+            });
+        }
+        Ok(())
+    }
+
+    async fn delete_tag(&self, tag: &str) -> Result<(), Error> {
+        let output = Command::new(LVCHANGE_COMMAND)
+            .args(&["--deltag", tag])
+            .arg(&self.lv_path)
+            .output()
+            .await?;
+        if !output.status.success() {
+            let msg = String::from_utf8(output.stderr).map_or_else(
+                |e: std::string::FromUtf8Error| {
+                    format!(
+                        "failed to parse stderr for lvchange deltag: {}",
+                        e.to_string()
+                    )
+                },
+                |s| s,
+            );
+            return Err(Error::FailedExec {
+                err: msg,
+            });
+        }
+        Ok(())
+    }
+
+    pub fn uuid(&self) -> &str {
+        &self.lv_uuid
+    }
+    pub fn name(&self) -> &str {
+        &self.lv_name
+    }
+    pub fn vg_name(&self) -> &str {
+        &self.vg_name
+    }
+    pub fn lv_path(&self) -> &str {
+        &self.lv_path
+    }
+    pub fn size(&self) -> u64 {
+        self.size
+    }
+    pub fn tags(&self) -> &Vec<String> {
+        &self.tags
+    }
+    pub fn share(&self) -> i32 {
+        self.tags()[0].parse::<i32>().unwrap()
+    }
+}
\ No newline at end of file

From 10f6080692e6563a243a47c88cc7635266ea7877 Mon Sep 17 00:00:00 2001
From: Akhil Mohan
Date: Wed, 8 Sep 2021 12:20:10 +0530
Subject: [PATCH 6/8] feat(grpc): lvm volume support to mayastor grpc

- add support to create/destroy/list replicas backed by lvm volume
- add support to share/unshare replicas backed by lvm volume
- add
support to auto import replicas when lvm pool is imported Signed-off-by: Akhil Mohan --- mayastor/src/grpc/mayastor_grpc.rs | 250 ++++++++++++++++++++++------- mayastor/src/replica.rs | 1 + 2 files changed, 196 insertions(+), 55 deletions(-) diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index 490635354..a7bead0f9 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -40,10 +40,11 @@ use crate::{ host::{blk_device, resource}, lvm::{ pool::{VolGroup, MAYASTOR_LABEL}, + volume::{LogicalVolume}, Error as LvmError, }, lvs::{Error as LvsError, Lvol, Lvs}, - nexus_uri::NexusBdevError, + nexus_uri::{bdev_create, bdev_destroy, NexusBdevError}, subsys::{PoolBackend, PoolConfig}, }; use futures::FutureExt; @@ -232,6 +233,7 @@ impl From for Replica { size: l.size(), share: l.shared().unwrap().into(), uri: l.share_uri().unwrap(), + pooltype: PoolType::Lvs as i32, } } } @@ -250,6 +252,23 @@ impl From for ReplicaV2 { } } +impl From for Replica { + fn from(l: LogicalVolume) -> Self { + Self { + // name is used as uuid also, although we can use lv_uuid + uuid: l.name().into(), + pool: l.vg_name().into(), + // not supporting thin pool at the moment + thin: false, + size: l.size(), + share: l.share(), + //lv_path is used as uri + uri: l.lv_path().into(), + pooltype: PoolType::Lvm as i32, + } + } +} + impl From for rpc::mayastor::MayastorFeatures { fn from(f: MayastorFeatures) -> Self { Self { @@ -307,10 +326,16 @@ impl mayastor_server::Mayastor for MayastorSvc { .map(|l| l.base_bdev().name()).any(|d| args.disks.contains(&d)){ return Err(Status::invalid_argument("a lvs pool already uses the disk")) }; - VolGroup::import_or_create(args).await + let res = VolGroup::import_or_create(args).await .map_err(Status::from) .map(Pool::from) - .map(Response::new) + .map(Response::new); + let volumes = LogicalVolume::list("").await + .map_err(Status::from)?; + for volume in volumes.iter() { + let _res = create_and_share_bdev(volume.lv_path(), volume.share()).await; + } + res }, }; resp @@ -419,63 +444,108 @@ impl mayastor_server::Mayastor for MayastorSvc { request: Request, ) -> GrpcResult { self.locked(GrpcClientContext::new(&request, function_name!()), async move { - let rx = rpc_submit(async move { - let args = request.into_inner(); - if Lvs::lookup(&args.pool).is_none() { - return Err(LvsError::Invalid { - source: Errno::ENOSYS, - msg: format!("Pool {} not found", args.pool), - }); - } - - if let Some(b) = Bdev::lookup_by_name(&args.uuid) { - let lvol = Lvol::try_from(b)?; - return Ok(Replica::from(lvol)); - } + let args = request.into_inner(); if !matches!( - Protocol::try_from(args.share)?, - Protocol::Off | Protocol::Nvmf - ) { + Protocol::try_from(args.share)?, + Protocol::Off | Protocol::Nvmf + ) { return Err(LvsError::ReplicaShareProtocol { value: args.share, - }); + }).map_err(Status::from); } - let p = Lvs::lookup(&args.pool).unwrap(); - match p.create_lvol(&args.uuid, args.size, None, false).await { - Ok(lvol) - if Protocol::try_from(args.share)? == Protocol::Nvmf => - { - match lvol.share_nvmf(None).await { - Ok(s) => { - debug!("created and shared {} as {}", lvol, s); - Ok(Replica::from(lvol)) + match PoolBackend::try_from(args.pooltype)? 
{ + PoolBackend::Lvs => { + let rx = rpc_submit(async move { + if Lvs::lookup(&args.pool).is_none() { + return Err(LvsError::Invalid { + source: Errno::ENOSYS, + msg: format!("Pool {} not found", args.pool), + }); } - Err(e) => { - debug!( + + if let Some(b) = Bdev::lookup_by_name(&args.uuid) { + let lvol = Lvol::try_from(b)?; + return Ok(Replica::from(lvol)); + } + + let p = Lvs::lookup(&args.pool).unwrap(); + match p.create_lvol(&args.uuid, args.size, None, false).await { + Ok(lvol) + if Protocol::try_from(args.share)? == Protocol::Nvmf => + { + match lvol.share_nvmf(None).await { + Ok(s) => { + debug!("created and shared {} as {}", lvol, s); + Ok(Replica::from(lvol)) + } + Err(e) => { + debug!( "failed to share created lvol {}: {} (destroying)", lvol, e.to_string() ); - let _ = lvol.destroy().await; - Err(e) + let _ = lvol.destroy().await; + Err(e) + } + } + } + Ok(lvol) => { + debug!("created lvol {}", lvol); + Ok(Replica::from(lvol)) + } + Err(e) => Err(e), } + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + PoolBackend::Lvm => { + + if !MayastorFeatures::get_features().lvm { + return Err(Status::failed_precondition("lvm support not available")) } + + if let None = VolGroup::lookup_by_name(args.pool.as_str(), MAYASTOR_LABEL).await { + return Err(Status::invalid_argument(format!("lvm pool {} does not exist", args.pool))) + }; + + if let Some(replica) = LogicalVolume::lookup_by_lv_name(args.uuid.as_str().to_string()).await { + return Ok(Response::new(Replica::from(replica))) + } + + let vol_uuid = args.uuid.as_str().to_string(); + let share_protocol = args.share; + + let vol = LogicalVolume::create(args).await + .map_err(|e| { + println!("{:#?}", e); + LvsError::RepCreate { + source: Errno::UnknownErrno, + name: vol_uuid, + } + }) + .map_err(Status::from)?; + + create_and_share_bdev(&vol.lv_path(), share_protocol).await + .map(|b| { + let mut replica = Replica::from(vol.clone()); + replica.uri = b; + replica + }) + .map_err(|e| { + let _r = vol.remove(); + e + }) + .map(Response::new) + } - Ok(lvol) => { - debug!("created lvol {}", lvol); - Ok(Replica::from(lvol)) - } - Err(e) => Err(e), } - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? 
- .map_err(Status::from) - .map(Response::new) }).await } @@ -554,7 +624,33 @@ impl mayastor_server::Mayastor for MayastorSvc { ) -> GrpcResult { self.locked(GrpcClientContext::new(&request, function_name!()), async { let args = request.into_inner(); - let rx = rpc_submit::<_, _, LvsError>(async move { + + if let Some(replica) = LogicalVolume::lookup_by_lv_name(args.uuid.as_str().to_string()).await { + + let lv_path = replica.lv_path().to_owned(); + let share = replica.share(); + + let rx1 = rpc_submit::<_, _, Status>(async move { + + if let Some(bdev) = Bdev::lookup_by_name(lv_path.as_str()) { + if matches!(Protocol::try_from(share)?, + Protocol::Nvmf | Protocol::Iscsi) { + bdev.unshare().await?; + info!("unshared replica {}", lv_path); + } + bdev_destroy(bdev.bdev_uri().unwrap().as_str()).await?; + info!("destroyed bdev {}", lv_path); + } + Ok(Null{}) + })?; + rx1.await.map_err(|_| Status::cancelled("cancelled"))?.map_err(Status::from)?; + + let _rs = replica.remove().await?; + + return Ok(Response::new(Null {})) + } + + let rx2 = rpc_submit::<_, _, LvsError>(async move { if let Some(bdev) = Bdev::lookup_by_name(&args.uuid) { let lvol = Lvol::try_from(bdev)?; lvol.destroy().await?; @@ -562,7 +658,7 @@ impl mayastor_server::Mayastor for MayastorSvc { Ok(Null {}) })?; - rx.await + rx2.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) .map(Response::new) @@ -577,24 +673,39 @@ impl mayastor_server::Mayastor for MayastorSvc { ) -> GrpcResult { self.locked(GrpcClientContext::new(&request, function_name!()), async { let rx = rpc_submit::<_, _, LvsError>(async move { - let mut replicas = Vec::new(); + let mut lvs_replicas = Vec::new(); if let Some(bdev) = Bdev::bdev_first() { - replicas = bdev + lvs_replicas = bdev .into_iter() .filter(|b| b.driver() == "lvol") .map(|b| Replica::from(Lvol::try_from(b).unwrap())) .collect(); } - Ok(ListReplicasReply { - replicas, - }) + Ok(lvs_replicas) })?; - rx.await + let r = rx.await .map_err(|_| Status::cancelled("cancelled"))? 
- .map_err(Status::from) - .map(Response::new) + .map_err(Status::from); + + let mut replicas = match r { + Ok(r) => r, + Err(e) => return Err(e), + }; + + let lvm_replicas = LogicalVolume::list("").await + .map_err(|e| LvsError::Invalid { + source: Errno::UnknownErrno, + msg: e.to_string(), + })?; + + replicas.append(&mut lvm_replicas.into_iter().map(|r| Replica::from(r)).collect()); + + Ok(ListReplicasReply { + replicas, + }).map(Response::new) + }) .await } @@ -674,7 +785,14 @@ impl mayastor_server::Mayastor for MayastorSvc { self.locked( GrpcClientContext::new(&request, function_name!()), async move { - let args = request.into_inner(); + let mut args = request.into_inner(); + + if let Some(volume) = LogicalVolume::lookup_by_lv_name(args.uuid.as_str().to_string()).await { + let uuid = volume.name().to_string(); + volume.change_share_tag(args.share).await?; + args.uuid = uuid; + } + let rx = rpc_submit(async move { match Bdev::lookup_by_name(&args.uuid) { Some(bdev) => { @@ -1331,3 +1449,25 @@ impl mayastor_server::Mayastor for MayastorSvc { Ok(Response::new(reply)) } } + +async fn create_and_share_bdev(uri: &str, share_protocol: i32) -> Result { + let uri = format!("uring://{}", uri); + let rx = rpc_submit(async move { + + match bdev_create(uri.as_str()).await { + Ok(b) + // no need to check for error here, as the share protocol is already validated + if Protocol::try_from(share_protocol).unwrap() == Protocol::Nvmf => { + let bdev = Bdev::lookup_by_name(&b).unwrap(); + let share = bdev.share_nvmf(None).await?; + let bdev = Bdev::lookup_by_name(&b).unwrap(); + Ok(bdev.share_uri().unwrap_or(share)) + }, + Ok(b) => Ok(b), + Err(e) => Err(e).map_err(Status::from), + } + })?; + + rx.await.map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) +} diff --git a/mayastor/src/replica.rs b/mayastor/src/replica.rs index cc3feb9b2..65861afdc 100644 --- a/mayastor/src/replica.rs +++ b/mayastor/src/replica.rs @@ -294,6 +294,7 @@ impl From for rpc::Replica { None => rpc::ShareProtocolReplica::ReplicaNone, } as i32, uri: r.get_share_uri(), + pooltype: rpc::PoolType::Lvs as i32, } } } From e4f44cd5838099d9e1f2df0e0fed17dc566296a1 Mon Sep 17 00:00:00 2001 From: Akhil Mohan Date: Wed, 8 Sep 2021 12:23:32 +0530 Subject: [PATCH 7/8] feat(cli): support lvm operations via mayastor cli - add support for replica operations backed by lvm Signed-off-by: Akhil Mohan --- mayastor/src/bin/mayastor-client/pool_cli.rs | 4 ++-- mayastor/src/bin/mayastor-client/replica_cli.rs | 17 +++++++++++++++-- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/mayastor/src/bin/mayastor-client/pool_cli.rs b/mayastor/src/bin/mayastor-client/pool_cli.rs index b974c756d..c385abb23 100644 --- a/mayastor/src/bin/mayastor-client/pool_cli.rs +++ b/mayastor/src/bin/mayastor-client/pool_cli.rs @@ -219,14 +219,14 @@ fn pool_state_to_str(idx: i32) -> &'static str { } } -fn pooltype_to_str(idx: i32) -> &'static str { +pub(crate) fn pooltype_to_str(idx: i32) -> &'static str { match rpc::PoolType::from_i32(idx).unwrap() { rpc::PoolType::Lvs => "lvs", rpc::PoolType::Lvm => "lvm", } } -fn parse_pooltype(ptype: Option<&str>) -> Result { +pub(crate) fn parse_pooltype(ptype: Option<&str>) -> Result { match ptype { None => Ok(rpc::PoolType::Lvs as i32), Some("lvs") => Ok(rpc::PoolType::Lvs as i32), diff --git a/mayastor/src/bin/mayastor-client/replica_cli.rs b/mayastor/src/bin/mayastor-client/replica_cli.rs index 16cecc735..ca94582e6 100644 --- a/mayastor/src/bin/mayastor-client/replica_cli.rs +++ 
From e4f44cd5838099d9e1f2df0e0fed17dc566296a1 Mon Sep 17 00:00:00 2001
From: Akhil Mohan
Date: Wed, 8 Sep 2021 12:23:32 +0530
Subject: [PATCH 7/8] feat(cli): support lvm operations via mayastor cli

- add support for replica operations backed by lvm

Signed-off-by: Akhil Mohan
---
 mayastor/src/bin/mayastor-client/pool_cli.rs    |  4 ++--
 mayastor/src/bin/mayastor-client/replica_cli.rs | 17 +++++++++++++++--
 2 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/mayastor/src/bin/mayastor-client/pool_cli.rs b/mayastor/src/bin/mayastor-client/pool_cli.rs
index b974c756d..c385abb23 100644
--- a/mayastor/src/bin/mayastor-client/pool_cli.rs
+++ b/mayastor/src/bin/mayastor-client/pool_cli.rs
@@ -219,14 +219,14 @@ fn pool_state_to_str(idx: i32) -> &'static str {
     }
 }

-fn pooltype_to_str(idx: i32) -> &'static str {
+pub(crate) fn pooltype_to_str(idx: i32) -> &'static str {
     match rpc::PoolType::from_i32(idx).unwrap() {
         rpc::PoolType::Lvs => "lvs",
         rpc::PoolType::Lvm => "lvm",
     }
 }

-fn parse_pooltype(ptype: Option<&str>) -> Result {
+pub(crate) fn parse_pooltype(ptype: Option<&str>) -> Result {
     match ptype {
         None => Ok(rpc::PoolType::Lvs as i32),
         Some("lvs") => Ok(rpc::PoolType::Lvs as i32),
diff --git a/mayastor/src/bin/mayastor-client/replica_cli.rs b/mayastor/src/bin/mayastor-client/replica_cli.rs
index 16cecc735..ca94582e6 100644
--- a/mayastor/src/bin/mayastor-client/replica_cli.rs
+++ b/mayastor/src/bin/mayastor-client/replica_cli.rs
@@ -1,6 +1,7 @@
 use crate::{
     context::{Context, OutputFormat},
     parse_size,
+    pool_cli::{parse_pooltype, pooltype_to_str},
     Error,
     GrpcStatus,
 };
@@ -44,7 +45,14 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> {
                 .short("t")
                 .long("thin")
                 .takes_value(false)
-                .help("Whether replica is thin provisioned (default false)"));
+                .help("Whether replica is thin provisioned (default false)"))
+        .arg(
+            Arg::with_name("pooltype")
+                .long("type")
+                .required(false)
+                .possible_values(&["lvs", "lvm"])
+                .default_value("lvs")
+                .help("type of the pool"));

     let create_v2 = SubCommand::with_name("create2")
         .about("Create replica on pool")
@@ -168,12 +176,15 @@ async fn replica_create(
     let thin = matches.is_present("thin");
     let share = parse_replica_protocol(matches.value_of("protocol"))
         .context(GrpcStatus)?;
+    let pooltype = parse_pooltype(matches.value_of("pooltype"))
+        .context(GrpcStatus)?;

     let rq = rpc::CreateReplicaRequest {
         uuid: name.clone(),
         pool,
         thin,
         share,
+        pooltype,
         size: size.get_bytes() as u64,
     };
     let response = ctx.client.create_replica(rq).await.context(GrpcStatus)?;
@@ -327,8 +338,10 @@ async fn replica_list(
             .map(|r| {
                 let proto = replica_protocol_to_str(r.share);
                 let size = ctx.units(Byte::from_bytes(r.size.into()));
+                let pooltype = pooltype_to_str(r.pooltype);
                 vec![
                     r.pool.clone(),
+                    pooltype.to_string(),
                     r.uuid.clone(),
                     r.thin.to_string(),
                     proto.to_string(),
@@ -338,7 +351,7 @@ async fn replica_list(
             })
             .collect();
         ctx.print_list(
-            vec!["POOL", "NAME", ">THIN", ">SHARE", ">SIZE", "URI"],
+            vec!["POOL", "TYPE", "NAME", ">THIN", ">SHARE", ">SIZE", "URI"],
             table,
         );
     }
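With pooltype_to_str and parse_pooltype now pub(crate) and used from replica_cli.rs as well, a small round-trip test keeps the two mappings from drifting apart. A sketch only, assuming the truncated tail of parse_pooltype maps "lvm" to rpc::PoolType::Lvm and that its error type can be unwrapped:

    #[cfg(test)]
    mod tests {
        use super::{parse_pooltype, pooltype_to_str};

        #[test]
        fn pooltype_round_trip() {
            // no --type given: the default stays "lvs" for backwards compatibility
            assert_eq!(parse_pooltype(None).unwrap(), rpc::PoolType::Lvs as i32);
            for &name in &["lvs", "lvm"] {
                let idx = parse_pooltype(Some(name)).unwrap();
                assert_eq!(pooltype_to_str(idx), name);
            }
        }
    }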
From 5498786f88ec2d0694d27c482354acc78773e66f Mon Sep 17 00:00:00 2001
From: Akhil Mohan
Date: Wed, 8 Sep 2021 12:25:06 +0530
Subject: [PATCH 8/8] feat(tests): add python tests for lvm volume

- add python tests for replica operations backed by lvm

Signed-off-by: Akhil Mohan
---
 test/python/docker-compose.yml           | 155 ++++++++++++-----------
 test/python/features/lvm_replica.feature |  20 +++
 test/python/test_bdd_lvm_replica.py      | 126 ++++++++++++++++++
 3 files changed, 227 insertions(+), 74 deletions(-)
 create mode 100644 test/python/features/lvm_replica.feature
 create mode 100644 test/python/test_bdd_lvm_replica.py

diff --git a/test/python/docker-compose.yml b/test/python/docker-compose.yml
index 8e1ef51dd..3cf9155e2 100644
--- a/test/python/docker-compose.yml
+++ b/test/python/docker-compose.yml
@@ -12,7 +12,14 @@ services:
       - NEXUS_NVMF_ANA_ENABLE=1
       - NEXUS_NVMF_RESV_ENABLE=1
       - LVM_ENABLE=1
-    command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock
+    # lvm conf is changed to ignore udev rules, and turn off udev sync
+    command: |
+      /bin/bash -c
+      "cp /etc/lvm/lvm.conf lvm.conf &&
+      sed -i 's/udev_sync = 1/udev_sync = 0/' /lvm.conf &&
+      sed -i 's/udev_rules = 1/udev_rules = 0/' /lvm.conf &&
+      cat lvm.conf > /etc/lvm/lvm.conf &&
+      ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock"
     networks:
       mayastor_net:
         ipv4_address: 10.0.0.2
@@ -39,79 +46,79 @@ services:
       - /lib:/lib
       - /lib64:/lib64
       - /etc/lvm:/etc/lvm
-  ms1:
-    container_name: "ms1"
-    image: rust:latest
-    environment:
-      - MY_POD_IP=10.0.0.3
-    command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 3,4 -r /tmp/ms1.sock
-    networks:
-      mayastor_net:
-        ipv4_address: 10.0.0.3
-    cap_add:
-      # NUMA related
-      - SYS_ADMIN
-      - SYS_NICE
-      # uring needs mmap
-      - IPC_LOCK
-    security_opt:
-      # we can set this to a JSON file to allow per syscall access
-      - seccomp:unconfined
-    volumes:
-      - ${SRCDIR}:${SRCDIR}
-      - /nix:/nix
-      - /dev/hugepages:/dev/hugepages
-      - /tmp:/tmp
-      - /var/tmp:/var/tmp
-  ms2:
-    container_name: "ms2"
-    image: rust:latest
-    environment:
-      - MY_POD_IP=10.0.0.4
-      - NEXUS_NVMF_ANA_ENABLE=1
-    command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 5,6 -r /tmp/ms2.sock
-    networks:
-      mayastor_net:
-        ipv4_address: 10.0.0.4
-    cap_add:
-      - SYS_ADMIN
-      - SYS_NICE
-      - IPC_LOCK
-    security_opt:
-      - seccomp:unconfined
-    volumes:
-      - ${SRCDIR}:${SRCDIR}
-      - /nix:/nix
-      - /dev/hugepages:/dev/hugepages
-      - /tmp:/tmp
-      - /var/tmp:/var/tmp
-  ms3:
-    container_name: "ms3"
-    image: rust:latest
-    environment:
-      - MY_POD_IP=10.0.0.5
-      - RUST_BACKTRACE=full
-      - NVME_KATO_MS=1000
-      - RUST_LOG=mayastor=trace
-      - NEXUS_DONT_READ_LABELS=true
-      - NEXUS_NVMF_ANA_ENABLE=1
-      - NEXUS_NVMF_RESV_ENABLE=1
-    command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 0,7 -r /tmp/ms3.sock
-    networks:
-      mayastor_net:
-        ipv4_address: 10.0.0.5
-    cap_add:
-      - SYS_ADMIN
-      - SYS_NICE
-      - IPC_LOCK
-    security_opt:
-      - seccomp:unconfined
-    volumes:
-      - ${SRCDIR}:${SRCDIR}
-      - /nix:/nix
-      - /dev/hugepages:/dev/hugepages
-      - /tmp:/tmp
-      - /var/tmp:/var/tmp
+#  ms1:
+#    container_name: "ms1"
+#    image: rust:latest
+#    environment:
+#      - MY_POD_IP=10.0.0.3
+#    command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 3,4 -r /tmp/ms1.sock
+#    networks:
+#      mayastor_net:
+#        ipv4_address: 10.0.0.3
+#    cap_add:
+#      # NUMA related
+#      - SYS_ADMIN
+#      - SYS_NICE
+#      # uring needs mmap
+#      - IPC_LOCK
+#    security_opt:
+#      # we can set this to a JSON file to allow per syscall access
+#      - seccomp:unconfined
+#    volumes:
+#      - ${SRCDIR}:${SRCDIR}
+#      - /nix:/nix
+#      - /dev/hugepages:/dev/hugepages
+#      - /tmp:/tmp
+#      - /var/tmp:/var/tmp
+#  ms2:
+#    container_name: "ms2"
+#    image: rust:latest
+#    environment:
+#      - MY_POD_IP=10.0.0.4
+#      - NEXUS_NVMF_ANA_ENABLE=1
+#    command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 5,6 -r /tmp/ms2.sock
+#    networks:
+#      mayastor_net:
+#        ipv4_address: 10.0.0.4
+#    cap_add:
+#      - SYS_ADMIN
+#      - SYS_NICE
+#      - IPC_LOCK
+#    security_opt:
+#      - seccomp:unconfined
+#    volumes:
+#      - ${SRCDIR}:${SRCDIR}
+#      - /nix:/nix
+#      - /dev/hugepages:/dev/hugepages
+#      - /tmp:/tmp
+#      - /var/tmp:/var/tmp
+#  ms3:
+#    container_name: "ms3"
+#    image: rust:latest
+#    environment:
+#      - MY_POD_IP=10.0.0.5
+#      - RUST_BACKTRACE=full
+#      - NVME_KATO_MS=1000
+#      - RUST_LOG=mayastor=trace
+#      - NEXUS_DONT_READ_LABELS=true
+#      - NEXUS_NVMF_ANA_ENABLE=1
+#      - NEXUS_NVMF_RESV_ENABLE=1
+#    command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 0,7 -r /tmp/ms3.sock
+#    networks:
+#      mayastor_net:
+#        ipv4_address: 10.0.0.5
+#    cap_add:
+#      - SYS_ADMIN
+#      - SYS_NICE
+#      - IPC_LOCK
+#    security_opt:
+#      - seccomp:unconfined
+#    volumes:
+#      - ${SRCDIR}:${SRCDIR}
+#      - /nix:/nix
+#      - /dev/hugepages:/dev/hugepages
+#      - /tmp:/tmp
+#      - /var/tmp:/var/tmp
 networks:
   mayastor_net:
     name: mayastor_net
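The compose change leaves only ms0 running (ms1-ms3 are commented out) and starts it with LVM_ENABLE=1 plus an lvm.conf rewrite that disables udev_sync and udev_rules, presumably because no udev daemon serves the container. How mayastor consumes LVM_ENABLE is outside this diff; purely as an illustrative sketch, a gate on that variable might look like:

    /// Illustrative only: opt into the LVM backend when the LVM_ENABLE
    /// environment variable exported in docker-compose.yml is set truthy.
    fn lvm_enabled() -> bool {
        std::env::var("LVM_ENABLE")
            .map(|v| v == "1" || v.eq_ignore_ascii_case("true"))
            .unwrap_or(false)
    }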
diff --git a/test/python/features/lvm_replica.feature b/test/python/features/lvm_replica.feature
new file mode 100644
index 000000000..68683694e
--- /dev/null
+++ b/test/python/features/lvm_replica.feature
@@ -0,0 +1,20 @@
+Feature: LVM replica support
+
+  Background:
+    Given a mayastor instance "ms0"
+    And an LVM VG backed pool called "lvmpool"
+
+  Scenario: Creating an lvm volume on an imported lvm volume group
+    When a user calls the createreplica on pool "lvmpool"
+    Then a vol should be created on that pool with the given uuid as its name
+
+  Scenario: Destroying a replica backed by lvm pool
+    Given an LVM backed replica
+    When a user calls destroy replica
+    Then the replica gets destroyed
+
+  Scenario: Listing replicas from either an LVS or LVM pool
+    Given an LVS pool with a replica
+    And an LVM backed replica
+    When a user calls list replicas
+    Then all replicas should be listed
\ No newline at end of file
diff --git a/test/python/test_bdd_lvm_replica.py b/test/python/test_bdd_lvm_replica.py
new file mode 100644
index 000000000..448bebe3b
--- /dev/null
+++ b/test/python/test_bdd_lvm_replica.py
@@ -0,0 +1,126 @@
+"""LVM replica support feature tests."""
+
+import pytest
+from test_bdd_replica import replica_uuid, replica_size
+from test_bdd_lvm import image_file
+from pytest_bdd import (
+    given,
+    scenario,
+    then,
+    when,
+    parsers,
+)
+from common.command import run_cmd
+from common.mayastor import mayastor_mod
+import mayastor_pb2 as pb
+import subprocess
+
+
+@scenario('features/lvm_replica.feature', 'Creating an lvm volume on an imported lvm volume group')
+def test_creating_an_lvm_volume_on_an_imported_lvm_volume_group():
+    """Creating an lvm volume on an imported lvm volume group."""
+
+
+@scenario('features/lvm_replica.feature', 'Destroying a replica backed by lvm pool')
+def test_destroying_a_replica_backed_by_lvm_pool():
+    """Destroying a replica backed by lvm pool"""
+
+
+@scenario('features/lvm_replica.feature', 'Listing replicas from either an LVS or LVM pool')
+def test_listing_replicas_from_either_an_lvs_or_lvm_pool():
+    """Listing replicas from either an LVS or LVM pool"""
+
+
+@pytest.fixture
+def create_replica(get_mayastor_instance):
+    def create(uuid, pool, size, share, pooltype):
+        get_mayastor_instance.ms.CreateReplica(
+            pb.CreateReplicaRequest(uuid=uuid, pool=pool, size=size, share=share, pooltype=pooltype)
+        )
+
+    yield create
+
+
+@pytest.fixture
+def find_replica(get_mayastor_instance):
+    def find(uuid):
+        for replica in get_mayastor_instance.ms.ListReplicas(pb.Null()).replicas:
+            if replica.uuid == uuid:
+                return replica
+        return None
+
+    yield find
+
+
+@pytest.fixture
+def create_pool(get_mayastor_instance):
+    def create(name, disks, pooltype):
+        get_mayastor_instance.ms.CreatePool(
+            pb.CreatePoolRequest(name=name, disks=disks, pooltype=pooltype)
+        )
+
+    yield create
+
+
+@pytest.fixture
+def volgrp_with_losetup_disk():
+    pool_name = "lvmpool"
+    p = subprocess.run(f"sudo vgs {pool_name}", shell=True, check=False)
+    # if volume group already exists then dont create it again
+    if p.returncode != 0:
+        file = "/tmp/ms0-disk0.img"
+        run_cmd(f"rm -f '{file}'", True)
+        run_cmd(f"truncate -s 128M '{file}'", True)
+        out = subprocess.run(f"sudo losetup -f '{file}' --show", shell=True, check=True, capture_output=True)
+        disk = out.stdout.decode('ascii').strip('\n')
+        run_cmd(f"sudo pvcreate '{disk}'", True)
+        run_cmd(f"sudo vgcreate '{pool_name}' '{disk}'", True)
+    yield pool_name
+
+
+@given(
+    parsers.parse('a mayastor instance "{name}"'),
+    target_fixture="get_mayastor_instance",
+)
+def get_mayastor_instance(mayastor_mod, name):
+    return mayastor_mod[f"{name}"]
+
+
+@given(
+    parsers.parse('an LVM VG backed pool called "{pool_name}"'),
+    target_fixture="create_pool_on_vol_group",
+)
+def create_pool_on_vol_group(volgrp_with_losetup_disk, create_pool, pool_name):
+    create_pool(f"{volgrp_with_losetup_disk}", [], pb.Lvm)
+
+
+@given('an LVS pool with a replica')
+def an_lvs_pool_with_a_replica(create_pool, create_replica, replica_size, image_file):
+    create_pool("lvspool", [f"aio://{image_file}"], pb.Lvs)
+    create_replica("uuid1", "lvspool", replica_size, pb.REPLICA_NONE, pb.Lvs)
+
+
+@when(
+    parsers.parse('a user calls the createreplica on pool "{pool_name}"'),
+    target_fixture="a_user_calls_the_create_replica",
+)
+@given('an LVM backed replica')
+def a_user_calls_the_create_replica(create_replica, pool_name, replica_uuid, replica_size):
+    create_replica(replica_uuid, pool_name, replica_size, pb.REPLICA_NONE, pb.Lvm)
+
+
+@then('a vol should be created on that pool with the given uuid as its name')
+def a_vol_should_be_created_on_that_pool_with_the_given_uuid_as_its_name(find_replica, replica_uuid):
+    assert find_replica(replica_uuid) != None
+
+
+@when('a user calls destroy replica')
+def a_user_calls_destroy_replica(get_mayastor_instance, replica_uuid):
+    get_mayastor_instance.ms.DestroyReplica(
+        pb.DestroyReplicaRequest(uuid=replica_uuid)
+    )
+
+
+@then('the replica gets destroyed')
+def the_replica_gets_destroyed(find_replica, replica_uuid):
+    assert find_replica(replica_uuid) == None
+
+
+@when('a user calls list replicas', target_fixture="list_replicas")
+def list_replicas(get_mayastor_instance):
+    return get_mayastor_instance.ms.ListReplicas(pb.Null()).replicas
+
+
+@then('all replicas should be listed')
+def all_replicas_should_be_listed(list_replicas, replica_uuid, replica_size):
+    for replica in list_replicas:
+        assert replica.size == replica_size
+        if replica.uuid == replica_uuid:
+            assert replica.pooltype == pb.Lvm
+        if replica.uuid == "uuid1":
+            assert replica.pooltype == pb.Lvs