chore(bors): merge pull request #525
525: Cherry-pick #524 r=tiagolobocastro a=tiagolobocastro

feat: allow for a range of 2 controller ids when publishing

Previously we had a strict limit of a controller id range of size 1. This effectively means only one initiator can connect to our volume target.
However, it seems that when an initiator times out before the target "notices" it, we end up with a new connection whilst the old one is still in place, and thus the new connect call fails!

Why were we so strict? Because the current code in the dataplane cannot guarantee that older initiators connected with allow_any set to true will be disconnected when we set a new allowed list!
So is it safe to bump this? We believe so, because CSI currently sets an allowed list of size 1 for all published volumes, so when this list is updated the previous initiator should be booted out!

todo: update dataplane to boot out any connected host!
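To make the reasoning concrete, here is a minimal sketch (plain Rust, not the project's API) of why a width-2 range helps the reconnect case:

    // With a width-2 range, the stale connection can keep holding the first
    // controller id while the reconnecting initiator is handed the second one.
    fn main() {
        let range = 1u16..=2; // width-2 controller id range published to the target
        let stale = 1u16; // timed-out initiator the target hasn't noticed yet
        let fresh = range.clone().find(|id| *id != stale);
        assert_eq!(fresh, Some(2)); // the new connect call can now succeed
    }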

Co-authored-by: Tiago Castro <[email protected]>
mayastor-bors and tiagolobocastro committed May 17, 2023
2 parents e468679 + 34b1120 commit 3e105a3
Showing 2 changed files with 87 additions and 13 deletions.
6 changes: 3 additions & 3 deletions control-plane/agents/src/bin/core/volume/operations_helper.rs
@@ -59,17 +59,17 @@ impl OperationGuardArc<VolumeSpec> {
         let nexus = NexusId::new();
         let resv_key = nexus.as_u128() as u64;
         let range = match self.as_ref().config() {
-            None => NvmfControllerIdRange::new_min(),
+            None => NvmfControllerIdRange::new_min(2),
             Some(cfg) => {
                 // todo: should the cluster agent tell us which controller Id to use?
                 #[allow(clippy::if_same_then_else)]
                 if self.published() {
-                    cfg.config().controller_id_range().next()
+                    cfg.config().controller_id_range().next(2)
                 } else {
                     // if we're not published should we start over?
                     // for now let's carry on to next as there might be some cases where we unpublish
                     // but the initiator doesn't disconnect properly.
-                    cfg.config().controller_id_range().next()
+                    cfg.config().controller_id_range().next(2)
                 }
             }
         };
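Both call sites above now pass a width of 2. As a rough standalone model (simplified arithmetic, not the stor-port types), successive publishes walk the controller id space in width-2 windows:

    // Hypothetical walk-through of the width-2 windows handed out per publish.
    fn main() {
        let width = 2u16;
        let mut range = 1u16..=width; // new_min(2) -> 1..=2
        for expected in [(3u16, 4u16), (5, 6), (7, 8)] {
            let start = range.end() + 1; // next(2) starts just past the old end
            range = start..=(start + width - 1);
            assert_eq!((*range.start(), *range.end()), expected);
        }
    }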
94 changes: 84 additions & 10 deletions control-plane/stor-port/src/types/v0/transport/nexus.rs
@@ -304,15 +304,87 @@ pub enum NexusNvmePreemption {
     Holder,
 }
 
+#[test]
+fn nvmf_controller_range() {
+    #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+    struct TestSpec {
+        nvmf: NvmfControllerIdRange,
+    }
+    #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+    struct Test<'a> {
+        input: &'a str,
+        expected: NvmfControllerIdRange,
+    }
+    let tests: Vec<Test> = vec![
+        Test {
+            input: r#"{"nvmf":{"start":1,"end":1}}"#,
+            expected: NvmfControllerIdRange::new_min(1),
+        },
+        Test {
+            input: r#"{"nvmf":{"start":1,"end":5}}"#,
+            expected: NvmfControllerIdRange::new_min(5),
+        },
+        Test {
+            input: r#"{"nvmf":{"start":1,"end":65519}}"#,
+            expected: NvmfControllerIdRange::new_min(0xffff),
+        },
+        Test {
+            input: r#"{"nvmf":{"start":1,"end":1}}"#,
+            expected: NvmfControllerIdRange::new(1, 0xffef).unwrap().next(1),
+        },
+        Test {
+            input: r#"{"nvmf":{"start":1,"end":2}}"#,
+            expected: NvmfControllerIdRange::new(1, 0xffef).unwrap().next(2),
+        },
+        Test {
+            input: r#"{"nvmf":{"start":65519,"end":65519}}"#,
+            expected: NvmfControllerIdRange::new(1, 0xffee).unwrap().next(1),
+        },
+        Test {
+            input: r#"{"nvmf":{"start":1,"end":2}}"#,
+            expected: NvmfControllerIdRange::new(1, 0xffee).unwrap().next(2),
+        },
+        Test {
+            input: r#"{"nvmf":{"start":6,"end":7}}"#,
+            expected: NvmfControllerIdRange::new_min(5).next(2),
+        },
+    ];
+
+    for test in &tests {
+        println!("{}", serde_json::to_string(&test.expected).unwrap());
+        let spec: TestSpec = serde_json::from_str(test.input).unwrap();
+        assert_eq!(test.expected, spec.nvmf);
+    }
+
+    let mut range = NvmfControllerIdRange::new_min(1);
+    loop {
+        range = range.next(8);
+        if range.min() == &1 {
+            break;
+        }
+    }
+}
+
 /// Nvmf Controller Id Range
 #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
 #[serde(rename_all = "camelCase")]
 pub struct NvmfControllerIdRange(std::ops::RangeInclusive<u16>);
 impl NvmfControllerIdRange {
     /// Create `Self` with the minimum controller id.
-    pub fn new_min() -> Self {
+    pub fn new_min(width: u16) -> Self {
         let min = *Self::controller_id_range().start();
-        Self(min ..= min)
+        let max = Self::end_from_min(min, width);
+        Self(min ..= max)
     }
+    fn end_from_min(min: u16, width: u16) -> u16 {
+        let width = width.min(*Self::controller_id_range().end());
+
+        if width > 0 {
+            let end = min as u32 + width as u32 - 1;
+            u16::try_from(end).unwrap_or(u16::MAX)
+        } else {
+            min
+        }
+    }
     /// create `Self` with a random minimum controller id
     pub fn random_min() -> Self {
@@ -349,18 +421,20 @@ impl NvmfControllerIdRange {
             ))
         }
     }
-    /// Bump the controller id range by 1.
+    /// Bump the controller id range.
+    /// The new range shall use the given `width`.
     /// # Note: this will wrap around back to the minimum.
-    pub fn next(&self) -> Self {
+    pub fn next(&self, width: u16) -> Self {
         let range = self.0.clone();
-        let start = *range.start();
-        if start >= *Self::controller_id_range().end() {
-            // wrap around to the start of the range
-            let start = *Self::controller_id_range().start();
-            Self(start ..= start)
-        } else {
-            let start = start + 1;
-            Self(start ..= start)
+
+        let start = u16::try_from(*range.end() as u32 + 1).unwrap_or_default();
+        let end = Self::end_from_min(start, width);
+
+        if let Ok(range) = Self::new(start, end) {
+            range
+        } else {
+            // wrap around to the start of the range
+            Self::new_min(width)
         }
     }
 }
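For the wrap-around path, a small sketch (with bounds 1..=0xffef assumed from the tests above) of when next falls back to new_min:

    // Models only the start computation: once the next start leaves the valid
    // controller id range (or overflows to 0), the caller wraps to new_min.
    fn next_start(prev_end: u16) -> Option<u16> {
        let start = u16::try_from(prev_end as u32 + 1).unwrap_or_default();
        (1..=0xffef_u16).contains(&start).then_some(start)
    }

    fn main() {
        assert_eq!(next_start(5), Some(6)); // normal advance
        assert_eq!(next_start(0xffef), None); // past the assumed max -> wrap around
        assert_eq!(next_start(u16::MAX), None); // overflow -> start 0 -> wrap around
    }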