detach disks on instance delete (#1068)
Look up all disks attached to an instance during instance delete, and
detach them before the instance is deleted.

fixes #1031
jmpesp authored May 16, 2022
1 parent a1a5916 commit e2c5d84
Showing 3 changed files with 200 additions and 5 deletions.
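In outline, the new delete path fetches the instance, rejects the request unless the instance is stopped or failed, detaches every attached disk, and only then removes the instance record. Below is a minimal, self-contained sketch of that ordering using stand-in types (Instance, InstanceState, DeleteError) rather than the real Nexus LookupPath/datastore machinery; it is only an illustration of the flow introduced by this commit, not the actual implementation shown in the diffs that follow.

// Toy model of the new delete flow: refuse to delete an instance that is
// still running, detach every attached disk, and only then remove the
// instance itself. All types here are stand-ins, not real Nexus types.

#[derive(Debug)]
enum InstanceState {
    Running,
    Stopped,
    Failed,
}

struct Instance {
    name: String,
    state: InstanceState,
    attached_disks: Vec<String>,
}

#[derive(Debug)]
enum DeleteError {
    InvalidState(String),
}

fn delete_instance(instance: &mut Instance) -> Result<(), DeleteError> {
    // Only stopped or failed instances may be deleted, mirroring the new
    // state check added in nexus/src/app/instance.rs.
    match instance.state {
        InstanceState::Stopped | InstanceState::Failed => {}
        ref state => {
            return Err(DeleteError::InvalidState(format!(
                "instance cannot be deleted in state \"{:?}\"",
                state
            )));
        }
    }

    // Detach all attached disks before deleting the instance record.
    for disk in std::mem::take(&mut instance.attached_disks) {
        println!("detaching disk {} from instance {}", disk, instance.name);
    }

    // At this point the real code calls project_delete_instance in the datastore.
    println!("deleting instance {}", instance.name);
    Ok(())
}

fn main() {
    let mut stopped = Instance {
        name: "nfs".to_string(),
        state: InstanceState::Stopped,
        attached_disks: (0..8).map(|i| format!("probablydata{}", i)).collect(),
    };
    delete_instance(&mut stopped).expect("stopped instance should delete");
    assert!(stopped.attached_disks.is_empty());

    let mut running = Instance {
        name: "web".to_string(),
        state: InstanceState::Running,
        attached_disks: vec!["data0".to_string()],
    };
    match delete_instance(&mut running) {
        Err(DeleteError::InvalidState(msg)) => println!("rejected: {}", msg),
        Ok(()) => unreachable!("running instance must not be deleted"),
    }
}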
56 changes: 51 additions & 5 deletions nexus/src/app/instance.rs
@@ -173,12 +173,58 @@ impl super::Nexus {
// TODO-robustness We need to figure out what to do with Destroyed
// instances? Presumably we need to clean them up at some point, but
// not right away so that callers can see that they've been destroyed.
let (.., authz_instance) = LookupPath::new(opctx, &self.db_datastore)
.organization_name(organization_name)
.project_name(project_name)
.instance_name(instance_name)
.lookup_for(authz::Action::Delete)
let (.., authz_instance, db_instance) =
LookupPath::new(opctx, &self.db_datastore)
.organization_name(organization_name)
.project_name(project_name)
.instance_name(instance_name)
.fetch()
.await?;

opctx.authorize(authz::Action::Delete, &authz_instance).await?;

match db_instance.runtime_state.state.state() {
InstanceState::Stopped | InstanceState::Failed => {
// ok
}

state => {
return Err(Error::InvalidRequest {
message: format!(
"instance cannot be deleted in state \"{}\"",
state,
),
});
}
}

// Detach all attached disks
let disks = self
.instance_list_disks(
opctx,
organization_name,
project_name,
instance_name,
&DataPageParams {
marker: None,
direction: dropshot::PaginationOrder::Ascending,
limit: std::num::NonZeroU32::new(MAX_DISKS_PER_INSTANCE)
.unwrap(),
},
)
.await?;

for disk in &disks {
self.instance_detach_disk(
opctx,
organization_name,
project_name,
instance_name,
&disk.name(),
)
.await?;
}

self.db_datastore.project_delete_instance(opctx, &authz_instance).await
}

1 change: 1 addition & 0 deletions nexus/src/db/datastore.rs
@@ -1024,6 +1024,7 @@ impl DataStore {
ErrorHandler::NotFoundByResource(authz_instance),
)
})?;

match result.status {
UpdateStatus::Updated => Ok(()),
UpdateStatus::NotUpdatedButExists => {
148 changes: 148 additions & 0 deletions nexus/tests/integration_tests/instances.rs
@@ -1527,6 +1527,154 @@ async fn test_cannot_attach_faulted_disks(cptestctx: &ControlPlaneTestContext) {
}
}

// Test that disks are detached when an instance is destroyed
#[nexus_test]
async fn test_disks_detached_when_instance_destroyed(
cptestctx: &ControlPlaneTestContext,
) {
let client = &cptestctx.external_client;

const ORGANIZATION_NAME: &str = "bobs-barrel-of-bytes";
const PROJECT_NAME: &str = "bit-barrel";

// Test pre-reqs
DiskTest::new(&cptestctx).await;
create_organization(&client, ORGANIZATION_NAME).await;
create_project(client, ORGANIZATION_NAME, PROJECT_NAME).await;

// Make 8 disks
for i in 0..8 {
create_disk(
&client,
ORGANIZATION_NAME,
PROJECT_NAME,
&format!("probablydata{}", i,),
)
.await;
}

// Assert we created 8 disks
let url_project_disks = format!(
"/organizations/{}/projects/{}/disks",
ORGANIZATION_NAME, PROJECT_NAME,
);
let disks: Vec<Disk> = NexusRequest::iter_collection_authn(
client,
&url_project_disks,
"",
None,
)
.await
.expect("failed to list disks")
.all_items;

assert_eq!(disks.len(), 8);
for disk in &disks {
assert_eq!(disk.state, DiskState::Detached);
}

// Boot the instance
let instance_params = params::InstanceCreate {
identity: IdentityMetadataCreateParams {
name: Name::try_from(String::from("nfs")).unwrap(),
description: String::from("probably serving data"),
},
ncpus: InstanceCpuCount::try_from(2).unwrap(),
memory: ByteCount::from_mebibytes_u32(4),
hostname: String::from("nfs"),
user_data: vec![],
network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
disks: (0..8)
.map(|i| {
params::InstanceDiskAttachment::Attach(
params::InstanceDiskAttach {
name: Name::try_from(
format!("probablydata{}", i).to_string(),
)
.unwrap(),
},
)
})
.collect(),
};

let url_instances = format!(
"/organizations/{}/projects/{}/instances",
ORGANIZATION_NAME, PROJECT_NAME
);

let builder =
RequestBuilder::new(client, http::Method::POST, &url_instances)
.body(Some(&instance_params))
.expect_status(Some(http::StatusCode::CREATED));

let _response = NexusRequest::new(builder)
.authn_as(AuthnMode::PrivilegedUser)
.execute()
.await
.expect("expected instance creation!");

// Assert disks are attached
let url_project_disks = format!(
"/organizations/{}/projects/{}/disks",
ORGANIZATION_NAME, PROJECT_NAME,
);
let disks: Vec<Disk> = NexusRequest::iter_collection_authn(
client,
&url_project_disks,
"",
None,
)
.await
.expect("failed to list disks")
.all_items;

assert_eq!(disks.len(), 8);
for disk in &disks {
assert!(matches!(disk.state, DiskState::Attached(_)));
}

// Stop and delete instance
let instance_url = format!(
"/organizations/{}/projects/{}/instances/nfs",
ORGANIZATION_NAME, PROJECT_NAME
);

let instance =
instance_post(&client, &instance_url, InstanceOp::Stop).await;
let apictx = &cptestctx.server.apictx;
let nexus = &apictx.nexus;
instance_simulate(nexus, &instance.identity.id).await;
let instance = instance_get(&client, &instance_url).await;
assert_eq!(instance.runtime.run_state, InstanceState::Stopped);

NexusRequest::object_delete(&client, &instance_url)
.authn_as(AuthnMode::PrivilegedUser)
.execute()
.await
.unwrap();

// Assert disks are detached
let url_project_disks = format!(
"/organizations/{}/projects/{}/disks",
ORGANIZATION_NAME, PROJECT_NAME,
);
let disks: Vec<Disk> = NexusRequest::iter_collection_authn(
client,
&url_project_disks,
"",
None,
)
.await
.expect("failed to list disks")
.all_items;

assert_eq!(disks.len(), 8);
for disk in &disks {
assert_eq!(disk.state, DiskState::Detached);
}
}

async fn instance_get(
client: &ClientTestContext,
instance_url: &str,
