diff --git a/Cargo.lock b/Cargo.lock
index 6ee028bbc5..c2f3e1a949 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4273,6 +4273,7 @@ dependencies = [
  "steno",
  "strum",
  "subprocess",
+ "swrite",
  "term",
  "thiserror",
  "tokio",
@@ -4387,6 +4388,7 @@ dependencies = [
  "serde_urlencoded",
  "slog",
  "tokio",
+ "tokio-util",
  "trust-dns-resolver",
  "uuid",
 ]
@@ -4847,6 +4849,7 @@ dependencies = [
  "async-trait",
  "base64",
  "buf-list",
+ "bytes",
  "camino",
  "camino-tempfile",
  "cancel-safe-futures",
@@ -4947,6 +4950,9 @@ dependencies = [
  "tokio-postgres",
  "tough",
  "trust-dns-resolver",
+ "tufaceous",
+ "tufaceous-lib",
+ "update-common",
  "uuid",
 ]
@@ -9570,6 +9576,7 @@ dependencies = [
  "bytes",
  "camino",
  "camino-tempfile",
+ "chrono",
  "clap 4.4.3",
  "debug-ignore",
  "display-error-chain",
diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs
index 1e1cbc31e7..17fb5aa367 100644
--- a/clients/nexus-client/src/lib.rs
+++ b/clients/nexus-client/src/lib.rs
@@ -236,32 +236,6 @@ impl From
     }
 }
 
-impl From<omicron_common::api::internal::nexus::KnownArtifactKind>
-    for types::KnownArtifactKind
-{
-    fn from(
-        s: omicron_common::api::internal::nexus::KnownArtifactKind,
-    ) -> Self {
-        use omicron_common::api::internal::nexus::KnownArtifactKind;
-
-        match s {
-            KnownArtifactKind::GimletSp => types::KnownArtifactKind::GimletSp,
-            KnownArtifactKind::GimletRot => types::KnownArtifactKind::GimletRot,
-            KnownArtifactKind::Host => types::KnownArtifactKind::Host,
-            KnownArtifactKind::Trampoline => {
-                types::KnownArtifactKind::Trampoline
-            }
-            KnownArtifactKind::ControlPlane => {
-                types::KnownArtifactKind::ControlPlane
-            }
-            KnownArtifactKind::PscSp => types::KnownArtifactKind::PscSp,
-            KnownArtifactKind::PscRot => types::KnownArtifactKind::PscRot,
-            KnownArtifactKind::SwitchSp => types::KnownArtifactKind::SwitchSp,
-            KnownArtifactKind::SwitchRot => types::KnownArtifactKind::SwitchRot,
-        }
-    }
-}
-
 impl From<std::time::Duration> for types::Duration {
     fn from(s: std::time::Duration) -> Self {
         Self { secs: s.as_secs(), nanos: s.subsec_nanos() }
diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs
index a8aff00afa..dc3537fbb2 100644
--- a/common/src/api/external/mod.rs
+++ b/common/src/api/external/mod.rs
@@ -13,6 +13,8 @@ use dropshot::HttpError;
 pub use error::*;
 
 pub use crate::api::internal::shared::SwitchLocation;
+use crate::update::ArtifactHash;
+use crate::update::ArtifactId;
 use anyhow::anyhow;
 use anyhow::Context;
 use api_identity::ObjectIdentity;
@@ -760,13 +762,9 @@ pub enum ResourceType {
     Oximeter,
     MetricProducer,
     RoleBuiltin,
-    UpdateArtifact,
+    TufRepo,
+    TufArtifact,
     SwitchPort,
-    SystemUpdate,
-    ComponentUpdate,
-    SystemUpdateComponentUpdate,
-    UpdateDeployment,
-    UpdateableComponent,
     UserBuiltin,
     Zpool,
     Vmm,
@@ -2625,6 +2623,101 @@ pub struct BgpImportedRouteIpv4 {
     pub switch: SwitchLocation,
 }
 
+/// A description of an uploaded TUF repository.
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)]
+pub struct TufRepoDescription {
+    // Information about the repository.
+    pub repo: TufRepoMeta,
+
+    // Information about the artifacts present in the repository.
+    pub artifacts: Vec<TufArtifactMeta>,
+}
+
+impl TufRepoDescription {
+    /// Sorts the artifacts so that descriptions can be compared.
+    pub fn sort_artifacts(&mut self) {
+        self.artifacts.sort_by(|a, b| a.id.cmp(&b.id));
+    }
+}
+
+/// Metadata about a TUF repository.
+///
+/// Found within a [`TufRepoDescription`].
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)]
+pub struct TufRepoMeta {
+    /// The hash of the repository.
+    ///
+    /// This is a slight abuse of `ArtifactHash`, since that's the hash of
+    /// individual artifacts within the repository. However, we use it here for
+    /// convenience.
+    pub hash: ArtifactHash,
+
+    /// The version of the targets role.
+    pub targets_role_version: u64,
+
+    /// The time until which the repo is valid.
+    pub valid_until: DateTime<Utc>,
+
+    /// The system version in artifacts.json.
+    pub system_version: SemverVersion,
+
+    /// The file name of the repository.
+    ///
+    /// This is purely used for debugging and may not always be correct (e.g.
+    /// with wicket, we read the file contents from stdin so we don't know the
+    /// correct file name).
+    pub file_name: String,
+}
+
+/// Metadata about an individual TUF artifact.
+///
+/// Found within a [`TufRepoDescription`].
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)]
+pub struct TufArtifactMeta {
+    /// The artifact ID.
+    pub id: ArtifactId,
+
+    /// The hash of the artifact.
+    pub hash: ArtifactHash,
+
+    /// The size of the artifact in bytes.
+    pub size: u64,
+}
+
+/// Data about a successful TUF repo import into Nexus.
+#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)]
+#[serde(rename_all = "snake_case")]
+pub struct TufRepoInsertResponse {
+    /// The repository as present in the database.
+    pub recorded: TufRepoDescription,
+
+    /// Whether this repository already existed or is new.
+    pub status: TufRepoInsertStatus,
+}
+
+/// Status of a TUF repo import.
+///
+/// Part of [`TufRepoInsertResponse`].
+#[derive(
+    Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, JsonSchema,
+)]
+#[serde(rename_all = "snake_case")]
+pub enum TufRepoInsertStatus {
+    /// The repository already existed in the database.
+    AlreadyExists,
+
+    /// The repository did not exist, and was inserted into the database.
+    Inserted,
+}
+
+/// Data about a successful TUF repo get from Nexus.
+#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)]
+#[serde(rename_all = "snake_case")]
+pub struct TufRepoGetResponse {
+    /// The description of the repository.
+    pub description: TufRepoDescription,
+}
+
 #[cfg(test)]
 mod test {
     use serde::Deserialize;
diff --git a/common/src/nexus_config.rs b/common/src/nexus_config.rs
index 7f26bd84b0..be4b05ffdf 100644
--- a/common/src/nexus_config.rs
+++ b/common/src/nexus_config.rs
@@ -213,8 +213,6 @@ pub struct ConsoleConfig {
 pub struct UpdatesConfig {
     /// Trusted root.json role for the TUF updates repository.
     pub trusted_root: Utf8PathBuf,
-    /// Default base URL for the TUF repository.
-    pub default_base_url: String,
 }
 
 /// Options to tweak database schema changes.
@@ -631,7 +629,6 @@ mod test {
             address = "[::1]:8123"
             [updates]
             trusted_root = "/path/to/root.json"
-            default_base_url = "http://example.invalid/"
             [tunables]
             max_vpc_ipv4_subnet_prefix = 27
             [deployment]
@@ -728,7 +725,6 @@ mod test {
                 },
                 updates: Some(UpdatesConfig {
                     trusted_root: Utf8PathBuf::from("/path/to/root.json"),
-                    default_base_url: "http://example.invalid/".into(),
                 }),
                 schema: None,
                 tunables: Tunables { max_vpc_ipv4_subnet_prefix: 27 },
diff --git a/common/src/update.rs b/common/src/update.rs
index 28d5ae50a6..9feff1f868 100644
--- a/common/src/update.rs
+++ b/common/src/update.rs
@@ -95,6 +95,13 @@ pub struct ArtifactId {
     pub kind: ArtifactKind,
 }
 
+/// Used for user-friendly messages.
+impl fmt::Display for ArtifactId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{} v{} ({})", self.name, self.version, self.kind)
+    }
+}
+
 /// A hash-based identifier for an artifact.
/// /// Some places, e.g. the installinator, request artifacts by hash rather than diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 52ee7034dd..87703cce77 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -13,8 +13,10 @@ assert_matches.workspace = true async-trait.workspace = true base64.workspace = true buf-list.workspace = true +bytes.workspace = true cancel-safe-futures.workspace = true camino.workspace = true +camino-tempfile.workspace = true clap.workspace = true chrono.workspace = true crucible-agent-client.workspace = true @@ -88,6 +90,7 @@ oximeter-instruments = { workspace = true, features = ["http-instruments"] } oximeter-producer.workspace = true rustls = { workspace = true } rustls-pemfile = { workspace = true } +update-common.workspace = true omicron-workspace-hack.workspace = true [dev-dependencies] @@ -120,6 +123,8 @@ rustls = { workspace = true } subprocess.workspace = true term.workspace = true trust-dns-resolver.workspace = true +tufaceous.workspace = true +tufaceous-lib.workspace = true httptest.workspace = true strum.workspace = true diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 8fdf05e876..5c0a68c253 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -49,7 +49,6 @@ mod project; mod semver_version; mod switch_interface; mod switch_port; -mod system_update; // These actually represent subqueries, not real table. // However, they must be defined in the same crate as our tables // for join-based marker trait generation. @@ -78,8 +77,8 @@ mod sled_underlay_subnet_allocation; mod snapshot; mod ssh_key; mod switch; +mod tuf_repo; mod unsigned; -mod update_artifact; mod user_builtin; mod utilization; mod virtual_provisioning_collection; @@ -165,8 +164,7 @@ pub use ssh_key::*; pub use switch::*; pub use switch_interface::*; pub use switch_port::*; -pub use system_update::*; -pub use update_artifact::*; +pub use tuf_repo::*; pub use user_builtin::*; pub use utilization::*; pub use virtual_provisioning_collection::*; diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 954647f70d..eb71a12f04 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(26, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(27, 0, 0); table! { disk (id) { @@ -1177,72 +1177,45 @@ table! { } table! { - update_artifact (name, version, kind) { - name -> Text, - version -> Text, - kind -> crate::KnownArtifactKindEnum, + tuf_repo (id) { + id -> Uuid, + time_created -> Timestamptz, + sha256 -> Text, targets_role_version -> Int8, valid_until -> Timestamptz, - target_name -> Text, - target_sha256 -> Text, - target_length -> Int8, + system_version -> Text, + file_name -> Text, } } table! { - system_update (id) { - id -> Uuid, - time_created -> Timestamptz, - time_modified -> Timestamptz, - + tuf_artifact (name, version, kind) { + name -> Text, version -> Text, - } -} - -table! { - update_deployment (id) { - id -> Uuid, + kind -> Text, time_created -> Timestamptz, - time_modified -> Timestamptz, - - version -> Text, - status -> crate::UpdateStatusEnum, - // TODO: status reason for updateable_component + sha256 -> Text, + artifact_size -> Int8, } } table! 
{ - component_update (id) { - id -> Uuid, - time_created -> Timestamptz, - time_modified -> Timestamptz, - - version -> Text, - component_type -> crate::UpdateableComponentTypeEnum, - } -} - -table! { - updateable_component (id) { - id -> Uuid, - time_created -> Timestamptz, - time_modified -> Timestamptz, - - device_id -> Text, - version -> Text, - system_version -> Text, - component_type -> crate::UpdateableComponentTypeEnum, - status -> crate::UpdateStatusEnum, - // TODO: status reason for updateable_component + tuf_repo_artifact (tuf_repo_id, tuf_artifact_name, tuf_artifact_version, tuf_artifact_kind) { + tuf_repo_id -> Uuid, + tuf_artifact_name -> Text, + tuf_artifact_version -> Text, + tuf_artifact_kind -> Text, } } -table! { - system_update_component_update (system_update_id, component_update_id) { - system_update_id -> Uuid, - component_update_id -> Uuid, - } -} +allow_tables_to_appear_in_same_query!( + tuf_repo, + tuf_repo_artifact, + tuf_artifact +); +joinable!(tuf_repo_artifact -> tuf_repo (tuf_repo_id)); +// Can't specify joinable for a composite primary key (tuf_repo_artifact -> +// tuf_artifact). /* hardware inventory */ @@ -1432,13 +1405,6 @@ table! { } } -allow_tables_to_appear_in_same_query!( - system_update, - component_update, - system_update_component_update, -); -joinable!(system_update_component_update -> component_update (component_update_id)); - allow_tables_to_appear_in_same_query!(ip_pool_range, ip_pool, ip_pool_resource); joinable!(ip_pool_range -> ip_pool (ip_pool_id)); joinable!(ip_pool_resource -> ip_pool (ip_pool_id)); diff --git a/nexus/db-model/src/semver_version.rs b/nexus/db-model/src/semver_version.rs index 8e168e11a2..f314e98ab3 100644 --- a/nexus/db-model/src/semver_version.rs +++ b/nexus/db-model/src/semver_version.rs @@ -24,6 +24,8 @@ use serde::{Deserialize, Serialize}; Serialize, Deserialize, PartialEq, + Eq, + Hash, Display, )] #[diesel(sql_type = sql_types::Text)] diff --git a/nexus/db-model/src/system_update.rs b/nexus/db-model/src/system_update.rs deleted file mode 100644 index 17421936b1..0000000000 --- a/nexus/db-model/src/system_update.rs +++ /dev/null @@ -1,306 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use crate::{ - impl_enum_type, - schema::{ - component_update, system_update, system_update_component_update, - update_deployment, updateable_component, - }, - SemverVersion, -}; -use db_macros::Asset; -use nexus_types::{ - external_api::{params, shared, views}, - identity::Asset, -}; -use omicron_common::api::external; -use serde::{Deserialize, Serialize}; -use uuid::Uuid; - -#[derive( - Queryable, - Insertable, - Selectable, - Clone, - Debug, - Asset, - Serialize, - Deserialize, -)] -#[diesel(table_name = system_update)] -pub struct SystemUpdate { - #[diesel(embed)] - pub identity: SystemUpdateIdentity, - pub version: SemverVersion, -} - -impl SystemUpdate { - /// Can fail if version numbers are too high. 
-    pub fn new(
-        version: external::SemverVersion,
-    ) -> Result<Self, external::Error> {
-        Ok(Self {
-            identity: SystemUpdateIdentity::new(Uuid::new_v4()),
-            version: SemverVersion(version),
-        })
-    }
-}
-
-impl From<SystemUpdate> for views::SystemUpdate {
-    fn from(system_update: SystemUpdate) -> Self {
-        Self {
-            identity: system_update.identity(),
-            version: system_update.version.into(),
-        }
-    }
-}
-
-impl_enum_type!(
-    #[derive(SqlType, Debug, QueryId)]
-    #[diesel(postgres_type(name = "update_status", schema = "public"))]
-    pub struct UpdateStatusEnum;
-
-    #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)]
-    #[diesel(sql_type = UpdateStatusEnum)]
-    pub enum UpdateStatus;
-
-    Updating => b"updating"
-    Steady => b"steady"
-);
-
-impl From<UpdateStatus> for views::UpdateStatus {
-    fn from(status: UpdateStatus) -> Self {
-        match status {
-            UpdateStatus::Updating => Self::Updating,
-            UpdateStatus::Steady => Self::Steady,
-        }
-    }
-}
-
-impl_enum_type!(
-    #[derive(SqlType, Debug, QueryId)]
-    #[diesel(postgres_type(name = "updateable_component_type", schema = "public"))]
-    pub struct UpdateableComponentTypeEnum;
-
-    #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)]
-    #[diesel(sql_type = UpdateableComponentTypeEnum)]
-    pub enum UpdateableComponentType;
-
-    BootloaderForRot => b"bootloader_for_rot"
-    BootloaderForSp => b"bootloader_for_sp"
-    BootloaderForHostProc => b"bootloader_for_host_proc"
-    HubrisForPscRot => b"hubris_for_psc_rot"
-    HubrisForPscSp => b"hubris_for_psc_sp"
-    HubrisForSidecarRot => b"hubris_for_sidecar_rot"
-    HubrisForSidecarSp => b"hubris_for_sidecar_sp"
-    HubrisForGimletRot => b"hubris_for_gimlet_rot"
-    HubrisForGimletSp => b"hubris_for_gimlet_sp"
-    HeliosHostPhase1 => b"helios_host_phase_1"
-    HeliosHostPhase2 => b"helios_host_phase_2"
-    HostOmicron => b"host_omicron"
-);
-
-impl From<shared::UpdateableComponentType> for UpdateableComponentType {
-    fn from(component_type: shared::UpdateableComponentType) -> Self {
-        match component_type {
-            shared::UpdateableComponentType::BootloaderForRot => {
-                UpdateableComponentType::BootloaderForRot
-            }
-            shared::UpdateableComponentType::BootloaderForSp => {
-                UpdateableComponentType::BootloaderForSp
-            }
-            shared::UpdateableComponentType::BootloaderForHostProc => {
-                UpdateableComponentType::BootloaderForHostProc
-            }
-            shared::UpdateableComponentType::HubrisForPscRot => {
-                UpdateableComponentType::HubrisForPscRot
-            }
-            shared::UpdateableComponentType::HubrisForPscSp => {
-                UpdateableComponentType::HubrisForPscSp
-            }
-            shared::UpdateableComponentType::HubrisForSidecarRot => {
-                UpdateableComponentType::HubrisForSidecarRot
-            }
-            shared::UpdateableComponentType::HubrisForSidecarSp => {
-                UpdateableComponentType::HubrisForSidecarSp
-            }
-            shared::UpdateableComponentType::HubrisForGimletRot => {
-                UpdateableComponentType::HubrisForGimletRot
-            }
-            shared::UpdateableComponentType::HubrisForGimletSp => {
-                UpdateableComponentType::HubrisForGimletSp
-            }
-            shared::UpdateableComponentType::HeliosHostPhase1 => {
-                UpdateableComponentType::HeliosHostPhase1
-            }
-            shared::UpdateableComponentType::HeliosHostPhase2 => {
-                UpdateableComponentType::HeliosHostPhase2
-            }
-            shared::UpdateableComponentType::HostOmicron => {
-                UpdateableComponentType::HostOmicron
-            }
-        }
-    }
-}
-
-impl Into<shared::UpdateableComponentType> for UpdateableComponentType {
-    fn into(self) -> shared::UpdateableComponentType {
-        match self {
-            UpdateableComponentType::BootloaderForRot => {
-                shared::UpdateableComponentType::BootloaderForRot
-            }
-            UpdateableComponentType::BootloaderForSp => {
-                shared::UpdateableComponentType::BootloaderForSp
-            }
-            UpdateableComponentType::BootloaderForHostProc => {
-                shared::UpdateableComponentType::BootloaderForHostProc
-            }
-            UpdateableComponentType::HubrisForPscRot => {
-                shared::UpdateableComponentType::HubrisForPscRot
-            }
-            UpdateableComponentType::HubrisForPscSp => {
-                shared::UpdateableComponentType::HubrisForPscSp
-            }
-            UpdateableComponentType::HubrisForSidecarRot => {
-                shared::UpdateableComponentType::HubrisForSidecarRot
-            }
-            UpdateableComponentType::HubrisForSidecarSp => {
-                shared::UpdateableComponentType::HubrisForSidecarSp
-            }
-            UpdateableComponentType::HubrisForGimletRot => {
-                shared::UpdateableComponentType::HubrisForGimletRot
-            }
-            UpdateableComponentType::HubrisForGimletSp => {
-                shared::UpdateableComponentType::HubrisForGimletSp
-            }
-            UpdateableComponentType::HeliosHostPhase1 => {
-                shared::UpdateableComponentType::HeliosHostPhase1
-            }
-            UpdateableComponentType::HeliosHostPhase2 => {
-                shared::UpdateableComponentType::HeliosHostPhase2
-            }
-            UpdateableComponentType::HostOmicron => {
-                shared::UpdateableComponentType::HostOmicron
-            }
-        }
-    }
-}
-
-#[derive(
-    Queryable,
-    Insertable,
-    Selectable,
-    Clone,
-    Debug,
-    Asset,
-    Serialize,
-    Deserialize,
-)]
-#[diesel(table_name = component_update)]
-pub struct ComponentUpdate {
-    #[diesel(embed)]
-    pub identity: ComponentUpdateIdentity,
-    pub version: SemverVersion,
-    pub component_type: UpdateableComponentType,
-}
-
-#[derive(
-    Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize,
-)]
-#[diesel(table_name = system_update_component_update)]
-pub struct SystemUpdateComponentUpdate {
-    pub component_update_id: Uuid,
-    pub system_update_id: Uuid,
-}
-
-impl From<ComponentUpdate> for views::ComponentUpdate {
-    fn from(component_update: ComponentUpdate) -> Self {
-        Self {
-            identity: component_update.identity(),
-            version: component_update.version.into(),
-            component_type: component_update.component_type.into(),
-        }
-    }
-}
-
-#[derive(
-    Queryable,
-    Insertable,
-    Selectable,
-    Clone,
-    Debug,
-    Asset,
-    Serialize,
-    Deserialize,
-)]
-#[diesel(table_name = updateable_component)]
-pub struct UpdateableComponent {
-    #[diesel(embed)]
-    pub identity: UpdateableComponentIdentity,
-    pub device_id: String,
-    pub component_type: UpdateableComponentType,
-    pub version: SemverVersion,
-    pub system_version: SemverVersion,
-    pub status: UpdateStatus,
-    // TODO: point to the actual update artifact
-}
-
-impl TryFrom<params::UpdateableComponentCreate> for UpdateableComponent {
-    type Error = external::Error;
-
-    fn try_from(
-        create: params::UpdateableComponentCreate,
-    ) -> Result<Self, Self::Error> {
-        Ok(Self {
-            identity: UpdateableComponentIdentity::new(Uuid::new_v4()),
-            version: SemverVersion(create.version),
-            system_version: SemverVersion(create.system_version),
-            component_type: create.component_type.into(),
-            device_id: create.device_id,
-            status: UpdateStatus::Steady,
-        })
-    }
-}
-
-impl From<UpdateableComponent> for views::UpdateableComponent {
-    fn from(component: UpdateableComponent) -> Self {
-        Self {
-            identity: component.identity(),
-            device_id: component.device_id,
-            component_type: component.component_type.into(),
-            version: component.version.into(),
-            system_version: component.system_version.into(),
-            status: component.status.into(),
-        }
-    }
-}
-
-#[derive(
-    Queryable,
-    Insertable,
-    Selectable,
-    Clone,
-    Debug,
-    Asset,
-    Serialize,
-    Deserialize,
-)]
-#[diesel(table_name = update_deployment)]
-pub struct UpdateDeployment {
-    #[diesel(embed)]
-    pub identity: UpdateDeploymentIdentity,
-    pub version: SemverVersion,
-    pub status: UpdateStatus,
-}
-
-impl From<UpdateDeployment> for views::UpdateDeployment {
-    fn from(deployment: UpdateDeployment) -> Self {
-        Self {
-            identity: deployment.identity(),
-            version: deployment.version.into(),
-            status: deployment.status.into(),
-        }
-    }
-}
diff --git a/nexus/db-model/src/tuf_repo.rs b/nexus/db-model/src/tuf_repo.rs
new file mode 100644
index 0000000000..5fa2a0aac7
--- /dev/null
+++ b/nexus/db-model/src/tuf_repo.rs
@@ -0,0 +1,312 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::str::FromStr;
+
+use crate::{
+    schema::{tuf_artifact, tuf_repo, tuf_repo_artifact},
+    SemverVersion,
+};
+use chrono::{DateTime, Utc};
+use diesel::{deserialize::FromSql, serialize::ToSql, sql_types::Text};
+use omicron_common::{
+    api::external,
+    update::{
+        ArtifactHash as ExternalArtifactHash, ArtifactId as ExternalArtifactId,
+        ArtifactKind,
+    },
+};
+use serde::{Deserialize, Serialize};
+use std::fmt;
+use uuid::Uuid;
+
+/// A description of a TUF update: a repo, along with the artifacts it
+/// contains.
+///
+/// This is the internal variant of [`external::TufRepoDescription`].
+#[derive(Debug, Clone)]
+pub struct TufRepoDescription {
+    /// The repository.
+    pub repo: TufRepo,
+
+    /// The artifacts.
+    pub artifacts: Vec<TufArtifact>,
+}
+
+impl TufRepoDescription {
+    /// Creates a new `TufRepoDescription` from an
+    /// [`external::TufRepoDescription`].
+    ///
+    /// This is not implemented as a `From` impl because we insert new fields
+    /// as part of the process, which `From` doesn't necessarily communicate
+    /// and can be surprising.
+    pub fn from_external(description: external::TufRepoDescription) -> Self {
+        Self {
+            repo: TufRepo::from_external(description.repo),
+            artifacts: description
+                .artifacts
+                .into_iter()
+                .map(TufArtifact::from_external)
+                .collect(),
+        }
+    }
+
+    /// Converts self into [`external::TufRepoDescription`].
+    pub fn into_external(self) -> external::TufRepoDescription {
+        external::TufRepoDescription {
+            repo: self.repo.into_external(),
+            artifacts: self
+                .artifacts
+                .into_iter()
+                .map(TufArtifact::into_external)
+                .collect(),
+        }
+    }
+}
+
+/// A record representing an uploaded TUF repository.
+///
+/// This is the internal variant of [`external::TufRepoMeta`].
+#[derive(
+    Queryable, Identifiable, Insertable, Clone, Debug, Selectable, AsChangeset,
+)]
+#[diesel(table_name = tuf_repo)]
+pub struct TufRepo {
+    pub id: Uuid,
+    pub time_created: DateTime<Utc>,
+    // XXX: We're overloading ArtifactHash here to also mean the hash of the
+    // repository zip itself.
+    pub sha256: ArtifactHash,
+    pub targets_role_version: i64,
+    pub valid_until: DateTime<Utc>,
+    pub system_version: SemverVersion,
+    pub file_name: String,
+}
+
+impl TufRepo {
+    /// Creates a new `TufRepo` ready for insertion.
+    pub fn new(
+        sha256: ArtifactHash,
+        targets_role_version: u64,
+        valid_until: DateTime<Utc>,
+        system_version: SemverVersion,
+        file_name: String,
+    ) -> Self {
+        Self {
+            id: Uuid::new_v4(),
+            time_created: Utc::now(),
+            sha256,
+            targets_role_version: targets_role_version as i64,
+            valid_until,
+            system_version,
+            file_name,
+        }
+    }
+
+    /// Creates a new `TufRepo` ready for insertion from an external
+    /// `TufRepoMeta`.
+    ///
+    /// This is not implemented as a `From` impl because we insert new fields
+    /// as part of the process, which `From` doesn't necessarily communicate
+    /// and can be surprising.
+    pub fn from_external(repo: external::TufRepoMeta) -> Self {
+        Self::new(
+            repo.hash.into(),
+            repo.targets_role_version,
+            repo.valid_until,
+            repo.system_version.into(),
+            repo.file_name,
+        )
+    }
+
+    /// Converts self into [`external::TufRepoMeta`].
+    pub fn into_external(self) -> external::TufRepoMeta {
+        external::TufRepoMeta {
+            hash: self.sha256.into(),
+            targets_role_version: self.targets_role_version as u64,
+            valid_until: self.valid_until,
+            system_version: self.system_version.into(),
+            file_name: self.file_name,
+        }
+    }
+
+    /// Returns the repository's ID.
+    pub fn id(&self) -> Uuid {
+        self.id
+    }
+
+    /// Returns the targets role version.
+    pub fn targets_role_version(&self) -> u64 {
+        self.targets_role_version as u64
+    }
+}
+
+#[derive(Queryable, Insertable, Clone, Debug, Selectable, AsChangeset)]
+#[diesel(table_name = tuf_artifact)]
+pub struct TufArtifact {
+    #[diesel(embed)]
+    pub id: ArtifactId,
+    pub time_created: DateTime<Utc>,
+    pub sha256: ArtifactHash,
+    artifact_size: i64,
+}
+
+impl TufArtifact {
+    /// Creates a new `TufArtifact` ready for insertion.
+    pub fn new(
+        id: ArtifactId,
+        sha256: ArtifactHash,
+        artifact_size: u64,
+    ) -> Self {
+        Self {
+            id,
+            time_created: Utc::now(),
+            sha256,
+            artifact_size: artifact_size as i64,
+        }
+    }
+
+    /// Creates a new `TufArtifact` ready for insertion from an external
+    /// `TufArtifactMeta`.
+    ///
+    /// This is not implemented as a `From` impl because we insert new fields
+    /// as part of the process, which `From` doesn't necessarily communicate
+    /// and can be surprising.
+    pub fn from_external(artifact: external::TufArtifactMeta) -> Self {
+        Self::new(artifact.id.into(), artifact.hash.into(), artifact.size)
+    }
+
+    /// Converts self into [`external::TufArtifactMeta`].
+    pub fn into_external(self) -> external::TufArtifactMeta {
+        external::TufArtifactMeta {
+            id: self.id.into(),
+            hash: self.sha256.into(),
+            size: self.artifact_size as u64,
+        }
+    }
+
+    /// Returns the artifact's ID.
+    pub fn id(&self) -> (String, SemverVersion, String) {
+        (self.id.name.clone(), self.id.version.clone(), self.id.kind.clone())
+    }
+
+    /// Returns the artifact length in bytes.
+    pub fn artifact_size(&self) -> u64 {
+        self.artifact_size as u64
+    }
+}
+
+/// The ID (primary key) of a [`TufArtifact`].
+///
+/// This is the internal variant of a [`ExternalArtifactId`].
+#[derive(
+    Queryable,
+    Insertable,
+    Clone,
+    Debug,
+    Selectable,
+    PartialEq,
+    Eq,
+    Hash,
+    Deserialize,
+    Serialize,
+)]
+#[diesel(table_name = tuf_artifact)]
+pub struct ArtifactId {
+    pub name: String,
+    pub version: SemverVersion,
+    pub kind: String,
+}
+
+impl From<ExternalArtifactId> for ArtifactId {
+    fn from(id: ExternalArtifactId) -> Self {
+        Self {
+            name: id.name,
+            version: id.version.into(),
+            kind: id.kind.as_str().to_owned(),
+        }
+    }
+}
+
+impl From<ArtifactId> for ExternalArtifactId {
+    fn from(id: ArtifactId) -> Self {
+        Self {
+            name: id.name,
+            version: id.version.into(),
+            kind: ArtifactKind::new(id.kind),
+        }
+    }
+}
+
+impl fmt::Display for ArtifactId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // This is the same as ExternalArtifactId's Display impl.
+        write!(f, "{} v{} ({})", self.name, self.version, self.kind)
+    }
+}
+
+/// Required by the authz_resource macro.
+impl From<ArtifactId> for (String, SemverVersion, String) {
+    fn from(id: ArtifactId) -> Self {
+        (id.name, id.version, id.kind)
+    }
+}
+
+/// A many-to-many relationship between [`TufRepo`] and [`TufArtifact`].
+#[derive(Queryable, Insertable, Clone, Debug, Selectable)]
+#[diesel(table_name = tuf_repo_artifact)]
+pub struct TufRepoArtifact {
+    pub tuf_repo_id: Uuid,
+    pub tuf_artifact_name: String,
+    pub tuf_artifact_version: SemverVersion,
+    pub tuf_artifact_kind: String,
+}
+
+/// A wrapper around omicron-common's [`ArtifactHash`](ExternalArtifactHash),
+/// supported by Diesel.
+#[derive(
+    Copy,
+    Clone,
+    Debug,
+    AsExpression,
+    FromSqlRow,
+    Serialize,
+    Deserialize,
+    PartialEq,
+)]
+#[diesel(sql_type = Text)]
+#[serde(transparent)]
+pub struct ArtifactHash(pub ExternalArtifactHash);
+
+NewtypeFrom! { () pub struct ArtifactHash(ExternalArtifactHash); }
+NewtypeDeref! { () pub struct ArtifactHash(ExternalArtifactHash); }
+
+impl fmt::Display for ArtifactHash {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl ToSql<Text, diesel::pg::Pg> for ArtifactHash {
+    fn to_sql<'a>(
+        &'a self,
+        out: &mut diesel::serialize::Output<'a, '_, diesel::pg::Pg>,
+    ) -> diesel::serialize::Result {
+        <String as ToSql<Text, diesel::pg::Pg>>::to_sql(
+            &self.0.to_string(),
+            &mut out.reborrow(),
+        )
+    }
+}
+
+impl FromSql<Text, diesel::pg::Pg> for ArtifactHash {
+    fn from_sql(
+        bytes: diesel::pg::PgValue<'_>,
+    ) -> diesel::deserialize::Result<Self> {
+        let s = String::from_sql(bytes)?;
+        ExternalArtifactHash::from_str(&s)
+            .map(ArtifactHash)
+            .map_err(|e| e.into())
+    }
+}
diff --git a/nexus/db-model/src/update_artifact.rs b/nexus/db-model/src/update_artifact.rs
deleted file mode 100644
index 97c57b44cc..0000000000
--- a/nexus/db-model/src/update_artifact.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this
-// file, You can obtain one at https://mozilla.org/MPL/2.0/.
-
-use super::impl_enum_wrapper;
-use crate::schema::update_artifact;
-use crate::SemverVersion;
-use chrono::{DateTime, Utc};
-use omicron_common::api::internal;
-use parse_display::Display;
-use serde::Deserialize;
-use serde::Serialize;
-use std::io::Write;
-
-impl_enum_wrapper!(
-    #[derive(SqlType, Debug, QueryId)]
-    #[diesel(postgres_type(name = "update_artifact_kind", schema = "public"))]
-    pub struct KnownArtifactKindEnum;
-
-    #[derive(Clone, Copy, Debug, Display, AsExpression, FromSqlRow, PartialEq, Eq, Serialize, Deserialize)]
-    #[display("{0}")]
-    #[diesel(sql_type = KnownArtifactKindEnum)]
-    pub struct KnownArtifactKind(pub internal::nexus::KnownArtifactKind);
-
-    // Enum values
-    GimletSp => b"gimlet_sp"
-    GimletRot => b"gimlet_rot"
-    Host => b"host"
-    Trampoline => b"trampoline"
-    ControlPlane => b"control_plane"
-    PscSp => b"psc_sp"
-    PscRot => b"psc_rot"
-    SwitchSp => b"switch_sp"
-    SwitchRot => b"switch_rot"
-);
-
-#[derive(
-    Queryable, Insertable, Clone, Debug, Display, Selectable, AsChangeset,
-)]
-#[diesel(table_name = update_artifact)]
-#[display("{kind} \"{name}\" v{version}")]
-pub struct UpdateArtifact {
-    pub name: String,
-    /// Version of the artifact itself
-    pub version: SemverVersion,
-    pub kind: KnownArtifactKind,
-    /// `version` field of targets.json from the repository
-    // FIXME this *should* be a NonZeroU64
-    pub targets_role_version: i64,
-    pub valid_until: DateTime<Utc>,
-    pub target_name: String,
-    // FIXME should this be [u8; 32]?
- pub target_sha256: String, - // FIXME this *should* be a u64 - pub target_length: i64, -} - -impl UpdateArtifact { - pub fn id(&self) -> (String, SemverVersion, KnownArtifactKind) { - (self.name.clone(), self.version.clone(), self.kind) - } -} diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index cae42a0944..3240c54f3f 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -43,6 +43,7 @@ sled-agent-client.workspace = true slog.workspace = true static_assertions.workspace = true steno.workspace = true +swrite.workspace = true thiserror.workspace = true tokio = { workspace = true, features = [ "full" ] } uuid.workspace = true diff --git a/nexus/db-queries/src/authz/api_resources.rs b/nexus/db-queries/src/authz/api_resources.rs index 444a00d5ad..b4fd4e1890 100644 --- a/nexus/db-queries/src/authz/api_resources.rs +++ b/nexus/db-queries/src/authz/api_resources.rs @@ -36,8 +36,7 @@ use crate::authn; use crate::context::OpContext; use crate::db; use crate::db::fixed_data::FLEET_ID; -use crate::db::model::KnownArtifactKind; -use crate::db::model::SemverVersion; +use crate::db::model::{ArtifactId, SemverVersion}; use crate::db::DataStore; use authz_macros::authz_resource; use futures::future::BoxFuture; @@ -1067,35 +1066,28 @@ authz_resource! { } authz_resource! { - name = "UpdateArtifact", + name = "TufRepo", parent = "Fleet", - primary_key = (String, SemverVersion, KnownArtifactKind), - roles_allowed = false, - polar_snippet = FleetChild, -} - -authz_resource! { - name = "Certificate", - parent = "Silo", primary_key = Uuid, roles_allowed = false, - polar_snippet = Custom, + polar_snippet = FleetChild, } authz_resource! { - name = "SystemUpdate", + name = "TufArtifact", parent = "Fleet", - primary_key = Uuid, + primary_key = (String, SemverVersion, String), + input_key = ArtifactId, roles_allowed = false, polar_snippet = FleetChild, } authz_resource! { - name = "UpdateDeployment", - parent = "Fleet", + name = "Certificate", + parent = "Silo", primary_key = Uuid, roles_allowed = false, - polar_snippet = FleetChild, + polar_snippet = Custom, } authz_resource! 
{
diff --git a/nexus/db-queries/src/authz/oso_generic.rs b/nexus/db-queries/src/authz/oso_generic.rs
index 9b842216b4..dd646a1c98 100644
--- a/nexus/db-queries/src/authz/oso_generic.rs
+++ b/nexus/db-queries/src/authz/oso_generic.rs
@@ -154,12 +154,11 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result<OsoInit, anyhow::Error> {
         IdentityProvider::init(),
         SamlIdentityProvider::init(),
         Sled::init(),
+        TufRepo::init(),
+        TufArtifact::init(),
         Zpool::init(),
         Service::init(),
-        UpdateArtifact::init(),
         UserBuiltin::init(),
-        SystemUpdate::init(),
-        UpdateDeployment::init(),
     ];
 
     for init in generated_inits {
diff --git a/nexus/db-queries/src/authz/policy_test/resources.rs b/nexus/db-queries/src/authz/policy_test/resources.rs
index 9cc4e28790..3e87f6db51 100644
--- a/nexus/db-queries/src/authz/policy_test/resources.rs
+++ b/nexus/db-queries/src/authz/policy_test/resources.rs
@@ -7,6 +7,8 @@ use super::resource_builder::ResourceBuilder;
 use super::resource_builder::ResourceSet;
 use crate::authz;
+use crate::db::model::ArtifactId;
+use nexus_db_model::SemverVersion;
 use omicron_common::api::external::LookupType;
 use oso::PolarClass;
 use std::collections::BTreeSet;
@@ -126,20 +128,23 @@ pub async fn make_resources(
         LookupType::ById(blueprint_id),
     ));
 
-    let system_update_id =
-        "9c86d713-1bc2-4927-9892-ada3eb6f5f62".parse().unwrap();
-    builder.new_resource(authz::SystemUpdate::new(
+    let tuf_repo_id = "3c52d72f-cbf7-4951-a62f-a4154e74da87".parse().unwrap();
+    builder.new_resource(authz::TufRepo::new(
         authz::FLEET,
-        system_update_id,
-        LookupType::ById(system_update_id),
+        tuf_repo_id,
+        LookupType::ById(tuf_repo_id),
     ));
 
-    let update_deployment_id =
-        "c617a035-7c42-49ff-a36a-5dfeee382832".parse().unwrap();
-    builder.new_resource(authz::UpdateDeployment::new(
+    let artifact_id = ArtifactId {
+        name: "a".to_owned(),
+        version: SemverVersion("1.0.0".parse().unwrap()),
+        kind: "b".to_owned(),
+    };
+    let artifact_id_desc = artifact_id.to_string();
+    builder.new_resource(authz::TufArtifact::new(
         authz::FLEET,
-        update_deployment_id,
-        LookupType::ById(update_deployment_id),
+        artifact_id,
+        LookupType::ByCompositeId(artifact_id_desc),
     ));
 
     let address_lot_id =
@@ -375,7 +380,6 @@ pub fn exempted_authz_classes() -> BTreeSet<String> {
         authz::RouterRoute::get_polar_class(),
         authz::ConsoleSession::get_polar_class(),
         authz::RoleBuiltin::get_polar_class(),
-        authz::UpdateArtifact::get_polar_class(),
         authz::UserBuiltin::get_polar_class(),
     ]
     .into_iter()
diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs
index 5fd16e2633..78a7aeda87 100644
--- a/nexus/db-queries/src/db/datastore/mod.rs
+++ b/nexus/db-queries/src/db/datastore/mod.rs
@@ -397,14 +397,12 @@ mod test {
     use crate::db::identity::Asset;
     use crate::db::lookup::LookupPath;
     use crate::db::model::{
-        BlockSize, ComponentUpdate, ComponentUpdateIdentity, ConsoleSession,
-        Dataset, DatasetKind, ExternalIp, PhysicalDisk, PhysicalDiskKind,
-        Project, Rack, Region, Service, ServiceKind, SiloUser, SledBaseboard,
-        SledProvisionState, SledSystemHardware, SledUpdate, SshKey,
-        SystemUpdate, UpdateableComponentType, VpcSubnet, Zpool,
+        BlockSize, ConsoleSession, Dataset, DatasetKind, ExternalIp,
+        PhysicalDisk, PhysicalDiskKind, Project, Rack, Region, Service,
+        ServiceKind, SiloUser, SledBaseboard, SledProvisionState,
+        SledSystemHardware, SledUpdate, SshKey, VpcSubnet, Zpool,
     };
     use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery;
-    use assert_matches::assert_matches;
     use chrono::{Duration, Utc};
     use futures::stream;
     use
futures::StreamExt; @@ -413,7 +411,7 @@ mod test { use nexus_types::external_api::params; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::{ - self, ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, + ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, }; use omicron_common::nexus_config::RegionAllocationStrategy; use omicron_test_utils::dev; @@ -1988,109 +1986,4 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } - - /// Expect DB error if we try to insert a system update with an id that - /// already exists. If version matches, update the existing row (currently - /// only time_modified) - #[tokio::test] - async fn test_system_update_conflict() { - let logctx = dev::test_setup_log("test_system_update_conflict"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - let v1 = external::SemverVersion::new(1, 0, 0); - let update1 = SystemUpdate::new(v1.clone()).unwrap(); - datastore - .upsert_system_update(&opctx, update1.clone()) - .await - .expect("Failed to create system update"); - - // same version, but different ID (generated by constructor). should - // conflict and therefore update time_modified, keeping the old ID - let update2 = SystemUpdate::new(v1).unwrap(); - let updated_update = datastore - .upsert_system_update(&opctx, update2.clone()) - .await - .unwrap(); - assert!(updated_update.identity.id == update1.identity.id); - assert!( - updated_update.identity.time_modified - != update1.identity.time_modified - ); - - // now let's do same ID, but different version. should conflict on the - // ID because it's the PK, but since the version doesn't match an - // existing row, it errors out instead of updating one - let update3 = - SystemUpdate::new(external::SemverVersion::new(2, 0, 0)).unwrap(); - let update3 = SystemUpdate { identity: update1.identity, ..update3 }; - let conflict = - datastore.upsert_system_update(&opctx, update3).await.unwrap_err(); - assert_matches!(conflict, Error::ObjectAlreadyExists { .. 
}); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - - /// Expect DB error if we try to insert a component update with a (version, - /// component_type) that already exists - #[tokio::test] - async fn test_component_update_conflict() { - let logctx = dev::test_setup_log("test_component_update_conflict"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - // we need a system update for the component updates to hang off of - let v1 = external::SemverVersion::new(1, 0, 0); - let system_update = SystemUpdate::new(v1.clone()).unwrap(); - datastore - .upsert_system_update(&opctx, system_update.clone()) - .await - .expect("Failed to create system update"); - - // create a component update, that's fine - let cu1 = ComponentUpdate { - identity: ComponentUpdateIdentity::new(Uuid::new_v4()), - component_type: UpdateableComponentType::HubrisForSidecarRot, - version: db::model::SemverVersion::new(1, 0, 0), - }; - datastore - .create_component_update( - &opctx, - system_update.identity.id, - cu1.clone(), - ) - .await - .expect("Failed to create component update"); - - // create a second component update with same version but different - // type, also fine - let cu2 = ComponentUpdate { - identity: ComponentUpdateIdentity::new(Uuid::new_v4()), - component_type: UpdateableComponentType::HubrisForSidecarSp, - version: db::model::SemverVersion::new(1, 0, 0), - }; - datastore - .create_component_update( - &opctx, - system_update.identity.id, - cu2.clone(), - ) - .await - .expect("Failed to create component update"); - - // but same type and version should fail - let cu3 = ComponentUpdate { - identity: ComponentUpdateIdentity::new(Uuid::new_v4()), - ..cu1 - }; - let conflict = datastore - .create_component_update(&opctx, system_update.identity.id, cu3) - .await - .unwrap_err(); - assert_matches!(conflict, Error::ObjectAlreadyExists { .. }); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } } diff --git a/nexus/db-queries/src/db/datastore/update.rs b/nexus/db-queries/src/db/datastore/update.rs index 0790bd458e..3725797f83 100644 --- a/nexus/db-queries/src/db/datastore/update.rs +++ b/nexus/db-queries/src/db/datastore/update.rs @@ -4,376 +4,368 @@ //! [`DataStore`] methods related to updates and artifacts. 
+use std::collections::HashMap; + use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::error::{public_error_from_diesel, ErrorHandler}; -use crate::db::model::{ - ComponentUpdate, SemverVersion, SystemUpdate, UpdateArtifact, - UpdateDeployment, UpdateStatus, UpdateableComponent, -}; -use crate::db::pagination::paginated; +use crate::db::model::SemverVersion; +use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; -use chrono::Utc; use diesel::prelude::*; -use nexus_db_model::SystemUpdateComponentUpdate; -use nexus_types::identity::Asset; +use diesel::result::Error as DieselError; +use nexus_db_model::{ArtifactHash, TufArtifact, TufRepo, TufRepoDescription}; use omicron_common::api::external::{ - CreateResult, DataPageParams, DeleteResult, InternalContext, ListResultVec, - LookupResult, LookupType, ResourceType, UpdateResult, + self, CreateResult, LookupResult, LookupType, ResourceType, + TufRepoInsertStatus, }; +use swrite::{swrite, SWrite}; use uuid::Uuid; -impl DataStore { - pub async fn update_artifact_upsert( - &self, - opctx: &OpContext, - artifact: UpdateArtifact, - ) -> CreateResult { - opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; +/// The return value of [`DataStore::update_tuf_repo_description_insert`]. +/// +/// This is similar to [`external::TufRepoInsertResponse`], but uses +/// nexus-db-model's types instead of external types. +pub struct TufRepoInsertResponse { + pub recorded: TufRepoDescription, + pub status: TufRepoInsertStatus, +} - use db::schema::update_artifact::dsl; - diesel::insert_into(dsl::update_artifact) - .values(artifact.clone()) - .on_conflict((dsl::name, dsl::version, dsl::kind)) - .do_update() - .set(artifact.clone()) - .returning(UpdateArtifact::as_returning()) - .get_result_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) +impl TufRepoInsertResponse { + pub fn into_external(self) -> external::TufRepoInsertResponse { + external::TufRepoInsertResponse { + recorded: self.recorded.into_external(), + status: self.status, + } } +} + +async fn artifacts_for_repo( + repo_id: Uuid, + conn: &async_bb8_diesel::Connection, +) -> Result, DieselError> { + use db::schema::tuf_artifact::dsl as tuf_artifact_dsl; + use db::schema::tuf_repo_artifact::dsl as tuf_repo_artifact_dsl; + + let join_on_dsl = tuf_artifact_dsl::name + .eq(tuf_repo_artifact_dsl::tuf_artifact_name) + .and( + tuf_artifact_dsl::version + .eq(tuf_repo_artifact_dsl::tuf_artifact_version), + ) + .and( + tuf_artifact_dsl::kind.eq(tuf_repo_artifact_dsl::tuf_artifact_kind), + ); + // Don't bother paginating because each repo should only have a few (under + // 20) artifacts. + tuf_repo_artifact_dsl::tuf_repo_artifact + .filter(tuf_repo_artifact_dsl::tuf_repo_id.eq(repo_id)) + .inner_join(tuf_artifact_dsl::tuf_artifact.on(join_on_dsl)) + .select(TufArtifact::as_select()) + .load_async(conn) + .await +} - pub async fn update_artifact_hard_delete_outdated( +impl DataStore { + /// Inserts a new TUF repository into the database. + /// + /// Returns the repository just inserted, or an existing + /// `TufRepoDescription` if one was already found. (This is not an upsert, + /// because if we know about an existing repo but with different contents, + /// we reject that.) 
+ pub async fn update_tuf_repo_insert( &self, opctx: &OpContext, - current_targets_role_version: i64, - ) -> DeleteResult { + description: TufRepoDescription, + ) -> CreateResult { opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let log = opctx.log.new( + slog::o!( + "method" => "update_tuf_repo_insert", + "uploaded_system_version" => description.repo.system_version.to_string(), + ), + ); - // We use the `targets_role_version` column in the table to delete any - // old rows, keeping the table in sync with the current copy of - // artifacts.json. - use db::schema::update_artifact::dsl; - diesel::delete(dsl::update_artifact) - .filter(dsl::targets_role_version.lt(current_targets_role_version)) - .execute_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map(|_rows_deleted| ()) - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - .internal_context("deleting outdated available artifacts") - } + let err = OptionalError::new(); + let err2 = err.clone(); - pub async fn upsert_system_update( - &self, - opctx: &OpContext, - update: SystemUpdate, - ) -> CreateResult { - opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; - - use db::schema::system_update::dsl::*; - - diesel::insert_into(system_update) - .values(update.clone()) - .on_conflict(version) - .do_update() - // for now the only modifiable field is time_modified, but we intend - // to add more metadata to this model - .set(time_modified.eq(Utc::now())) - .returning(SystemUpdate::as_returning()) - .get_result_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::SystemUpdate, - &update.version.to_string(), - ), + let conn = self.pool_connection_authorized(opctx).await?; + self.transaction_retry_wrapper("update_tuf_repo_insert") + .transaction(&conn, move |conn| { + insert_impl( + log.clone(), + conn, + description.clone(), + err2.clone(), ) }) - } - - // version is unique but not the primary key, so we can't use LookupPath to handle this for us - pub async fn system_update_fetch_by_version( - &self, - opctx: &OpContext, - target: SemverVersion, - ) -> LookupResult { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - use db::schema::system_update::dsl::*; - - let version_string = target.to_string(); - - system_update - .filter(version.eq(target)) - .select(SystemUpdate::as_select()) - .first_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::SystemUpdate, - LookupType::ByCompositeId(version_string), - ), - ) - }) - } - - pub async fn create_component_update( - &self, - opctx: &OpContext, - system_update_id: Uuid, - update: ComponentUpdate, - ) -> CreateResult { - opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; - - // TODO: make sure system update with that ID exists first - // let (.., db_system_update) = LookupPath::new(opctx, &self) - - use db::schema::component_update; - use db::schema::system_update_component_update as join_table; - - let version_string = update.version.to_string(); - - let conn = self.pool_connection_authorized(opctx).await?; - - self.transaction_retry_wrapper("create_component_update") - .transaction(&conn, |conn| { - let update = update.clone(); - async move { - let db_update = - diesel::insert_into(component_update::table) - .values(update.clone()) - .returning(ComponentUpdate::as_returning()) - .get_result_async(&conn) - .await?; - - diesel::insert_into(join_table::table) - .values(SystemUpdateComponentUpdate { - system_update_id, - component_update_id: update.id(), - }) - .returning(SystemUpdateComponentUpdate::as_returning()) - .get_result_async(&conn) - .await?; - - Ok(db_update) + if let Some(err) = err.take() { + err.into() + } else { + public_error_from_diesel(e, ErrorHandler::Server) } }) - .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::ComponentUpdate, - &version_string, - ), - ) - }) } - pub async fn system_updates_list_by_id( + /// Returns the TUF repo description corresponding to this system version. + pub async fn update_tuf_repo_get( &self, opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + system_version: SemverVersion, + ) -> LookupResult { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - use db::schema::system_update::dsl::*; + use db::schema::tuf_repo::dsl; - paginated(system_update, id, pagparams) - .select(SystemUpdate::as_select()) - .order(version.desc()) - .load_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } - - pub async fn system_update_components_list( - &self, - opctx: &OpContext, - system_update_id: Uuid, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - use db::schema::component_update; - use db::schema::system_update_component_update as join_table; - - component_update::table - .inner_join(join_table::table) - .filter(join_table::columns::system_update_id.eq(system_update_id)) - .select(ComponentUpdate::as_select()) - .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
- .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } - - pub async fn create_updateable_component( - &self, - opctx: &OpContext, - component: UpdateableComponent, - ) -> CreateResult { - opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; - - // make sure system version exists - let sys_version = component.system_version.clone(); - self.system_update_fetch_by_version(opctx, sys_version).await?; - - use db::schema::updateable_component::dsl::*; + let conn = self.pool_connection_authorized(opctx).await?; - diesel::insert_into(updateable_component) - .values(component.clone()) - .returning(UpdateableComponent::as_returning()) - .get_result_async(&*self.pool_connection_authorized(opctx).await?) + let repo = dsl::tuf_repo + .filter(dsl::system_version.eq(system_version.clone())) + .select(TufRepo::as_select()) + .first_async::(&*conn) .await .map_err(|e| { public_error_from_diesel( e, - ErrorHandler::Conflict( - ResourceType::UpdateableComponent, - &component.id().to_string(), // TODO: more informative identifier + ErrorHandler::NotFoundByLookup( + ResourceType::TufRepo, + LookupType::ByCompositeId(system_version.to_string()), ), ) - }) - } - - pub async fn updateable_components_list_by_id( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - use db::schema::updateable_component::dsl::*; - - paginated(updateable_component, id, pagparams) - .select(UpdateableComponent::as_select()) - .load_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } - - pub async fn lowest_component_system_version( - &self, - opctx: &OpContext, - ) -> LookupResult { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - use db::schema::updateable_component::dsl::*; - - updateable_component - .select(system_version) - .order(system_version.asc()) - .first_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } - - pub async fn highest_component_system_version( - &self, - opctx: &OpContext, - ) -> LookupResult { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - use db::schema::updateable_component::dsl::*; - - updateable_component - .select(system_version) - .order(system_version.desc()) - .first_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } - - pub async fn create_update_deployment( - &self, - opctx: &OpContext, - deployment: UpdateDeployment, - ) -> CreateResult { - opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; - - use db::schema::update_deployment::dsl::*; + })?; - diesel::insert_into(update_deployment) - .values(deployment.clone()) - .returning(UpdateDeployment::as_returning()) - .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
+ let artifacts = artifacts_for_repo(repo.id, &conn) .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::UpdateDeployment, - &deployment.id().to_string(), - ), - ) - }) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + Ok(TufRepoDescription { repo, artifacts }) } +} - pub async fn steady_update_deployment( - &self, - opctx: &OpContext, - deployment_id: Uuid, - ) -> UpdateResult { - // TODO: use authz::UpdateDeployment as the input so we can check Modify - // on that instead - opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; - - use db::schema::update_deployment::dsl::*; - - diesel::update(update_deployment) - .filter(id.eq(deployment_id)) - .set(( - status.eq(UpdateStatus::Steady), - time_modified.eq(diesel::dsl::now), - )) - .returning(UpdateDeployment::as_returning()) - .get_result_async(&*self.pool_connection_authorized(opctx).await?) +// This is a separate method mostly to make rustfmt not bail out on long lines +// of text. +async fn insert_impl( + log: slog::Logger, + conn: async_bb8_diesel::Connection, + desc: TufRepoDescription, + err: OptionalError, +) -> Result { + let repo = { + use db::schema::tuf_repo::dsl; + + // Load the existing repo by the system version, if + // any. + let existing_repo = dsl::tuf_repo + .filter(dsl::system_version.eq(desc.repo.system_version.clone())) + .select(TufRepo::as_select()) + .first_async::(&conn) .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::UpdateDeployment, - LookupType::ById(deployment_id), - ), - ) - }) + .optional()?; + + if let Some(existing_repo) = existing_repo { + // It doesn't matter whether the UUID of the repo matches or not, + // since it's uniquely generated. But do check the hash. + if existing_repo.sha256 != desc.repo.sha256 { + return Err(err.bail(InsertError::RepoHashMismatch { + system_version: desc.repo.system_version, + uploaded: desc.repo.sha256, + existing: existing_repo.sha256, + })); + } + + // Just return the existing repo along with all of its artifacts. + let artifacts = artifacts_for_repo(existing_repo.id, &conn).await?; + + let recorded = + TufRepoDescription { repo: existing_repo, artifacts }; + return Ok(TufRepoInsertResponse { + recorded, + status: TufRepoInsertStatus::AlreadyExists, + }); + } + + // This will fail if this ID or system version already exists with a + // different hash, but that's a weird situation that should error out + // anyway (IDs are not user controlled, hashes are). + diesel::insert_into(dsl::tuf_repo) + .values(desc.repo.clone()) + .execute_async(&conn) + .await?; + desc.repo.clone() + }; + + // Since we've inserted a new repo, we also need to insert the + // corresponding artifacts. + let all_artifacts = { + use db::schema::tuf_artifact::dsl; + + // Multiple repos can have the same artifacts, so we shouldn't error + // out if we find an existing artifact. However, we should check that + // the SHA256 hash and length matches if an existing artifact matches. 
+ + let mut filter_dsl = dsl::tuf_artifact.into_boxed(); + for artifact in desc.artifacts.clone() { + filter_dsl = filter_dsl.or_filter( + dsl::name + .eq(artifact.id.name) + .and(dsl::version.eq(artifact.id.version)) + .and(dsl::kind.eq(artifact.id.kind)), + ); + } + + let results = filter_dsl + .select(TufArtifact::as_select()) + .load_async(&conn) + .await?; + debug!( + log, + "found {} existing artifacts", results.len(); + "results" => ?results, + ); + + let results_by_id = results + .iter() + .map(|artifact| (&artifact.id, artifact)) + .collect::>(); + + // uploaded_and_existing contains non-matching artifacts in pairs of + // (uploaded, currently in db). + let mut uploaded_and_existing = Vec::new(); + let mut new_artifacts = Vec::new(); + let mut all_artifacts = Vec::new(); + + for uploaded_artifact in desc.artifacts.clone() { + let Some(&existing_artifact) = + results_by_id.get(&uploaded_artifact.id) + else { + // This is a new artifact. + new_artifacts.push(uploaded_artifact.clone()); + all_artifacts.push(uploaded_artifact); + continue; + }; + + if existing_artifact.sha256 != uploaded_artifact.sha256 + || existing_artifact.artifact_size() + != uploaded_artifact.artifact_size() + { + uploaded_and_existing.push(( + uploaded_artifact.clone(), + existing_artifact.clone(), + )); + } else { + all_artifacts.push(uploaded_artifact); + } + } + + if !uploaded_and_existing.is_empty() { + debug!(log, "uploaded artifacts don't match existing artifacts"; + "uploaded_and_existing" => ?uploaded_and_existing, + ); + return Err(err.bail(InsertError::ArtifactMismatch { + uploaded_and_existing, + })); + } + + debug!( + log, + "inserting {} new artifacts", new_artifacts.len(); + "new_artifacts" => ?new_artifacts, + ); + + // Insert new artifacts into the database. + diesel::insert_into(dsl::tuf_artifact) + .values(new_artifacts) + .execute_async(&conn) + .await?; + all_artifacts + }; + + // Finally, insert all the associations into the tuf_repo_artifact table. + { + use db::schema::tuf_repo_artifact::dsl; + + let mut values = Vec::new(); + for artifact in desc.artifacts.clone() { + slog::debug!( + log, + "inserting artifact into tuf_repo_artifact table"; + "artifact" => %artifact.id, + ); + values.push(( + dsl::tuf_repo_id.eq(desc.repo.id), + dsl::tuf_artifact_name.eq(artifact.id.name), + dsl::tuf_artifact_version.eq(artifact.id.version), + dsl::tuf_artifact_kind.eq(artifact.id.kind), + )); + } + + diesel::insert_into(dsl::tuf_repo_artifact) + .values(values) + .execute_async(&conn) + .await?; } - pub async fn update_deployments_list_by_id( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - use db::schema::update_deployment::dsl::*; - - paginated(update_deployment, id, pagparams) - .select(UpdateDeployment::as_select()) - .load_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } + let recorded = TufRepoDescription { repo, artifacts: all_artifacts }; + Ok(TufRepoInsertResponse { + recorded, + status: TufRepoInsertStatus::Inserted, + }) +} - pub async fn latest_update_deployment( - &self, - opctx: &OpContext, - ) -> LookupResult { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; +#[derive(Clone, Debug)] +enum InsertError { + /// The SHA256 of the uploaded repository doesn't match the SHA256 of the + /// existing repository with the same system version. 
+    RepoHashMismatch {
+        system_version: SemverVersion,
+        uploaded: ArtifactHash,
+        existing: ArtifactHash,
+    },
+    /// The SHA256 or length of one or more artifacts doesn't match the
+    /// corresponding entries in the database.
+    ArtifactMismatch {
+        // Pairs of (uploaded, existing) artifacts.
+        uploaded_and_existing: Vec<(TufArtifact, TufArtifact)>,
+    },
+}
 
-        use db::schema::update_deployment::dsl::*;
 
+impl From<InsertError> for external::Error {
+    fn from(e: InsertError) -> Self {
+        match e {
+            InsertError::RepoHashMismatch {
+                system_version,
+                uploaded,
+                existing,
+            } => external::Error::conflict(format!(
+                "Uploaded repository with system version {} has SHA256 hash \
+                 {}, but existing repository has SHA256 hash {}.",
+                system_version, uploaded, existing,
+            )),
+            InsertError::ArtifactMismatch { uploaded_and_existing } => {
+                // Build a message out of uploaded and existing artifacts.
+                let mut message = "Uploaded artifacts don't match existing \
+                                   artifacts with the same IDs:\n"
+                    .to_string();
+                for (uploaded, existing) in uploaded_and_existing {
+                    swrite!(
+                        message,
+                        "- Uploaded artifact {} has SHA256 hash {} and length \
+                         {}, but existing artifact {} has SHA256 hash {} and \
+                         length {}.\n",
+                        uploaded.id,
+                        uploaded.sha256,
+                        uploaded.artifact_size(),
+                        existing.id,
+                        existing.sha256,
+                        existing.artifact_size(),
+                    );
+                }
 
-        update_deployment
-            .select(UpdateDeployment::as_returning())
-            .order(time_created.desc())
-            .first_async(&*self.pool_connection_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+                external::Error::conflict(message)
+            }
+        }
     }
 }
diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs
index 028694dc4b..1cf14c5a8f 100644
--- a/nexus/db-queries/src/db/lookup.rs
+++ b/nexus/db-queries/src/db/lookup.rs
@@ -17,7 +17,6 @@ use async_bb8_diesel::AsyncRunQueryDsl;
 use db_macros::lookup_resource;
 use diesel::{ExpressionMethods, QueryDsl, SelectableHelper};
 use ipnetwork::IpNetwork;
-use nexus_db_model::KnownArtifactKind;
 use nexus_db_model::Name;
 use omicron_common::api::external::Error;
 use omicron_common::api::external::InternalContext;
@@ -431,27 +430,27 @@ impl<'a> LookupPath<'a> {
         )
     }
 
+    /// Select a resource of type TufRepo, identified by its UUID.
+    pub fn tuf_repo_id(self, id: Uuid) -> TufRepo<'a> {
+        TufRepo::PrimaryKey(Root { lookup_root: self }, id)
+    }
+
     /// Select a resource of type UpdateArtifact, identified by its
     /// `(name, version, kind)` tuple
-    pub fn update_artifact_tuple(
+    pub fn tuf_artifact_tuple(
         self,
-        name: &str,
+        name: impl Into<String>,
         version: db::model::SemverVersion,
-        kind: KnownArtifactKind,
-    ) -> UpdateArtifact<'a> {
-        UpdateArtifact::PrimaryKey(
+        kind: impl Into<String>,
+    ) -> TufArtifact<'a> {
+        TufArtifact::PrimaryKey(
             Root { lookup_root: self },
-            name.to_string(),
+            name.into(),
             version,
-            kind,
+            kind.into(),
         )
     }
 
-    /// Select a resource of type UpdateDeployment, identified by its id
-    pub fn update_deployment_id(self, id: Uuid) -> UpdateDeployment<'a> {
-        UpdateDeployment::PrimaryKey(Root { lookup_root: self }, id)
-    }
-
     /// Select a resource of type UserBuiltin, identified by its `name`
     pub fn user_builtin_id<'b>(self, id: Uuid) -> UserBuiltin<'b>
     where
@@ -857,21 +856,10 @@
 }
 
 lookup_resource! {
{ - name = "UpdateArtifact", - ancestors = [], - children = [], - lookup_by_name = false, - soft_deletes = false, - primary_key_columns = [ - { column_name = "name", rust_type = String }, - { column_name = "version", rust_type = db::model::SemverVersion }, - { column_name = "kind", rust_type = KnownArtifactKind } - ] -} - -lookup_resource! { - name = "SystemUpdate", + name = "TufRepo", ancestors = [], + // TODO: should this have TufArtifact as a child? This is a many-many + // relationship. children = [], lookup_by_name = false, soft_deletes = false, @@ -879,12 +867,16 @@ lookup_resource! { } lookup_resource! { - name = "UpdateDeployment", + name = "TufArtifact", ancestors = [], children = [], lookup_by_name = false, soft_deletes = false, - primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] + primary_key_columns = [ + { column_name = "name", rust_type = String }, + { column_name = "version", rust_type = db::model::SemverVersion }, + { column_name = "kind", rust_type = String }, + ] } lookup_resource! { diff --git a/nexus/db-queries/src/db/pool_connection.rs b/nexus/db-queries/src/db/pool_connection.rs index e8ef721e98..2d57274909 100644 --- a/nexus/db-queries/src/db/pool_connection.rs +++ b/nexus/db-queries/src/db/pool_connection.rs @@ -67,9 +67,6 @@ static CUSTOM_TYPE_KEYS: &'static [&'static str] = &[ "switch_link_fec", "switch_link_speed", "switch_port_geometry", - "update_artifact_kind", - "update_status", - "updateable_component_type", "user_provision_type", "vpc_firewall_rule_action", "vpc_firewall_rule_direction", diff --git a/nexus/db-queries/tests/output/authz-roles.out b/nexus/db-queries/tests/output/authz-roles.out index 26cc13fc6a..ee55d775f0 100644 --- a/nexus/db-queries/tests/output/authz-roles.out +++ b/nexus/db-queries/tests/output/authz-roles.out @@ -950,7 +950,7 @@ resource: Blueprint id "b9e923f6-caf3-4c83-96f9-8ffe8c627dd2" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! -resource: SystemUpdate id "9c86d713-1bc2-4927-9892-ada3eb6f5f62" +resource: TufRepo id "3c52d72f-cbf7-4951-a62f-a4154e74da87" USER Q R LC RP M MP CC D fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ @@ -964,7 +964,7 @@ resource: SystemUpdate id "9c86d713-1bc2-4927-9892-ada3eb6f5f62" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! -resource: UpdateDeployment id "c617a035-7c42-49ff-a36a-5dfeee382832" +resource: TufArtifact id "a v1.0.0 (b)" USER Q R LC RP M MP CC D fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index 9d6bf2d22f..f13ea721b8 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -40,9 +40,13 @@ external_dns_servers = [ "1.1.1.1", "9.9.9.9" ] [deployment.dropshot_external] # IP Address and TCP port on which to listen for the external API bind_address = "127.0.0.1:12220" -# Allow larger request bodies (1MiB) to accomodate firewall endpoints (one -# rule is ~500 bytes) -request_body_max_bytes = 1048576 +# Allow large request bodies to support uploading TUF archives. The number here +# is picked based on the typical size for tuf-mupdate.zip as of 2024-01 +# (~1.5GiB) and multiplying it by 2. +# +# This should be brought back down to a more reasonable value once per-endpoint +# request body limits are implemented. +request_body_max_bytes = 3221225472 # To have Nexus's external HTTP endpoint use TLS, uncomment the line below. You # will also need to provide an initial TLS certificate during rack # initialization. 
If you're using this config file, you're probably running a diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index d643969924..d6ad7c98ea 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -140,6 +140,7 @@ pub struct Nexus { timeseries_client: LazyTimeseriesClient, /// Contents of the trusted root role for the TUF repository. + #[allow(dead_code)] updates_config: Option, /// The tunable parameters from a configuration file diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 17e7a17444..38c7861e46 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -151,9 +151,6 @@ impl super::Nexus { }) .collect(); - // internally ignores ObjectAlreadyExists, so will not error on repeat runs - let _ = self.populate_mock_system_updates(&opctx).await?; - let dns_zone = request .internal_dns_zone_config .zones diff --git a/nexus/src/app/update/mod.rs b/nexus/src/app/update/mod.rs index 36d4dbcb9e..d4a47375bc 100644 --- a/nexus/src/app/update/mod.rs +++ b/nexus/src/app/update/mod.rs @@ -4,27 +4,17 @@ //! Software Updates -use chrono::Utc; -use hex; +use bytes::Bytes; +use dropshot::HttpError; +use futures::Stream; +use nexus_db_model::TufRepoDescription; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; -use nexus_db_queries::db; -use nexus_db_queries::db::identity::Asset; -use nexus_db_queries::db::lookup::LookupPath; -use nexus_db_queries::db::model::KnownArtifactKind; -use nexus_types::external_api::{params, shared}; use omicron_common::api::external::{ - self, CreateResult, DataPageParams, Error, ListResultVec, LookupResult, - PaginationOrder, UpdateResult, + Error, SemverVersion, TufRepoInsertResponse, }; -use omicron_common::api::internal::nexus::UpdateArtifactId; -use rand::Rng; -use ring::digest; -use std::convert::TryFrom; -use std::num::NonZeroU32; -use std::path::Path; -use tokio::io::AsyncWriteExt; -use uuid::Uuid; +use omicron_common::update::ArtifactId; +use update_common::artifacts::ArtifactsWithPlan; mod common_sp_update; mod host_phase1_updater; @@ -47,927 +37,70 @@ pub enum UpdateProgress { Failed(String), } -static BASE_ARTIFACT_DIR: &str = "/var/tmp/oxide_artifacts"; - impl super::Nexus { - async fn tuf_base_url( + pub(crate) async fn updates_put_repository( &self, opctx: &OpContext, - ) -> Result, Error> { - let rack = self.rack_lookup(opctx, &self.rack_id).await?; - - Ok(self.updates_config.as_ref().map(|c| { - rack.tuf_base_url.unwrap_or_else(|| c.default_base_url.clone()) - })) - } - - pub(crate) async fn updates_refresh_metadata( - &self, - opctx: &OpContext, - ) -> Result<(), Error> { + body: impl Stream> + Send + Sync + 'static, + file_name: String, + ) -> Result { opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - let updates_config = self.updates_config.as_ref().ok_or_else(|| { - Error::invalid_request("updates system not configured") - })?; - let base_url = self.tuf_base_url(opctx).await?.ok_or_else(|| { - Error::invalid_request("updates system not configured") - })?; - let trusted_root = tokio::fs::read(&updates_config.trusted_root) - .await - .map_err(|e| Error::InternalError { - internal_message: format!( - "error trying to read trusted root: {}", - e - ), + // XXX: this needs to validate against the trusted root! 
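+        // (Validation sketch, not implemented in this change: read the
+        // trusted root from `updates_config.trusted_root` and verify the
+        // uploaded repository against it, e.g. with a TUF client such as
+        // `tough::RepositoryLoader`, before accepting any artifacts.)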
+ let _updates_config = + self.updates_config.as_ref().ok_or_else(|| { + Error::internal_error("updates system not initialized") })?; - let artifacts = crate::updates::read_artifacts(&trusted_root, base_url) - .await - .map_err(|e| Error::InternalError { - internal_message: format!( - "error trying to refresh updates: {}", - e - ), - })?; - - // FIXME: if we hit an error in any of these database calls, the - // available artifact table will be out of sync with the current - // artifacts.json. can we do a transaction or something? + let artifacts_with_plan = + ArtifactsWithPlan::from_stream(body, Some(file_name), &self.log) + .await + .map_err(|error| error.to_http_error())?; - let mut current_version = None; - for artifact in &artifacts { - current_version = Some(artifact.targets_role_version); - self.db_datastore - .update_artifact_upsert(&opctx, artifact.clone()) - .await?; - } - - // ensure table is in sync with current copy of artifacts.json - if let Some(current_version) = current_version { - self.db_datastore - .update_artifact_hard_delete_outdated(&opctx, current_version) - .await?; - } - - // demo-grade update logic: tell all sleds to apply all artifacts - for sled in self - .db_datastore - .sled_list( - &opctx, - &DataPageParams { - marker: None, - direction: PaginationOrder::Ascending, - limit: NonZeroU32::new(100).unwrap(), - }, - ) - .await? - { - let client = self.sled_client(&sled.id()).await?; - for artifact in &artifacts { - info!( - self.log, - "telling sled {} to apply {}", - sled.id(), - artifact.target_name - ); - client - .update_artifact( - &sled_agent_client::types::UpdateArtifactId { - name: artifact.name.clone(), - version: artifact.version.0.clone().into(), - kind: artifact.kind.0.into(), - }, - ) - .await?; - } - } - - Ok(()) - } - - /// Downloads a file from within [`BASE_ARTIFACT_DIR`]. - pub(crate) async fn download_artifact( - &self, - opctx: &OpContext, - artifact: UpdateArtifactId, - ) -> Result, Error> { - let mut base_url = - self.tuf_base_url(opctx).await?.ok_or_else(|| { - Error::invalid_request("updates system not configured") - })?; - if !base_url.ends_with('/') { - base_url.push('/'); - } - - // We cache the artifact based on its checksum, so fetch that from the - // database. - let (.., artifact_entry) = LookupPath::new(opctx, &self.db_datastore) - .update_artifact_tuple( - &artifact.name, - db::model::SemverVersion(artifact.version.clone()), - KnownArtifactKind(artifact.kind), - ) - .fetch() - .await?; - let filename = format!( - "{}.{}.{}-{}", - artifact_entry.target_sha256, - artifact.kind, - artifact.name, - artifact.version + // Now store the artifacts in the database. + let tuf_repo_description = TufRepoDescription::from_external( + artifacts_with_plan.description().clone(), ); - let path = Path::new(BASE_ARTIFACT_DIR).join(&filename); - - if !path.exists() { - // If the artifact doesn't exist, we should download it. - // - // TODO: There also exists the question of "when should we *remove* - // things from BASE_ARTIFACT_DIR", which we should also resolve. - // Demo-quality solution could be "destroy it on boot" or something? - // (we aren't doing that yet). 
- info!(self.log, "Accessing {} - needs to be downloaded", filename); - tokio::fs::create_dir_all(BASE_ARTIFACT_DIR).await.map_err( - |e| { - Error::internal_error(&format!( - "Failed to create artifacts directory: {}", - e - )) - }, - )?; - - let mut response = reqwest::get(format!( - "{}targets/{}.{}", - base_url, - artifact_entry.target_sha256, - artifact_entry.target_name - )) - .await - .map_err(|e| { - Error::internal_error(&format!( - "Failed to fetch artifact: {}", - e - )) - })?; - // To ensure another request isn't trying to use this target while we're downloading it - // or before we've verified it, write to a random path in the same directory, then move - // it to the correct path after verification. - let temp_path = path.with_file_name(format!( - ".{}.{:x}", - filename, - rand::thread_rng().gen::() - )); - let mut file = - tokio::fs::File::create(&temp_path).await.map_err(|e| { - Error::internal_error(&format!( - "Failed to create file: {}", - e - )) - })?; - - let mut context = digest::Context::new(&digest::SHA256); - let mut length: i64 = 0; - while let Some(chunk) = response.chunk().await.map_err(|e| { - Error::internal_error(&format!( - "Failed to read HTTP body: {}", - e - )) - })? { - file.write_all(&chunk).await.map_err(|e| { - Error::internal_error(&format!( - "Failed to write to file: {}", - e - )) - })?; - context.update(&chunk); - length += i64::try_from(chunk.len()).unwrap(); - - if length > artifact_entry.target_length { - return Err(Error::internal_error(&format!( - "target {} is larger than expected", - artifact_entry.target_name - ))); - } - } - drop(file); - - if hex::encode(context.finish()) == artifact_entry.target_sha256 - && length == artifact_entry.target_length - { - tokio::fs::rename(temp_path, &path).await.map_err(|e| { - Error::internal_error(&format!( - "Failed to rename file after verification: {}", - e - )) - })? - } else { - return Err(Error::internal_error(&format!( - "failed to verify target {}", - artifact_entry.target_name - ))); - } - - info!( - self.log, - "wrote {} to artifact dir", artifact_entry.target_name - ); - } else { - info!(self.log, "Accessing {} - already exists", path.display()); - } - - // TODO: These artifacts could be quite large - we should figure out how to - // stream this file back instead of holding it entirely in-memory in a - // Vec. - // - // Options: - // - RFC 7233 - "Range Requests" (is this HTTP/1.1 only?) - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests - // - "Roll our own". 
See: - // https://stackoverflow.com/questions/20969331/standard-method-for-http-partial-upload-resume-upload - let body = tokio::fs::read(&path).await.map_err(|e| { - Error::internal_error(&format!( - "Cannot read artifact from filesystem: {}", - e - )) - })?; - Ok(body) - } - - pub async fn upsert_system_update( - &self, - opctx: &OpContext, - create_update: params::SystemUpdateCreate, - ) -> CreateResult { - let update = db::model::SystemUpdate::new(create_update.version)?; - self.db_datastore.upsert_system_update(opctx, update).await - } - - pub async fn create_component_update( - &self, - opctx: &OpContext, - create_update: params::ComponentUpdateCreate, - ) -> CreateResult { - let now = Utc::now(); - let update = db::model::ComponentUpdate { - identity: db::model::ComponentUpdateIdentity { - id: Uuid::new_v4(), - time_created: now, - time_modified: now, - }, - version: db::model::SemverVersion(create_update.version), - component_type: create_update.component_type.into(), - }; - - self.db_datastore - .create_component_update( - opctx, - create_update.system_update_id, - update, - ) - .await - } - - pub(crate) async fn system_update_fetch_by_version( - &self, - opctx: &OpContext, - version: &external::SemverVersion, - ) -> LookupResult { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - self.db_datastore - .system_update_fetch_by_version(opctx, version.clone().into()) + let response = self + .db_datastore + .update_tuf_repo_insert(opctx, tuf_repo_description) .await + .map_err(HttpError::from)?; + Ok(response.into_external()) } - pub(crate) async fn system_updates_list_by_id( + pub(crate) async fn updates_get_repository( &self, opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - self.db_datastore.system_updates_list_by_id(opctx, pagparams).await - } + system_version: SemverVersion, + ) -> Result { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - pub(crate) async fn system_update_list_components( - &self, - opctx: &OpContext, - version: &external::SemverVersion, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + let _updates_config = + self.updates_config.as_ref().ok_or_else(|| { + Error::internal_error("updates system not initialized") + })?; - let system_update = self + let tuf_repo_description = self .db_datastore - .system_update_fetch_by_version(opctx, version.clone().into()) - .await?; - - self.db_datastore - .system_update_components_list(opctx, system_update.id()) + .update_tuf_repo_get(opctx, system_version.into()) .await - } - - pub async fn create_updateable_component( - &self, - opctx: &OpContext, - create_component: params::UpdateableComponentCreate, - ) -> CreateResult { - let component = - db::model::UpdateableComponent::try_from(create_component)?; - self.db_datastore.create_updateable_component(opctx, component).await - } - - pub(crate) async fn updateable_components_list_by_id( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - self.db_datastore - .updateable_components_list_by_id(opctx, pagparams) - .await - } - - pub(crate) async fn create_update_deployment( - &self, - opctx: &OpContext, - start: params::SystemUpdateStart, - ) -> CreateResult { - // 404 if specified version doesn't exist - // TODO: is 404 the right error for starting an update with a nonexistent version? 
- self.system_update_fetch_by_version(opctx, &start.version).await?; - - // We only need to look at the latest deployment because it's the only - // one that could be running - - let latest_deployment = self.latest_update_deployment(opctx).await; - if let Ok(dep) = latest_deployment { - if dep.status == db::model::UpdateStatus::Updating { - // TODO: should "already updating" conflict be a new kind of error? - return Err(Error::ObjectAlreadyExists { - type_name: external::ResourceType::UpdateDeployment, - object_name: dep.id().to_string(), - }); - } - } - - let deployment = db::model::UpdateDeployment { - identity: db::model::UpdateDeploymentIdentity::new(Uuid::new_v4()), - version: db::model::SemverVersion(start.version), - status: db::model::UpdateStatus::Updating, - }; - self.db_datastore.create_update_deployment(opctx, deployment).await - } - - /// If there's a running update, change it to steady. Otherwise do nothing. - // TODO: codify the state machine around update deployments - pub(crate) async fn steady_update_deployment( - &self, - opctx: &OpContext, - ) -> UpdateResult { - let latest = self.latest_update_deployment(opctx).await?; - // already steady. do nothing in order to avoid updating `time_modified` - if latest.status == db::model::UpdateStatus::Steady { - return Ok(latest); - } - - self.db_datastore.steady_update_deployment(opctx, latest.id()).await - } - - pub(crate) async fn update_deployments_list_by_id( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - self.db_datastore.update_deployments_list_by_id(opctx, pagparams).await - } + .map_err(HttpError::from)?; - pub(crate) async fn update_deployment_fetch_by_id( - &self, - opctx: &OpContext, - deployment_id: &Uuid, - ) -> LookupResult { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let (.., db_deployment) = LookupPath::new(opctx, &self.db_datastore) - .update_deployment_id(*deployment_id) - .fetch() - .await?; - Ok(db_deployment) + Ok(tuf_repo_description) } - pub(crate) async fn latest_update_deployment( + /// Downloads a file (currently not implemented). 
+ pub(crate) async fn updates_download_artifact( &self, - opctx: &OpContext, - ) -> LookupResult { - self.db_datastore.latest_update_deployment(opctx).await - } - - pub(crate) async fn lowest_component_system_version( - &self, - opctx: &OpContext, - ) -> LookupResult { - self.db_datastore.lowest_component_system_version(opctx).await - } - - pub(crate) async fn highest_component_system_version( - &self, - opctx: &OpContext, - ) -> LookupResult { - self.db_datastore.highest_component_system_version(opctx).await - } - - /// Inner function makes it easier to implement the logic where we ignore - /// ObjectAlreadyExists errors but let the others pass through - async fn populate_mock_system_updates_inner( - &self, - opctx: &OpContext, - ) -> CreateResult<()> { - let types = vec![ - shared::UpdateableComponentType::HubrisForPscRot, - shared::UpdateableComponentType::HubrisForPscSp, - shared::UpdateableComponentType::HubrisForSidecarRot, - shared::UpdateableComponentType::HubrisForSidecarSp, - shared::UpdateableComponentType::HubrisForGimletRot, - shared::UpdateableComponentType::HubrisForGimletSp, - shared::UpdateableComponentType::HeliosHostPhase1, - shared::UpdateableComponentType::HeliosHostPhase2, - shared::UpdateableComponentType::HostOmicron, - ]; - - // create system updates and associated component updates - for v in [1, 2, 3] { - let version = external::SemverVersion::new(v, 0, 0); - let su = self - .upsert_system_update( - opctx, - params::SystemUpdateCreate { version: version.clone() }, - ) - .await?; - - for component_type in types.clone() { - self.create_component_update( - &opctx, - params::ComponentUpdateCreate { - version: external::SemverVersion::new(1, v, 0), - system_update_id: su.identity.id, - component_type, - }, - ) - .await?; - } - } - - // create deployment for v1.0.0, stop it, then create one for v2.0.0. - // This makes plausible the state of the components: all v1 except for one v2 - self.create_update_deployment( - &opctx, - params::SystemUpdateStart { - version: external::SemverVersion::new(1, 0, 0), - }, - ) - .await?; - self.steady_update_deployment(opctx).await?; - - self.create_update_deployment( - &opctx, - params::SystemUpdateStart { - version: external::SemverVersion::new(2, 0, 0), - }, - ) - .await?; - - // now create components, with one component on a different system - // version from the others - - for (i, component_type) in types.iter().enumerate() { - let version = if i == 0 { - external::SemverVersion::new(1, 2, 0) - } else { - external::SemverVersion::new(1, 1, 0) - }; - - let system_version = if i == 0 { - external::SemverVersion::new(2, 0, 0) - } else { - external::SemverVersion::new(1, 0, 0) - }; - - self.create_updateable_component( - opctx, - params::UpdateableComponentCreate { - version, - system_version, - device_id: "a-device".to_string(), - component_type: component_type.clone(), - }, - ) - .await?; - } - - Ok(()) - } - - /// Populate the DB with update-related data. Data is hard-coded until we - /// figure out how to pull it from the TUF repo. - /// - /// We need this to be idempotent because it can be called arbitrarily many - /// times. The service functions we call to create these resources will - /// error on ID or version conflicts, so to remain idempotent we can simply - /// ignore those errors. We let other errors through. 
- pub(crate) async fn populate_mock_system_updates( - &self, - opctx: &OpContext, - ) -> CreateResult<()> { - self.populate_mock_system_updates_inner(opctx).await.or_else(|error| { - match error { - // ignore ObjectAlreadyExists but pass through other errors - external::Error::ObjectAlreadyExists { .. } => Ok(()), - _ => Err(error), - } - }) - } -} - -// TODO: convert system update tests to integration tests now that I know how to -// call nexus functions in those - -#[cfg(test)] -mod tests { - use assert_matches::assert_matches; - use std::num::NonZeroU32; - - use dropshot::PaginationOrder; - use nexus_db_queries::context::OpContext; - use nexus_db_queries::db::model::UpdateStatus; - use nexus_test_utils_macros::nexus_test; - use nexus_types::external_api::{ - params::{ - ComponentUpdateCreate, SystemUpdateCreate, SystemUpdateStart, - UpdateableComponentCreate, - }, - shared::UpdateableComponentType, - }; - use omicron_common::api::external::{self, DataPageParams}; - use uuid::Uuid; - - type ControlPlaneTestContext = - nexus_test_utils::ControlPlaneTestContext; - - pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { - OpContext::for_tests( - cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx.nexus.datastore().clone(), - ) - } - - pub fn test_pagparams() -> DataPageParams<'static, Uuid> { - DataPageParams { - marker: None, - direction: PaginationOrder::Ascending, - limit: NonZeroU32::new(100).unwrap(), - } - } - - #[nexus_test(server = crate::Server)] - async fn test_system_updates(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx.nexus; - let opctx = test_opctx(&cptestctx); - - // starts out with 3 populated - let system_updates = nexus - .system_updates_list_by_id(&opctx, &test_pagparams()) - .await - .unwrap(); - - assert_eq!(system_updates.len(), 3); - - let su1_create = SystemUpdateCreate { - version: external::SemverVersion::new(5, 0, 0), - }; - let su1 = nexus.upsert_system_update(&opctx, su1_create).await.unwrap(); - - // weird order is deliberate - let su3_create = SystemUpdateCreate { - version: external::SemverVersion::new(10, 0, 0), - }; - nexus.upsert_system_update(&opctx, su3_create).await.unwrap(); - - let su2_create = SystemUpdateCreate { - version: external::SemverVersion::new(0, 7, 0), - }; - let su2 = nexus.upsert_system_update(&opctx, su2_create).await.unwrap(); - - // now there should be a bunch of system updates, sorted by version descending - let versions: Vec = nexus - .system_updates_list_by_id(&opctx, &test_pagparams()) - .await - .unwrap() - .iter() - .map(|su| su.version.to_string()) - .collect(); - - assert_eq!(versions.len(), 6); - assert_eq!(versions[0], "10.0.0".to_string()); - assert_eq!(versions[1], "5.0.0".to_string()); - assert_eq!(versions[2], "3.0.0".to_string()); - assert_eq!(versions[3], "2.0.0".to_string()); - assert_eq!(versions[4], "1.0.0".to_string()); - assert_eq!(versions[5], "0.7.0".to_string()); - - // let's also make sure we can fetch by version - let su1_fetched = nexus - .system_update_fetch_by_version(&opctx, &su1.version) - .await - .unwrap(); - assert_eq!(su1.identity.id, su1_fetched.identity.id); - - // now create two component updates for update 1, one at root, and one - // hanging off the first - nexus - .create_component_update( - &opctx, - ComponentUpdateCreate { - version: external::SemverVersion::new(1, 0, 0), - component_type: UpdateableComponentType::BootloaderForRot, - system_update_id: su1.identity.id, - }, - ) - .await - .expect("Failed to create component update"); - 
nexus - .create_component_update( - &opctx, - ComponentUpdateCreate { - version: external::SemverVersion::new(2, 0, 0), - component_type: UpdateableComponentType::HubrisForGimletSp, - system_update_id: su1.identity.id, - }, - ) - .await - .expect("Failed to create component update"); - - // now there should be two component updates - let cus_for_su1 = nexus - .system_update_list_components(&opctx, &su1.version) - .await - .unwrap(); - - assert_eq!(cus_for_su1.len(), 2); - - // other system update should not be associated with any component updates - let cus_for_su2 = nexus - .system_update_list_components(&opctx, &su2.version) - .await - .unwrap(); - - assert_eq!(cus_for_su2.len(), 0); - } - - #[nexus_test(server = crate::Server)] - async fn test_semver_max(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx.nexus; - let opctx = test_opctx(&cptestctx); - - let expected = "Invalid Value: version, Major, minor, and patch version must be less than 99999999"; - - // major, minor, and patch are all capped - - let su_create = SystemUpdateCreate { - version: external::SemverVersion::new(100000000, 0, 0), - }; - let error = - nexus.upsert_system_update(&opctx, su_create).await.unwrap_err(); - assert!(error.to_string().contains(expected)); - - let su_create = SystemUpdateCreate { - version: external::SemverVersion::new(0, 100000000, 0), - }; - let error = - nexus.upsert_system_update(&opctx, su_create).await.unwrap_err(); - assert!(error.to_string().contains(expected)); - - let su_create = SystemUpdateCreate { - version: external::SemverVersion::new(0, 0, 100000000), - }; - let error = - nexus.upsert_system_update(&opctx, su_create).await.unwrap_err(); - assert!(error.to_string().contains(expected)); - } - - #[nexus_test(server = crate::Server)] - async fn test_updateable_components(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx.nexus; - let opctx = test_opctx(&cptestctx); - - // starts out populated - let components = nexus - .updateable_components_list_by_id(&opctx, &test_pagparams()) - .await - .unwrap(); - - assert_eq!(components.len(), 9); - - // with no components these should both 500. as discussed in the - // implementation, this is appropriate because we should never be - // running the external API without components populated - // - // let low = - // nexus.lowest_component_system_version(&opctx).await.unwrap_err(); - // assert_matches!(low, external::Error::InternalError { .. }); - // let high = - // nexus.highest_component_system_version(&opctx).await.unwrap_err(); - // assert_matches!(high, external::Error::InternalError { .. }); - - // creating a component if its system_version doesn't exist is a 404 - let uc_create = UpdateableComponentCreate { - version: external::SemverVersion::new(0, 4, 1), - system_version: external::SemverVersion::new(0, 2, 0), - component_type: UpdateableComponentType::BootloaderForSp, - device_id: "look-a-device".to_string(), - }; - let uc_404 = nexus - .create_updateable_component(&opctx, uc_create.clone()) - .await - .unwrap_err(); - assert_matches!(uc_404, external::Error::ObjectNotFound { .. 
}); - - // create system updates for the component updates to hang off of - let v020 = external::SemverVersion::new(0, 2, 0); - nexus - .upsert_system_update(&opctx, SystemUpdateCreate { version: v020 }) - .await - .expect("Failed to create system update"); - let v3 = external::SemverVersion::new(4, 0, 0); - nexus - .upsert_system_update(&opctx, SystemUpdateCreate { version: v3 }) - .await - .expect("Failed to create system update"); - let v10 = external::SemverVersion::new(10, 0, 0); - nexus - .upsert_system_update(&opctx, SystemUpdateCreate { version: v10 }) - .await - .expect("Failed to create system update"); - - // now uc_create and friends will work - nexus - .create_updateable_component(&opctx, uc_create) - .await - .expect("failed to create updateable component"); - nexus - .create_updateable_component( - &opctx, - UpdateableComponentCreate { - version: external::SemverVersion::new(0, 4, 1), - system_version: external::SemverVersion::new(3, 0, 0), - component_type: UpdateableComponentType::HeliosHostPhase2, - device_id: "another-device".to_string(), - }, - ) - .await - .expect("failed to create updateable component"); - nexus - .create_updateable_component( - &opctx, - UpdateableComponentCreate { - version: external::SemverVersion::new(0, 4, 1), - system_version: external::SemverVersion::new(10, 0, 0), - component_type: UpdateableComponentType::HeliosHostPhase1, - device_id: "a-third-device".to_string(), - }, - ) - .await - .expect("failed to create updateable component"); - - // now there should be 3 more, or 12 - let components = nexus - .updateable_components_list_by_id(&opctx, &test_pagparams()) - .await - .unwrap(); - - assert_eq!(components.len(), 12); - - let low = nexus.lowest_component_system_version(&opctx).await.unwrap(); - assert_eq!(&low.to_string(), "0.2.0"); - let high = - nexus.highest_component_system_version(&opctx).await.unwrap(); - assert_eq!(&high.to_string(), "10.0.0"); - - // TODO: update the version of a component - } - - #[nexus_test(server = crate::Server)] - async fn test_update_deployments(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx.nexus; - let opctx = test_opctx(&cptestctx); - - // starts out with one populated - let deployments = nexus - .update_deployments_list_by_id(&opctx, &test_pagparams()) - .await - .unwrap(); - - assert_eq!(deployments.len(), 2); - - // start update fails with nonexistent version - let not_found = nexus - .create_update_deployment( - &opctx, - SystemUpdateStart { - version: external::SemverVersion::new(6, 0, 0), - }, - ) - .await - .unwrap_err(); - - assert_matches!(not_found, external::Error::ObjectNotFound { .. }); - - // starting with existing version fails because there's already an - // update running - let start_v3 = SystemUpdateStart { - version: external::SemverVersion::new(3, 0, 0), - }; - let already_updating = nexus - .create_update_deployment(&opctx, start_v3.clone()) - .await - .unwrap_err(); - - assert_matches!( - already_updating, - external::Error::ObjectAlreadyExists { .. 
} - ); - - // stop the running update - nexus - .steady_update_deployment(&opctx) - .await - .expect("Failed to stop running update"); - - // now starting an update succeeds - let d = nexus - .create_update_deployment(&opctx, start_v3) - .await - .expect("Failed to create deployment"); - - let deployment_ids: Vec = nexus - .update_deployments_list_by_id(&opctx, &test_pagparams()) - .await - .unwrap() - .into_iter() - .map(|d| d.identity.id) - .collect(); - - assert_eq!(deployment_ids.len(), 3); - assert!(deployment_ids.contains(&d.identity.id)); - - // latest deployment returns the one just created - let latest_deployment = - nexus.latest_update_deployment(&opctx).await.unwrap(); - - assert_eq!(latest_deployment.identity.id, d.identity.id); - assert_eq!(latest_deployment.status, UpdateStatus::Updating); - assert!( - latest_deployment.identity.time_modified - == d.identity.time_modified - ); - - // stopping update updates both its status and its time_modified - nexus - .steady_update_deployment(&opctx) - .await - .expect("Failed to steady running update"); - - let latest_deployment = - nexus.latest_update_deployment(&opctx).await.unwrap(); - - assert_eq!(latest_deployment.identity.id, d.identity.id); - assert_eq!(latest_deployment.status, UpdateStatus::Steady); - assert!( - latest_deployment.identity.time_modified > d.identity.time_modified - ); - } - - #[nexus_test(server = crate::Server)] - async fn test_populate_mock_system_updates( - cptestctx: &ControlPlaneTestContext, - ) { - let nexus = &cptestctx.server.apictx.nexus; - let opctx = test_opctx(&cptestctx); - - // starts out with updates because they're populated at rack init - let su_count = nexus - .system_updates_list_by_id(&opctx, &test_pagparams()) - .await - .unwrap() - .len(); - assert!(su_count > 0); - - // additional call doesn't error because the conflict gets eaten - let result = nexus.populate_mock_system_updates(&opctx).await; - assert!(result.is_ok()); - - // count didn't change - let system_updates = nexus - .system_updates_list_by_id(&opctx, &test_pagparams()) - .await - .unwrap(); - assert_eq!(system_updates.len(), su_count); + _opctx: &OpContext, + _artifact: ArtifactId, + ) -> Result, Error> { + // TODO: this is part of the TUF repo depot. 
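+        // (Sketch of the eventual flow, under assumptions: look the artifact
+        // up by its `(name, version, kind)` tuple and stream its bytes back,
+        // rather than buffering the whole artifact in a Vec<u8> as the old
+        // implementation did.)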
+ return Err(Error::internal_error( + "artifact download not implemented, \ + will be part of TUF repo depot", + )); } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index a6cb9e80fe..3c3c40d026 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -8,14 +8,13 @@ use super::{ console_api, device_auth, params, views::{ self, Certificate, Group, IdentityProvider, Image, IpPool, IpPoolRange, - PhysicalDisk, Project, Rack, Role, Silo, SiloUtilization, Sled, - Snapshot, SshKey, User, UserBuiltin, Vpc, VpcRouter, VpcSubnet, + PhysicalDisk, Project, Rack, Role, Silo, SiloQuotas, SiloUtilization, + Sled, Snapshot, SshKey, User, UserBuiltin, Utilization, Vpc, VpcRouter, + VpcSubnet, }, }; use crate::external_api::shared; use crate::ServerContext; -use chrono::Utc; -use dropshot::ApiDescription; use dropshot::EmptyScanParams; use dropshot::HttpError; use dropshot::HttpResponseAccepted; @@ -34,6 +33,7 @@ use dropshot::WhichPage; use dropshot::{ channel, endpoint, WebsocketChannelResult, WebsocketConnection, }; +use dropshot::{ApiDescription, StreamingBody}; use ipnetwork::IpNetwork; use nexus_db_queries::authz; use nexus_db_queries::db; @@ -41,9 +41,6 @@ use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup::ImageLookup; use nexus_db_queries::db::lookup::ImageParentLookup; use nexus_db_queries::db::model::Name; -use nexus_types::external_api::views::SiloQuotas; -use nexus_types::external_api::views::Utilization; -use nexus_types::identity::AssetIdentityMetadata; use omicron_common::api::external::http_pagination::data_page_params_for; use omicron_common::api::external::http_pagination::marker_for_name; use omicron_common::api::external::http_pagination::marker_for_name_or_id; @@ -76,6 +73,8 @@ use omicron_common::api::external::RouterRouteKind; use omicron_common::api::external::SwitchPort; use omicron_common::api::external::SwitchPortSettings; use omicron_common::api::external::SwitchPortSettingsView; +use omicron_common::api::external::TufRepoGetResponse; +use omicron_common::api::external::TufRepoInsertResponse; use omicron_common::api::external::VpcFirewallRuleUpdateParams; use omicron_common::api::external::VpcFirewallRules; use omicron_common::bail_unless; @@ -309,16 +308,8 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(system_metric)?; api.register(silo_metric)?; - api.register(system_update_refresh)?; - api.register(system_version)?; - api.register(system_component_version_list)?; - api.register(system_update_list)?; - api.register(system_update_view)?; - api.register(system_update_start)?; - api.register(system_update_stop)?; - api.register(system_update_components_list)?; - api.register(update_deployments_list)?; - api.register(update_deployment_view)?; + api.register(system_update_put_repository)?; + api.register(system_update_get_repository)?; api.register(user_list)?; api.register(silo_user_list)?; @@ -433,12 +424,6 @@ async fn system_policy_view( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Path parameters for `/by-id/` endpoints -#[derive(Deserialize, JsonSchema)] -struct ByIdPathParams { - id: Uuid, -} - /// Update the top-level IAM policy #[endpoint { method = PUT, @@ -5376,320 +5361,56 @@ async fn silo_metric( // Updates -/// Refresh update data -#[endpoint { - method = POST, - path = "/v1/system/update/refresh", - tags = ["system/update"], - unpublished = true, -}] -async fn 
system_update_refresh( - rqctx: RequestContext>, -) -> Result { - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus.updates_refresh_metadata(&opctx).await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// View system version and update status -#[endpoint { - method = GET, - path = "/v1/system/update/version", - tags = ["system/update"], - unpublished = true, -}] -async fn system_version( - rqctx: RequestContext>, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - // The only way we have no latest deployment is if the rack was just set - // up and no system updates have ever been run. In this case there is no - // update running, so we can fall back to steady. - let status = nexus - .latest_update_deployment(&opctx) - .await - .map_or(views::UpdateStatus::Steady, |d| d.status.into()); - - // Updateable components, however, are populated at rack setup before - // the external API is even started, so if we get here and there are no - // components, that's a real issue and the 500 we throw is appropriate. - let low = nexus.lowest_component_system_version(&opctx).await?.into(); - let high = nexus.highest_component_system_version(&opctx).await?.into(); - - Ok(HttpResponseOk(views::SystemVersion { - version_range: views::VersionRange { low, high }, - status, - })) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// View version and update status of component tree -#[endpoint { - method = GET, - path = "/v1/system/update/components", - tags = ["system/update"], - unpublished = true, -}] -async fn system_component_version_list( - rqctx: RequestContext>, - query_params: Query, -) -> Result>, HttpError> -{ - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let components = nexus - .updateable_components_list_by_id(&opctx, &pagparams) - .await? - .into_iter() - .map(|u| u.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - components, - &|_, u: &views::UpdateableComponent| u.identity.id, - )?)) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// List all updates -#[endpoint { - method = GET, - path = "/v1/system/update/updates", - tags = ["system/update"], - unpublished = true, -}] -async fn system_update_list( - rqctx: RequestContext>, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let updates = nexus - .system_updates_list_by_id(&opctx, &pagparams) - .await? 
- .into_iter() - .map(|u| u.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - updates, - &|_, u: &views::SystemUpdate| u.identity.id, - )?)) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// View system update -#[endpoint { - method = GET, - path = "/v1/system/update/updates/{version}", - tags = ["system/update"], - unpublished = true, -}] -async fn system_update_view( - rqctx: RequestContext>, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let system_update = - nexus.system_update_fetch_by_version(&opctx, &path.version).await?; - Ok(HttpResponseOk(system_update.into())) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// View system update component tree +/// Upload a TUF repository #[endpoint { - method = GET, - path = "/v1/system/update/updates/{version}/components", + method = PUT, + path = "/v1/system/update/repository", tags = ["system/update"], unpublished = true, }] -async fn system_update_components_list( +async fn system_update_put_repository( rqctx: RequestContext>, - path_params: Path, -) -> Result>, HttpError> { + query: Query, + body: StreamingBody, +) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; - let path = path_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let components = nexus - .system_update_list_components(&opctx, &path.version) - .await? - .into_iter() - .map(|i| i.into()) - .collect(); - Ok(HttpResponseOk(ResultsPage { items: components, next_page: None })) + let query = query.into_inner(); + let body = body.into_stream(); + let update = + nexus.updates_put_repository(&opctx, body, query.file_name).await?; + Ok(HttpResponseOk(update)) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Start system update +/// Get the description of a repository by system version. #[endpoint { - method = POST, - path = "/v1/system/update/start", - tags = ["system/update"], - unpublished = true, -}] -async fn system_update_start( - rqctx: RequestContext>, - // The use of the request body here instead of a path param is deliberate. - // Unlike instance start (which uses a path param), update start is about - // modifying the state of the system rather than the state of the resource - // (instance there, system update here) identified by the param. This - // approach also gives us symmetry with the /stop endpoint. - update: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let _nexus = &apictx.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - - // inverse situation to stop: we only want to actually start an update - // if there isn't one already in progress. - - // 1. check that there is no update in progress - // a. if there is one, this should probably 409 - // 2. kick off the update start saga, which - // a. tells the update system to get going - // b. 
creates an update deployment - - // similar question for stop: do we return the deployment directly, or a - // special StartUpdateResult that includes a deployment ID iff an update - // was actually started - - Ok(HttpResponseAccepted(views::UpdateDeployment { - identity: AssetIdentityMetadata { - id: Uuid::new_v4(), - time_created: Utc::now(), - time_modified: Utc::now(), - }, - version: update.into_inner().version, - status: views::UpdateStatus::Updating, - })) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// Stop system update -/// -/// If there is no update in progress, do nothing. -#[endpoint { - method = POST, - path = "/v1/system/update/stop", + method = GET, + path = "/v1/system/update/repository/{system_version}", tags = ["system/update"], unpublished = true, }] -async fn system_update_stop( - rqctx: RequestContext>, -) -> Result { - let apictx = rqctx.context(); - let _nexus = &apictx.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - - // TODO: Implement stopping an update. Should probably be a saga. - - // Ask update subsystem if it's doing anything. If so, tell it to stop. - // This could be done in a single call to the updater if the latter can - // respond to a stop command differently depending on whether it did - // anything or not. - - // If we did in fact stop a running update, update the status on the - // latest update deployment in the DB to `stopped` and respond with that - // deployment. If we do nothing, what should we return? Maybe instead of - // responding with the deployment, this endpoint gets its own - // `StopUpdateResult` response view that says whether it was a noop, and - // if it wasn't, includes the ID of the stopped deployment, which allows - // the client to fetch it if it actually wants it. - - Ok(HttpResponseUpdatedNoContent()) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// List all update deployments -#[endpoint { - method = GET, - path = "/v1/system/update/deployments", - tags = ["system/update"], - unpublished = true, -}] -async fn update_deployments_list( +async fn system_update_get_repository( rqctx: RequestContext>, - query_params: Query, -) -> Result>, HttpError> { + path_params: Path, +) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let updates = nexus - .update_deployments_list_by_id(&opctx, &pagparams) - .await? 
- .into_iter() - .map(|u| u.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - updates, - &|_, u: &views::UpdateDeployment| u.identity.id, - )?)) + let params = path_params.into_inner(); + let description = + nexus.updates_get_repository(&opctx, params.system_version).await?; + Ok(HttpResponseOk(TufRepoGetResponse { + description: description.into_external(), + })) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Fetch a system update deployment -#[endpoint { - method = GET, - path = "/v1/system/update/deployments/{id}", - tags = ["system/update"], - unpublished = true, -}] -async fn update_deployment_view( - rqctx: RequestContext>, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let id = &path.id; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let deployment = - nexus.update_deployment_fetch_by_id(&opctx, id).await?; - Ok(HttpResponseOk(deployment.into())) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} // Silo users /// List users diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 63578e360a..58038cb37a 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -40,7 +40,7 @@ use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::ProducerEndpoint; use omicron_common::api::internal::nexus::SledInstanceState; -use omicron_common::api::internal::nexus::UpdateArtifactId; +use omicron_common::update::ArtifactId; use oximeter::types::ProducerResults; use oximeter_producer::{collect, ProducerIdPathParams}; use schemars::JsonSchema; @@ -438,15 +438,16 @@ async fn cpapi_metrics_collect( }] async fn cpapi_artifact_download( request_context: RequestContext>, - path_params: Path, + path_params: Path, ) -> Result, HttpError> { let context = request_context.context(); let nexus = &context.nexus; let opctx = crate::context::op_context_for_internal_api(&request_context).await; // TODO: return 404 if the error we get here says that the record isn't found - let body = - nexus.download_artifact(&opctx, path_params.into_inner()).await?; + let body = nexus + .updates_download_artifact(&opctx, path_params.into_inner()) + .await?; Ok(HttpResponseOk(Body::from(body).into())) } diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 01aca36e1d..e1392440a1 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -20,7 +20,6 @@ pub mod external_api; // Public for testing mod internal_api; mod populate; mod saga_interface; -mod updates; // public for testing pub use app::test_interfaces::TestInterfaces; pub use app::Nexus; diff --git a/nexus/src/updates.rs b/nexus/src/updates.rs deleted file mode 100644 index 2f57868acc..0000000000 --- a/nexus/src/updates.rs +++ /dev/null @@ -1,74 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -use buf_list::BufList; -use futures::TryStreamExt; -use nexus_db_queries::db; -use omicron_common::update::ArtifactsDocument; -use std::convert::TryInto; - -pub(crate) async fn read_artifacts( - trusted_root: &[u8], - mut base_url: String, -) -> Result< - Vec, - Box, -> { - if !base_url.ends_with('/') { - base_url.push('/'); - } - - let repository = tough::RepositoryLoader::new( - &trusted_root, - format!("{}metadata/", base_url).parse()?, - format!("{}targets/", base_url).parse()?, - ) - .load() - .await?; - - let artifact_document = - match repository.read_target(&"artifacts.json".parse()?).await? { - Some(target) => target.try_collect::().await?, - None => return Err("artifacts.json missing".into()), - }; - let artifacts: ArtifactsDocument = - serde_json::from_reader(buf_list::Cursor::new(&artifact_document))?; - - let valid_until = repository - .root() - .signed - .expires - .min(repository.snapshot().signed.expires) - .min(repository.targets().signed.expires) - .min(repository.timestamp().signed.expires); - - let mut v = Vec::new(); - for artifact in artifacts.artifacts { - // Skip any artifacts where we don't recognize its kind or the target - // name isn't in the repository - let target = - repository.targets().signed.targets.get(&artifact.target.parse()?); - let (kind, target) = match (artifact.kind.to_known(), target) { - (Some(kind), Some(target)) => (kind, target), - _ => break, - }; - - v.push(db::model::UpdateArtifact { - name: artifact.name, - version: db::model::SemverVersion(artifact.version), - kind: db::model::KnownArtifactKind(kind), - targets_role_version: repository - .targets() - .signed - .version - .get() - .try_into()?, - valid_until, - target_name: artifact.target, - target_sha256: hex::encode(&target.hashes.sha256), - target_length: target.length.try_into()?, - }); - } - Ok(v) -} diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index 4a7924770e..5605f33f75 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -36,6 +36,7 @@ serde_json.workspace = true serde_urlencoded.workspace = true slog.workspace = true tokio.workspace = true +tokio-util.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true diff --git a/nexus/test-utils/src/http_testing.rs b/nexus/test-utils/src/http_testing.rs index bf5370a925..ae62218c93 100644 --- a/nexus/test-utils/src/http_testing.rs +++ b/nexus/test-utils/src/http_testing.rs @@ -7,6 +7,7 @@ use anyhow::anyhow; use anyhow::ensure; use anyhow::Context; +use camino::Utf8Path; use dropshot::test_util::ClientTestContext; use dropshot::ResultsPage; use headers::authorization::Credentials; @@ -147,6 +148,35 @@ impl<'a> RequestBuilder<'a> { self } + /// Set the outgoing request body to the contents of a file. + /// + /// A handle to the file will be kept open until the request is completed. + /// + /// If `path` is `None`, the request body will be empty. + pub fn body_file(mut self, path: Option<&Utf8Path>) -> Self { + match path { + Some(path) => { + // Turn the file into a stream. (Opening the file with + // std::fs::File::open means that this method doesn't have to + // be async.) 
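+                    // (Design note: the std::fs::File is converted into a
+                    // tokio::fs::File and wrapped in a ReaderStream below,
+                    // so the request body is streamed rather than buffered
+                    // whole in memory.)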
+ let file = std::fs::File::open(path).with_context(|| { + format!("failed to open request body file at {path}") + }); + match file { + Ok(file) => { + let stream = tokio_util::io::ReaderStream::new( + tokio::fs::File::from_std(file), + ); + self.body = hyper::Body::wrap_stream(stream); + } + Err(error) => self.error = Some(error), + } + } + None => self.body = hyper::Body::empty(), + }; + self + } + /// Set the outgoing request body using URL encoding /// and set the content type appropriately /// diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 4f606f2bff..c721fe3606 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -32,7 +32,6 @@ use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; use omicron_common::api::external::RouteDestination; use omicron_common::api::external::RouteTarget; -use omicron_common::api::external::SemverVersion; use omicron_common::api::external::VpcFirewallRuleUpdateParams; use omicron_test_utils::certificates::CertificateChain; use once_cell::sync::Lazy; @@ -708,13 +707,6 @@ pub static DEMO_SSHKEY_CREATE: Lazy = pub static DEMO_SPECIFIC_SSHKEY_URL: Lazy = Lazy::new(|| format!("{}/{}", DEMO_SSHKEYS_URL, *DEMO_SSHKEY_NAME)); -// System update - -pub static DEMO_SYSTEM_UPDATE_PARAMS: Lazy = - Lazy::new(|| params::SystemUpdatePath { - version: SemverVersion::new(1, 0, 0), - }); - // Project Floating IPs pub static DEMO_FLOAT_IP_NAME: Lazy = Lazy::new(|| "float-ip".parse().unwrap()); @@ -1920,81 +1912,22 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { /* Updates */ VerifyEndpoint { - url: "/v1/system/update/refresh", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Post( - serde_json::Value::Null - )], - }, - - VerifyEndpoint { - url: "/v1/system/update/version", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: "/v1/system/update/components", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: "/v1/system/update/updates", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - // TODO: make system update endpoints work instead of expecting 404 - - VerifyEndpoint { - url: "/v1/system/update/updates/1.0.0", + url: "/v1/system/update/repository?file_name=demo-repo.zip", visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: "/v1/system/update/updates/1.0.0/components", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: "/v1/system/update/start", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Post( - serde_json::to_value(&*DEMO_SYSTEM_UPDATE_PARAMS).unwrap() - )], - }, - - VerifyEndpoint { - url: "/v1/system/update/stop", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Post( - serde_json::Value::Null + allowed_methods: vec![AllowedMethod::Put( + // In reality this is the contents of a zip file. 
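+                // (A test exercising a real upload could stream zip bytes
+                // with `RequestBuilder::body_file`; `Null` is presumably
+                // enough here since these checks only verify privileges.)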
+ serde_json::Value::Null, )], }, VerifyEndpoint { - url: "/v1/system/update/deployments", + url: "/v1/system/update/repository/1.0.0", visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: "/v1/system/update/deployments/120bbb6f-660a-440c-8cb7-199be202ddff", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::GetNonexistent], + // The update system is disabled, which causes a 500 error even for + // privileged users. That is captured by GetUnimplemented. + allowed_methods: vec![AllowedMethod::GetUnimplemented], }, /* Metrics */ diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs index 6cb99b9e45..4b68a6c4f2 100644 --- a/nexus/tests/integration_tests/mod.rs +++ b/nexus/tests/integration_tests/mod.rs @@ -40,7 +40,6 @@ mod sp_updater; mod ssh_keys; mod subnet_allocation; mod switch_port; -mod system_updates; mod unauthorized; mod unauthorized_coverage; mod updates; diff --git a/nexus/tests/integration_tests/system_updates.rs b/nexus/tests/integration_tests/system_updates.rs deleted file mode 100644 index aa00caac29..0000000000 --- a/nexus/tests/integration_tests/system_updates.rs +++ /dev/null @@ -1,219 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use dropshot::ResultsPage; -use http::{method::Method, StatusCode}; -use nexus_db_queries::context::OpContext; -use nexus_test_utils::http_testing::{AuthnMode, NexusRequest}; -use nexus_test_utils_macros::nexus_test; -use nexus_types::external_api::{ - params, shared::UpdateableComponentType, views, -}; -use omicron_common::api::external::SemverVersion; - -type ControlPlaneTestContext = - nexus_test_utils::ControlPlaneTestContext; - -// This file could be combined with ./updates.rs, but there's a lot going on in -// there that has nothing to do with testing the API endpoints. We could come up -// with more descriptive names. - -/// Because there are no create endpoints for these resources, we need to call -/// the `nexus` functions directly. 
-async fn populate_db(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; - let opctx = OpContext::for_tests( - cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), - ); - - // system updates have to exist first - let create_su = - params::SystemUpdateCreate { version: SemverVersion::new(0, 2, 0) }; - nexus - .upsert_system_update(&opctx, create_su) - .await - .expect("Failed to create system update"); - let create_su = - params::SystemUpdateCreate { version: SemverVersion::new(1, 0, 1) }; - nexus - .upsert_system_update(&opctx, create_su) - .await - .expect("Failed to create system update"); - - nexus - .create_updateable_component( - &opctx, - params::UpdateableComponentCreate { - version: SemverVersion::new(0, 4, 1), - system_version: SemverVersion::new(0, 2, 0), - component_type: UpdateableComponentType::BootloaderForSp, - device_id: "look-a-device".to_string(), - }, - ) - .await - .expect("failed to create updateable component"); - - nexus - .create_updateable_component( - &opctx, - params::UpdateableComponentCreate { - version: SemverVersion::new(0, 4, 1), - system_version: SemverVersion::new(1, 0, 1), - component_type: UpdateableComponentType::HubrisForGimletSp, - device_id: "another-device".to_string(), - }, - ) - .await - .expect("failed to create updateable component"); -} - -#[nexus_test] -async fn test_system_version(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - // Initially the endpoint 500s because there are no updateable components. - // This is the desired behavior because those are populated by rack startup - // before the external API starts, so it really is a problem if we can hit - // this endpoint without any data backing it. - // - // Because this data is now populated at rack init, this doesn't work as a - // test. If we really wanted to test it, we would have to run the tests - // without that bit of setup. 
- // - // NexusRequest::expect_failure( - // &client, - // StatusCode::INTERNAL_SERVER_ERROR, - // Method::GET, - // "/v1/system/update/version", - // ) - // .authn_as(AuthnMode::PrivilegedUser) - // .execute() - // .await - // .expect("Failed to 500 with no system version data"); - - // create two updateable components - populate_db(&cptestctx).await; - - let version = - NexusRequest::object_get(&client, "/v1/system/update/version") - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; - - assert_eq!( - version, - views::SystemVersion { - version_range: views::VersionRange { - low: SemverVersion::new(0, 2, 0), - high: SemverVersion::new(2, 0, 0), - }, - status: views::UpdateStatus::Updating, - } - ); -} - -#[nexus_test] -async fn test_list_updates(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - let updates = - NexusRequest::object_get(&client, &"/v1/system/update/updates") - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::>() - .await; - - assert_eq!(updates.items.len(), 3); -} - -#[nexus_test] -async fn test_list_components(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - let component_updates = - NexusRequest::object_get(&client, &"/v1/system/update/components") - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::>() - .await; - - assert_eq!(component_updates.items.len(), 9); -} - -#[nexus_test] -async fn test_get_update(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - // existing update works - let update = - NexusRequest::object_get(&client, &"/v1/system/update/updates/1.0.0") - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; - - assert_eq!(update.version, SemverVersion::new(1, 0, 0)); - - // non-existent update 404s - NexusRequest::expect_failure( - client, - StatusCode::NOT_FOUND, - Method::GET, - "/v1/system/update/updates/1.0.1", - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Failed to 404 on non-existent update"); -} - -#[nexus_test] -async fn test_list_update_components(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - // listing components of an existing update works - let components = NexusRequest::object_get( - &client, - &"/v1/system/update/updates/1.0.0/components", - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::>() - .await; - - assert_eq!(components.items.len(), 9); - - // non existent 404s - NexusRequest::expect_failure( - client, - StatusCode::NOT_FOUND, - Method::GET, - "/v1/system/update/updates/1.0.1/components", - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Failed to 404 on components of nonexistent system update"); -} - -#[nexus_test] -async fn test_update_deployments(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - let deployments = - NexusRequest::object_get(&client, &"/v1/system/update/deployments") - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::>() - .await; - - assert_eq!(deployments.items.len(), 2); - - let first_dep = deployments.items.get(0).unwrap(); - - let dep_id = first_dep.identity.id.to_string(); - let dep_url = format!("/v1/system/update/deployments/{}", dep_id); - let deployment = NexusRequest::object_get(&client, &dep_url) - .authn_as(AuthnMode::PrivilegedUser) - .execute_and_parse_unwrap::() - .await; - - assert_eq!(deployment.version, first_dep.version); -} diff --git 
a/nexus/tests/integration_tests/updates.rs b/nexus/tests/integration_tests/updates.rs index 418e12e001..e830348103 100644 --- a/nexus/tests/integration_tests/updates.rs +++ b/nexus/tests/integration_tests/updates.rs @@ -7,69 +7,49 @@ // - test that an unknown artifact returns 404, not 500 // - tests around target names and artifact names that contain dangerous paths like `../` -use async_trait::async_trait; -use camino_tempfile::Utf8TempDir; -use chrono::{Duration, Utc}; +use anyhow::{ensure, Context, Result}; +use camino::Utf8Path; +use camino_tempfile::{Builder, Utf8TempDir, Utf8TempPath}; +use clap::Parser; use dropshot::test_util::LogContext; -use dropshot::{ - endpoint, ApiDescription, HttpError, HttpServerStarter, Path, - RequestContext, -}; -use http::{Method, Response, StatusCode}; -use hyper::Body; +use http::{Method, StatusCode}; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::{load_test_config, test_setup, test_setup_with_config}; +use omicron_common::api::external::{ + SemverVersion, TufRepoGetResponse, TufRepoInsertResponse, + TufRepoInsertStatus, +}; use omicron_common::api::internal::nexus::KnownArtifactKind; use omicron_common::nexus_config::UpdatesConfig; -use omicron_common::update::{Artifact, ArtifactKind, ArtifactsDocument}; use omicron_sled_agent::sim; -use ring::pkcs8::Document; -use ring::rand::{SecureRandom, SystemRandom}; -use ring::signature::Ed25519KeyPair; -use schemars::JsonSchema; +use pretty_assertions::assert_eq; use serde::Deserialize; -use std::collections::HashMap; -use std::convert::TryInto; -use std::fmt::{self, Debug}; +use std::fmt::Debug; use std::fs::File; use std::io::Write; -use std::num::NonZeroU64; -use std::path::PathBuf; -use tempfile::{NamedTempFile, TempDir}; -use tough::editor::signed::{PathExists, SignedRole}; -use tough::editor::RepositoryEditor; -use tough::key_source::KeySource; -use tough::schema::{KeyHolder, RoleKeys, RoleType, Root}; -use tough::sign::Sign; +use tufaceous_lib::assemble::{DeserializedManifest, ManifestTweak}; -const UPDATE_COMPONENT: &'static str = "omicron-test-component"; +const FAKE_MANIFEST_PATH: &'static str = "../tufaceous/manifests/fake.toml"; -#[tokio::test] -async fn test_update_end_to_end() { +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_update_uninitialized() -> Result<()> { let mut config = load_test_config(); - let logctx = LogContext::new("test_update_end_to_end", &config.pkg.log); + let logctx = LogContext::new("test_update_uninitialized", &config.pkg.log); + + // Build a fake TUF repo + let temp_dir = Utf8TempDir::new()?; + let archive_path = temp_dir.path().join("archive.zip"); + + let args = tufaceous::Args::try_parse_from([ + "tufaceous", + "assemble", + FAKE_MANIFEST_PATH, + archive_path.as_str(), + ]) + .context("error parsing args")?; + + args.exec(&logctx.log).await.context("error executing assemble command")?; - // build the TUF repo - let rng = SystemRandom::new(); - let tuf_repo = new_tuf_repo(&rng).await; - slog::info!(logctx.log, "TUF repo created at {}", tuf_repo.path()); - - // serve it over HTTP - let dropshot_config = Default::default(); - let mut api = ApiDescription::new(); - api.register(static_content).unwrap(); - let context = FileServerContext { base: tuf_repo.path().to_owned().into() }; - let server = - HttpServerStarter::new(&dropshot_config, api, context, &logctx.log) - .unwrap() - .start(); - let local_addr = server.local_addr(); - - // stand up the test environment - config.pkg.updates = 
Some(UpdatesConfig { - trusted_root: tuf_repo.path().join("metadata").join("1.root.json"), - default_base_url: format!("http://{}/", local_addr), - }); let cptestctx = test_setup_with_config::( "test_update_end_to_end", &mut config, @@ -79,212 +59,304 @@ async fn test_update_end_to_end() { .await; let client = &cptestctx.external_client; - // call /v1/system/update/refresh on nexus - // - download and verify the repo - // - return 204 Non Content - // - tells sled agent to do the thing - NexusRequest::new( - RequestBuilder::new(client, Method::POST, "/v1/system/update/refresh") - .expect_status(Some(StatusCode::NO_CONTENT)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap(); + // Attempt to upload the repository to Nexus. This should fail with a 500 + // error because the updates system is not configured. + { + make_upload_request( + client, + &archive_path, + StatusCode::INTERNAL_SERVER_ERROR, + ) + .execute() + .await + .context("repository upload should have failed with 500 error")?; + } - let artifact_path = cptestctx.sled_agent_storage.path(); - let component_path = artifact_path.join(UPDATE_COMPONENT); - // check sled agent did the thing - assert_eq!(tokio::fs::read(component_path).await.unwrap(), TARGET_CONTENTS); + // Attempt to fetch a repository description from Nexus. This should also + // fail with a 500 error. + { + make_get_request( + client, + "1.0.0".parse().unwrap(), + StatusCode::INTERNAL_SERVER_ERROR, + ) + .execute() + .await + .context("repository fetch should have failed with 500 error")?; + } - server.close().await.expect("failed to shut down dropshot server"); cptestctx.teardown().await; logctx.cleanup_successful(); + + Ok(()) } -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_update_end_to_end() -> Result<()> { + let mut config = load_test_config(); + config.pkg.updates = Some(UpdatesConfig { + // XXX: This is currently not used by the update system, but + // trusted_root will become meaningful in the future. + trusted_root: "does-not-exist.json".into(), + }); + let logctx = LogContext::new("test_update_end_to_end", &config.pkg.log); -struct FileServerContext { - base: PathBuf, -} + // Build a fake TUF repo + let temp_dir = Utf8TempDir::new()?; + let archive_path = temp_dir.path().join("archive.zip"); -#[derive(Deserialize, JsonSchema)] -struct AllPath { - path: Vec, -} + let args = tufaceous::Args::try_parse_from([ + "tufaceous", + "assemble", + FAKE_MANIFEST_PATH, + archive_path.as_str(), + ]) + .context("error parsing args")?; -#[endpoint(method = GET, path = "/{path:.*}", unpublished = true)] -async fn static_content( - rqctx: RequestContext, - path: Path, -) -> Result, HttpError> { - // NOTE: this is a particularly brief and bad implementation of this to keep the test shorter. - // see https://github.com/oxidecomputer/dropshot/blob/main/dropshot/examples/file_server.rs for - // something more robust! - let mut fs_path = rqctx.context().base.clone(); - for component in path.into_inner().path { - fs_path.push(component); - } - let body = tokio::fs::read(fs_path).await.map_err(|e| { - // tough 0.15+ depend on ENOENT being translated into 404. - if e.kind() == std::io::ErrorKind::NotFound { - HttpError::for_not_found(None, e.to_string()) - } else { - HttpError::for_bad_request(None, e.to_string()) - } - })?; - Ok(Response::builder().status(StatusCode::OK).body(body.into())?) 
-} + args.exec(&logctx.log).await.context("error executing assemble command")?; -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + let cptestctx = test_setup_with_config::( + "test_update_end_to_end", + &mut config, + sim::SimMode::Explicit, + None, + ) + .await; + let client = &cptestctx.external_client; -const TARGET_CONTENTS: &[u8] = b"hello world".as_slice(); - -async fn new_tuf_repo(rng: &(dyn SecureRandom + Sync)) -> Utf8TempDir { - let version = - NonZeroU64::new(Utc::now().timestamp().try_into().unwrap()).unwrap(); - let expires = Utc::now() + Duration::minutes(5); - - // create the key - let key_data = Ed25519KeyPair::generate_pkcs8(rng).unwrap(); - let key = Ed25519KeyPair::from_pkcs8(key_data.as_ref()).unwrap(); - let tuf_key = key.tuf_key(); - let key_id = tuf_key.key_id().unwrap(); - - // create the root role - let mut root = Root { - spec_version: "1.0.0".to_string(), - consistent_snapshot: true, - version: NonZeroU64::new(1).unwrap(), - expires, - keys: HashMap::new(), - roles: HashMap::new(), - _extra: HashMap::new(), + // Upload the repository to Nexus. + let mut initial_description = { + let response = + make_upload_request(client, &archive_path, StatusCode::OK) + .execute() + .await + .context("error uploading repository")?; + + let response = + serde_json::from_slice::(&response.body) + .context("error deserializing response body")?; + assert_eq!(response.status, TufRepoInsertStatus::Inserted); + response.recorded }; - root.keys.insert(key_id.clone(), tuf_key); - for role in [ - RoleType::Root, - RoleType::Snapshot, - RoleType::Targets, - RoleType::Timestamp, - ] { - root.roles.insert( - role, - RoleKeys { - keyids: vec![key_id.clone()], - threshold: NonZeroU64::new(1).unwrap(), - _extra: HashMap::new(), - }, - ); - } - - let signing_keys = - vec![Box::new(KeyKeySource(key_data)) as Box]; - // self-sign the root role - let signed_root = SignedRole::new( - root.clone(), - &KeyHolder::Root(root), - &signing_keys, - rng, - ) - .await - .unwrap(); + // Upload the repository to Nexus again. This should return a 200 with an + // `AlreadyExists` status. + let mut reupload_description = { + let response = + make_upload_request(client, &archive_path, StatusCode::OK) + .execute() + .await + .context("error uploading repository a second time")?; + + let response = + serde_json::from_slice::(&response.body) + .context("error deserializing response body")?; + assert_eq!(response.status, TufRepoInsertStatus::AlreadyExists); + response.recorded + }; - // TODO(iliana): there's no way to create a `RepositoryEditor` without having the root.json on - // disk. this is really unergonomic. 
write and upstream a fix - let mut root_tmp = NamedTempFile::new().unwrap(); - root_tmp.as_file_mut().write_all(signed_root.buffer()).unwrap(); - let mut editor = RepositoryEditor::new(&root_tmp).await.unwrap(); - root_tmp.close().unwrap(); - - editor - .targets_version(version) - .unwrap() - .targets_expires(expires) - .unwrap() - .snapshot_version(version) - .snapshot_expires(expires) - .timestamp_version(version) - .timestamp_expires(expires); - let (targets_dir, target_names) = generate_targets(); - for target in target_names { - editor.add_target_path(targets_dir.path().join(target)).await.unwrap(); - } + initial_description.sort_artifacts(); + reupload_description.sort_artifacts(); - let signed_repo = editor.sign(&signing_keys).await.unwrap(); + assert_eq!( + initial_description, reupload_description, + "initial description matches reupload" + ); - let repo = Utf8TempDir::new().unwrap(); - signed_repo.write(repo.path().join("metadata")).await.unwrap(); - signed_repo - .copy_targets( - targets_dir, - repo.path().join("targets"), - PathExists::Fail, + // Now get the repository that was just uploaded. + let mut get_description = { + let response = make_get_request( + client, + "1.0.0".parse().unwrap(), // this is the system version of the fake manifest + StatusCode::OK, ) + .execute() .await - .unwrap(); - - repo -} + .context("error fetching repository")?; -// Returns a temporary directory of targets and the list of filenames in it. -fn generate_targets() -> (TempDir, Vec<&'static str>) { - let dir = TempDir::new().unwrap(); - - // The update artifact. This will someday be a tarball of some variety. - std::fs::write( - dir.path().join(format!("{UPDATE_COMPONENT}-1")), - TARGET_CONTENTS, - ) - .unwrap(); - - // artifacts.json, which describes all available artifacts. - let artifacts = ArtifactsDocument { - system_version: "1.0.0".parse().unwrap(), - artifacts: vec![Artifact { - name: UPDATE_COMPONENT.into(), - version: "0.0.0".parse().unwrap(), - kind: ArtifactKind::from_known(KnownArtifactKind::ControlPlane), - target: format!("{UPDATE_COMPONENT}-1"), - }], + let response = + serde_json::from_slice::(&response.body) + .context("error deserializing response body")?; + response.description }; - let f = File::create(dir.path().join("artifacts.json")).unwrap(); - serde_json::to_writer_pretty(f, &artifacts).unwrap(); - (dir, vec!["omicron-test-component-1", "artifacts.json"]) -} + get_description.sort_artifacts(); -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + assert_eq!( + initial_description, get_description, + "initial description matches fetched description" + ); -// Wrapper struct so that we can use an in-memory key as a key source. -// TODO(iliana): this should just be in tough with a lot less hacks -struct KeyKeySource(Document); + // TODO: attempt to download extracted artifacts. -impl Debug for KeyKeySource { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("KeyKeySource").finish() + // Upload a new repository with the same system version but a different + // version for one of the components. This will produce a different hash, + // which should return an error. 
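+    // (The hash in question is the SHA256 of the uploaded zip itself, which
+    // `ArtifactsWithPlan::from_stream` computes while spooling the body to a
+    // temp file; system_version is unique in the tuf_repo table, so a
+    // same-version repo with different bytes comes back as a 409 CONFLICT.)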
+ { + let tweaks = &[ManifestTweak::ArtifactVersion { + kind: KnownArtifactKind::GimletSp, + version: "2.0.0".parse().unwrap(), + }]; + let archive_path = + make_tweaked_archive(&logctx.log, &temp_dir, tweaks).await?; + + let response = make_upload_request( + client, + &archive_path, + StatusCode::CONFLICT, + ) + .execute() + .await + .context( + "error uploading repository with different artifact version \ + but same system version", + )?; + assert_error_message_contains( + &response.body, + "Uploaded repository with system version 1.0.0 has SHA256 hash", + )?; } -} -#[async_trait] -impl KeySource for KeyKeySource { - async fn as_sign( - &self, - ) -> Result, Box> + // Upload a new repository with a different system version and different + // contents (but same version) for an artifact. { - // this is a really ugly hack, because tough doesn't `impl Sign for &'a T where T: Sign`. - // awslabs/tough#446 - Ok(Box::new(Ed25519KeyPair::from_pkcs8(self.0.as_ref()).unwrap())) + let tweaks = &[ + ManifestTweak::SystemVersion("2.0.0".parse().unwrap()), + ManifestTweak::ArtifactContents { + kind: KnownArtifactKind::ControlPlane, + size_delta: 1024, + }, + ]; + let archive_path = + make_tweaked_archive(&logctx.log, &temp_dir, tweaks).await?; + + let response = + make_upload_request(client, &archive_path, StatusCode::CONFLICT) + .execute() + .await + .context( + "error uploading repository with artifact \ + containing different hash for same version", + )?; + assert_error_message_contains( + &response.body, + "Uploaded artifacts don't match existing artifacts with same IDs:", + )?; } - async fn write( - &self, - _value: &str, - _key_id_hex: &str, - ) -> Result<(), Box> { - unimplemented!(); + // Upload a new repository with a different system version but no other + // changes. This should be accepted. + { + let tweaks = &[ManifestTweak::SystemVersion("2.0.0".parse().unwrap())]; + let archive_path = + make_tweaked_archive(&logctx.log, &temp_dir, tweaks).await?; + + let response = + make_upload_request(client, &archive_path, StatusCode::OK) + .execute() + .await + .context("error uploading repository with different system version (should succeed)")?; + + let response = + serde_json::from_slice::(&response.body) + .context("error deserializing response body")?; + assert_eq!(response.status, TufRepoInsertStatus::Inserted); } + + cptestctx.teardown().await; + logctx.cleanup_successful(); + + Ok(()) +} + +async fn make_tweaked_archive( + log: &slog::Logger, + temp_dir: &Utf8TempDir, + tweaks: &[ManifestTweak], +) -> anyhow::Result { + let manifest = DeserializedManifest::tweaked_fake(tweaks); + let manifest_path = temp_dir.path().join("fake2.toml"); + let mut manifest_file = + File::create(&manifest_path).context("error creating manifest file")?; + let manifest_to_toml = manifest.to_toml()?; + manifest_file.write_all(manifest_to_toml.as_bytes())?; + + let archive_path = Builder::new() + .prefix("archive") + .suffix(".zip") + .tempfile_in(temp_dir.path()) + .context("error creating temp file for tweaked archive")? 
+ .into_temp_path(); + + let args = tufaceous::Args::try_parse_from([ + "tufaceous", + "assemble", + manifest_path.as_str(), + archive_path.as_str(), + ]) + .context("error parsing args")?; + + args.exec(log).await.context("error executing assemble command")?; + + Ok(archive_path) +} + +fn make_upload_request<'a>( + client: &'a dropshot::test_util::ClientTestContext, + archive_path: &'a Utf8Path, + expected_status: StatusCode, +) -> NexusRequest<'a> { + let file_name = + archive_path.file_name().expect("archive_path must have a file name"); + let request = NexusRequest::new( + RequestBuilder::new( + client, + Method::PUT, + &format!("/v1/system/update/repository?file_name={}", file_name), + ) + .body_file(Some(archive_path)) + .expect_status(Some(expected_status)), + ) + .authn_as(AuthnMode::PrivilegedUser); + request +} + +fn make_get_request( + client: &dropshot::test_util::ClientTestContext, + system_version: SemverVersion, + expected_status: StatusCode, +) -> NexusRequest<'_> { + let request = NexusRequest::new( + RequestBuilder::new( + client, + Method::GET, + &format!("/v1/system/update/repository/{system_version}"), + ) + .expect_status(Some(expected_status)), + ) + .authn_as(AuthnMode::PrivilegedUser); + request +} + +#[derive(Debug, Deserialize)] +struct ErrorBody { + message: String, +} + +// XXX: maybe replace this with a more detailed error code +fn assert_error_message_contains( + body: &[u8], + needle: &str, +) -> anyhow::Result<()> { + let body: ErrorBody = + serde_json::from_slice(body).context("body is not valid JSON")?; + ensure!( + body.message.contains(needle), + "expected body to contain {:?}, but it was {:?}", + needle, + body + ); + Ok(()) } // =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= diff --git a/nexus/tests/output/unexpected-authz-endpoints.txt b/nexus/tests/output/unexpected-authz-endpoints.txt index 1cd87a75e5..e8bb60224a 100644 --- a/nexus/tests/output/unexpected-authz-endpoints.txt +++ b/nexus/tests/output/unexpected-authz-endpoints.txt @@ -9,13 +9,5 @@ POST "/v1/vpc-router-routes?project=demo-project&vpc=demo-vpc&router=demo-vpc- GET "/v1/vpc-router-routes/demo-router-route?project=demo-project&vpc=demo-vpc&router=demo-vpc-router" PUT "/v1/vpc-router-routes/demo-router-route?project=demo-project&vpc=demo-vpc&router=demo-vpc-router" DELETE "/v1/vpc-router-routes/demo-router-route?project=demo-project&vpc=demo-vpc&router=demo-vpc-router" -POST "/v1/system/update/refresh" -GET "/v1/system/update/version" -GET "/v1/system/update/components" -GET "/v1/system/update/updates" -GET "/v1/system/update/updates/1.0.0" -GET "/v1/system/update/updates/1.0.0/components" -POST "/v1/system/update/start" -POST "/v1/system/update/stop" -GET "/v1/system/update/deployments" -GET "/v1/system/update/deployments/120bbb6f-660a-440c-8cb7-199be202ddff" +PUT "/v1/system/update/repository?file_name=demo-repo.zip" +GET "/v1/system/update/repository/1.0.0" diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 62c8224461..c32dae4df9 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -1960,32 +1960,17 @@ pub struct ResourceMetrics { // SYSTEM UPDATE +/// Parameters for PUT requests for `/v1/system/update/repository`. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct SystemUpdatePath { - pub version: SemverVersion, +pub struct UpdatesPutRepositoryParams { + /// The name of the uploaded file. 
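+    ///
+    /// This is a query parameter only: the request body is the raw
+    /// repository archive itself (not a multipart form), so the file name
+    /// has to travel out of band.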
+ pub file_name: String, } -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct SystemUpdateStart { - pub version: SemverVersion, -} - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct SystemUpdateCreate { - pub version: SemverVersion, -} +/// Parameters for GET requests for `/v1/system/update/repository`. -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct ComponentUpdateCreate { - pub version: SemverVersion, - pub component_type: shared::UpdateableComponentType, - pub system_update_id: Uuid, -} - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct UpdateableComponentCreate { - pub version: SemverVersion, +#[derive(Clone, Debug, Deserialize, JsonSchema)] +pub struct UpdatesGetRepositoryParams { + /// The version to get. pub system_version: SemverVersion, - pub component_type: shared::UpdateableComponentType, - pub device_id: String, } diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 5e31be7af8..45cfe8e267 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -13,7 +13,7 @@ use chrono::DateTime; use chrono::Utc; use omicron_common::api::external::{ ByteCount, Digest, Error, IdentityMetadata, InstanceState, Ipv4Net, - Ipv6Net, Name, ObjectIdentity, RoleName, SemverVersion, SimpleIdentity, + Ipv6Net, Name, ObjectIdentity, RoleName, SimpleIdentity, }; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -608,65 +608,6 @@ pub enum DeviceAccessTokenType { Bearer, } -// SYSTEM UPDATES - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] -pub struct VersionRange { - pub low: SemverVersion, - pub high: SemverVersion, -} - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] -#[serde(tag = "status", rename_all = "snake_case")] -pub enum UpdateStatus { - Updating, - Steady, -} - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] -pub struct SystemVersion { - pub version_range: VersionRange, - pub status: UpdateStatus, - // TODO: time_released? time_last_applied? 
I got a fever and the only - // prescription is more timestamps -} - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct SystemUpdate { - #[serde(flatten)] - pub identity: AssetIdentityMetadata, - pub version: SemverVersion, -} - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct ComponentUpdate { - #[serde(flatten)] - pub identity: AssetIdentityMetadata, - - pub component_type: shared::UpdateableComponentType, - pub version: SemverVersion, -} - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct UpdateableComponent { - #[serde(flatten)] - pub identity: AssetIdentityMetadata, - - pub device_id: String, - pub component_type: shared::UpdateableComponentType, - pub version: SemverVersion, - pub system_version: SemverVersion, - pub status: UpdateStatus, -} - -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct UpdateDeployment { - #[serde(flatten)] - pub identity: AssetIdentityMetadata, - pub version: SemverVersion, - pub status: UpdateStatus, -} - // SYSTEM HEALTH #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, JsonSchema)] diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index b5cbb25c66..2a047068ee 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -18,10 +18,10 @@ { "in": "path", "name": "kind", - "description": "The kind of update artifact this is.", + "description": "The kind of artifact this is.", "required": true, "schema": { - "$ref": "#/components/schemas/KnownArtifactKind" + "type": "string" } }, { @@ -6534,21 +6534,6 @@ "ZpoolPutResponse": { "type": "object" }, - "KnownArtifactKind": { - "description": "Kinds of update artifacts, as used by Nexus to determine what updates are available and by sled-agent to determine how to apply an update when asked.", - "type": "string", - "enum": [ - "gimlet_sp", - "gimlet_rot", - "host", - "trampoline", - "control_plane", - "psc_sp", - "psc_rot", - "switch_sp", - "switch_rot" - ] - }, "SemverVersion": { "type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" diff --git a/schema/crdb/27.0.0/up01.sql b/schema/crdb/27.0.0/up01.sql new file mode 100644 index 0000000000..5b7fb4df93 --- /dev/null +++ b/schema/crdb/27.0.0/up01.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS omicron.public.update_deployment; diff --git a/schema/crdb/27.0.0/up02.sql b/schema/crdb/27.0.0/up02.sql new file mode 100644 index 0000000000..a6ab82583d --- /dev/null +++ b/schema/crdb/27.0.0/up02.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS omicron.public.updateable_component; diff --git a/schema/crdb/27.0.0/up03.sql b/schema/crdb/27.0.0/up03.sql new file mode 100644 index 0000000000..8a9b89bd5c --- /dev/null +++ b/schema/crdb/27.0.0/up03.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS omicron.public.system_update_component_update; diff --git a/schema/crdb/27.0.0/up04.sql b/schema/crdb/27.0.0/up04.sql new file mode 100644 index 0000000000..9fb8d61a1e --- /dev/null +++ b/schema/crdb/27.0.0/up04.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS omicron.public.component_update; diff --git a/schema/crdb/27.0.0/up05.sql b/schema/crdb/27.0.0/up05.sql new file mode 100644 index 0000000000..bb76e717ab --- /dev/null +++ b/schema/crdb/27.0.0/up05.sql @@ -0,0 +1 @@ +DROP TYPE IF EXISTS omicron.public.updateable_component_type; diff --git a/schema/crdb/27.0.0/up06.sql b/schema/crdb/27.0.0/up06.sql new file mode 
100644 index 0000000000..a68d6595bb --- /dev/null +++ b/schema/crdb/27.0.0/up06.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS omicron.public.system_update; diff --git a/schema/crdb/27.0.0/up07.sql b/schema/crdb/27.0.0/up07.sql new file mode 100644 index 0000000000..ddcbbbb8fd --- /dev/null +++ b/schema/crdb/27.0.0/up07.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS omicron.public.update_artifact; diff --git a/schema/crdb/27.0.0/up08.sql b/schema/crdb/27.0.0/up08.sql new file mode 100644 index 0000000000..75a15dc817 --- /dev/null +++ b/schema/crdb/27.0.0/up08.sql @@ -0,0 +1 @@ +DROP TYPE IF EXISTS omicron.public.update_artifact_kind; diff --git a/schema/crdb/27.0.0/up09.sql b/schema/crdb/27.0.0/up09.sql new file mode 100644 index 0000000000..984aff57de --- /dev/null +++ b/schema/crdb/27.0.0/up09.sql @@ -0,0 +1 @@ +DROP TYPE IF EXISTS omicron.public.update_status; diff --git a/schema/crdb/27.0.0/up10.sql b/schema/crdb/27.0.0/up10.sql new file mode 100644 index 0000000000..ddb13ca1c0 --- /dev/null +++ b/schema/crdb/27.0.0/up10.sql @@ -0,0 +1,33 @@ +-- Describes a single uploaded TUF repo. +-- +-- Identified by both a random uuid and its SHA256 hash. The hash could be the +-- primary key, but it seems unnecessarily large and unwieldy. +CREATE TABLE IF NOT EXISTS omicron.public.tuf_repo ( + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + + sha256 STRING(64) NOT NULL, + + -- The version of the targets.json role that was used to generate the repo. + targets_role_version INT NOT NULL, + + -- The valid_until time for the repo. + valid_until TIMESTAMPTZ NOT NULL, + + -- The system version described in the TUF repo. + -- + -- This is the "true" primary key, but is not treated as such in the + -- database because we may want to change this format in the future. + -- Re-doing primary keys is annoying. + -- + -- Because the system version is embedded in the repo's artifacts.json, + -- each system version is associated with exactly one checksum. + system_version STRING(64) NOT NULL, + + -- For debugging only: + -- Filename provided by the user. + file_name TEXT NOT NULL, + + CONSTRAINT unique_checksum UNIQUE (sha256), + CONSTRAINT unique_system_version UNIQUE (system_version) +); diff --git a/schema/crdb/27.0.0/up11.sql b/schema/crdb/27.0.0/up11.sql new file mode 100644 index 0000000000..e0e36a51d7 --- /dev/null +++ b/schema/crdb/27.0.0/up11.sql @@ -0,0 +1,23 @@ +-- Describes an individual artifact from an uploaded TUF repo. +-- +-- In the future, this may also be used to describe artifacts that are fetched +-- from a remote TUF repo, but that requires some additional design work. +CREATE TABLE IF NOT EXISTS omicron.public.tuf_artifact ( + name STRING(63) NOT NULL, + version STRING(63) NOT NULL, + -- This used to be an enum but is now a string, because it can represent + -- artifact kinds currently unknown to a particular version of Nexus as + -- well. + kind STRING(63) NOT NULL, + + -- The time this artifact was first recorded. + time_created TIMESTAMPTZ NOT NULL, + + -- The SHA256 hash of the artifact, typically obtained from the TUF + -- targets.json (and validated at extract time). + sha256 STRING(64) NOT NULL, + -- The length of the artifact, in bytes. + artifact_size INT8 NOT NULL, + + PRIMARY KEY (name, version, kind) +); diff --git a/schema/crdb/27.0.0/up12.sql b/schema/crdb/27.0.0/up12.sql new file mode 100644 index 0000000000..9c1ffb0de4 --- /dev/null +++ b/schema/crdb/27.0.0/up12.sql @@ -0,0 +1,21 @@ +-- Reflects that a particular artifact was provided by a particular TUF repo. 
+-- This is a many-many mapping. +CREATE TABLE IF NOT EXISTS omicron.public.tuf_repo_artifact ( + tuf_repo_id UUID NOT NULL, + tuf_artifact_name STRING(63) NOT NULL, + tuf_artifact_version STRING(63) NOT NULL, + tuf_artifact_kind STRING(63) NOT NULL, + + /* + For the primary key, this definition uses the natural key rather than a + smaller surrogate key (UUID). That's because with CockroachDB the most + important factor in selecting a primary key is the ability to distribute + well. In this case, the first element of the primary key is the tuf_repo_id, + which is a random UUID. + + For more, see https://www.cockroachlabs.com/blog/how-to-choose-a-primary-key/. + */ + PRIMARY KEY ( + tuf_repo_id, tuf_artifact_name, tuf_artifact_version, tuf_artifact_kind + ) +); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 79a43d3c89..c91bb669a9 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1955,184 +1955,84 @@ CREATE INDEX IF NOT EXISTS lookup_console_by_silo_user ON omicron.public.console /*******************************************************************/ -CREATE TYPE IF NOT EXISTS omicron.public.update_artifact_kind AS ENUM ( - -- Sled artifacts - 'gimlet_sp', - 'gimlet_rot', - 'host', - 'trampoline', - 'control_plane', - - -- PSC artifacts - 'psc_sp', - 'psc_rot', - - -- Switch artifacts - 'switch_sp', - 'switch_rot' -); - -CREATE TABLE IF NOT EXISTS omicron.public.update_artifact ( - name STRING(63) NOT NULL, - version STRING(63) NOT NULL, - kind omicron.public.update_artifact_kind NOT NULL, - - /* the version of the targets.json role this came from */ - targets_role_version INT NOT NULL, - - /* when the metadata this artifact was cached from expires */ - valid_until TIMESTAMPTZ NOT NULL, - - /* data about the target from the targets.json role */ - target_name STRING(512) NOT NULL, - target_sha256 STRING(64) NOT NULL, - target_length INT NOT NULL, - - PRIMARY KEY (name, version, kind) -); - -/* This index is used to quickly find outdated artifacts. */ -CREATE INDEX IF NOT EXISTS lookup_artifact_by_targets_role_version ON omicron.public.update_artifact ( - targets_role_version -); - -/* - * System updates - */ -CREATE TABLE IF NOT EXISTS omicron.public.system_update ( - /* Identity metadata (asset) */ - id UUID PRIMARY KEY, - time_created TIMESTAMPTZ NOT NULL, - time_modified TIMESTAMPTZ NOT NULL, - - -- Because the version is unique, it could be the PK, but that would make - -- this resource different from every other resource for little benefit. - - -- Unique semver version - version STRING(64) NOT NULL -- TODO: length -); - -CREATE UNIQUE INDEX IF NOT EXISTS lookup_update_by_version ON omicron.public.system_update ( - version -); - - -CREATE TYPE IF NOT EXISTS omicron.public.updateable_component_type AS ENUM ( - 'bootloader_for_rot', - 'bootloader_for_sp', - 'bootloader_for_host_proc', - 'hubris_for_psc_rot', - 'hubris_for_psc_sp', - 'hubris_for_sidecar_rot', - 'hubris_for_sidecar_sp', - 'hubris_for_gimlet_rot', - 'hubris_for_gimlet_sp', - 'helios_host_phase_1', - 'helios_host_phase_2', - 'host_omicron' -); - -/* - * Component updates. Associated with at least one system_update through - * system_update_component_update. - */ -CREATE TABLE IF NOT EXISTS omicron.public.component_update ( - /* Identity metadata (asset) */ +-- Describes a single uploaded TUF repo. +-- +-- Identified by both a random uuid and its SHA256 hash. The hash could be the +-- primary key, but it seems unnecessarily large and unwieldy. 
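+--
+-- Since system_version is UNIQUE (see unique_system_version below), a fetch
+-- by version (the external GET /v1/system/update/repository/{version}
+-- endpoint) can be served by a point lookup, e.g.:
+--
+--   SELECT * FROM omicron.public.tuf_repo WHERE system_version = '1.0.0';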
+CREATE TABLE IF NOT EXISTS omicron.public.tuf_repo ( id UUID PRIMARY KEY, time_created TIMESTAMPTZ NOT NULL, - time_modified TIMESTAMPTZ NOT NULL, - -- On component updates there's no device ID because the update can apply to - -- multiple instances of a given device kind + sha256 STRING(64) NOT NULL, - -- The *system* update version associated with this version (this is confusing, will rename) - version STRING(64) NOT NULL, -- TODO: length - -- TODO: add component update version to component_update + -- The version of the targets.json role that was used to generate the repo. + targets_role_version INT NOT NULL, - component_type omicron.public.updateable_component_type NOT NULL -); + -- The valid_until time for the repo. + valid_until TIMESTAMPTZ NOT NULL, --- version is unique per component type -CREATE UNIQUE INDEX IF NOT EXISTS lookup_component_by_type_and_version ON omicron.public.component_update ( - component_type, version -); + -- The system version described in the TUF repo. + -- + -- This is the "true" primary key, but is not treated as such in the + -- database because we may want to change this format in the future. + -- Re-doing primary keys is annoying. + -- + -- Because the system version is embedded in the repo's artifacts.json, + -- each system version is associated with exactly one checksum. + system_version STRING(64) NOT NULL, -/* - * Associate system updates with component updates. Not done with a - * system_update_id field on component_update because the same component update - * may be part of more than one system update. - */ -CREATE TABLE IF NOT EXISTS omicron.public.system_update_component_update ( - system_update_id UUID NOT NULL, - component_update_id UUID NOT NULL, + -- For debugging only: + -- Filename provided by the user. + file_name TEXT NOT NULL, - PRIMARY KEY (system_update_id, component_update_id) + CONSTRAINT unique_checksum UNIQUE (sha256), + CONSTRAINT unique_system_version UNIQUE (system_version) ); --- For now, the plan is to treat stopped, failed, completed as sub-cases of --- "steady" described by a "reason". But reason is not implemented yet. --- Obviously this could be a boolean, but boolean status fields never stay --- boolean for long. -CREATE TYPE IF NOT EXISTS omicron.public.update_status AS ENUM ( - 'updating', - 'steady' -); +-- Describes an individual artifact from an uploaded TUF repo. +-- +-- In the future, this may also be used to describe artifacts that are fetched +-- from a remote TUF repo, but that requires some additional design work. +CREATE TABLE IF NOT EXISTS omicron.public.tuf_artifact ( + name STRING(63) NOT NULL, + version STRING(63) NOT NULL, + -- This used to be an enum but is now a string, because it can represent + -- artifact kinds currently unknown to a particular version of Nexus as + -- well. + kind STRING(63) NOT NULL, -/* - * Updateable components and their update status - */ -CREATE TABLE IF NOT EXISTS omicron.public.updateable_component ( - /* Identity metadata (asset) */ - id UUID PRIMARY KEY, + -- The time this artifact was first recorded. time_created TIMESTAMPTZ NOT NULL, - time_modified TIMESTAMPTZ NOT NULL, - - -- Free-form string that comes from the device - device_id STRING(40) NOT NULL, - - component_type omicron.public.updateable_component_type NOT NULL, - - -- The semver version of this component's own software - version STRING(64) NOT NULL, -- TODO: length - -- The version of the system update this component's software came from. 
- -- This may need to be nullable if we are registering components before we - -- know about system versions at all - system_version STRING(64) NOT NULL, -- TODO: length + -- The SHA256 hash of the artifact, typically obtained from the TUF + -- targets.json (and validated at extract time). + sha256 STRING(64) NOT NULL, + -- The length of the artifact, in bytes. + artifact_size INT8 NOT NULL, - status omicron.public.update_status NOT NULL - -- TODO: status reason for updateable_component -); - --- can't have two components of the same type with the same device ID -CREATE UNIQUE INDEX IF NOT EXISTS lookup_component_by_type_and_device ON omicron.public.updateable_component ( - component_type, device_id -); - -CREATE INDEX IF NOT EXISTS lookup_component_by_system_version ON omicron.public.updateable_component ( - system_version + PRIMARY KEY (name, version, kind) ); -/* - * System updates - */ -CREATE TABLE IF NOT EXISTS omicron.public.update_deployment ( - /* Identity metadata (asset) */ - id UUID PRIMARY KEY, - time_created TIMESTAMPTZ NOT NULL, - time_modified TIMESTAMPTZ NOT NULL, - - -- semver version of corresponding system update - -- TODO: this makes sense while version is the PK of system_update, but - -- if/when I change that back to ID, this needs to be the ID too - version STRING(64) NOT NULL, +-- Reflects that a particular artifact was provided by a particular TUF repo. +-- This is a many-many mapping. +CREATE TABLE IF NOT EXISTS omicron.public.tuf_repo_artifact ( + tuf_repo_id UUID NOT NULL, + tuf_artifact_name STRING(63) NOT NULL, + tuf_artifact_version STRING(63) NOT NULL, + tuf_artifact_kind STRING(63) NOT NULL, - status omicron.public.update_status NOT NULL - -- TODO: status reason for update_deployment -); - -CREATE INDEX IF NOT EXISTS lookup_deployment_by_creation on omicron.public.update_deployment ( - time_created + /* + For the primary key, this definition uses the natural key rather than a + smaller surrogate key (UUID). That's because with CockroachDB the most + important factor in selecting a primary key is the ability to distribute + well. In this case, the first element of the primary key is the tuf_repo_id, + which is a random UUID. + + For more, see https://www.cockroachlabs.com/blog/how-to-choose-a-primary-key/. + */ + PRIMARY KEY ( + tuf_repo_id, tuf_artifact_name, tuf_artifact_version, tuf_artifact_kind + ) ); /*******************************************************************/ @@ -3296,7 +3196,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '26.0.0', NULL) + ( TRUE, NOW(), NOW(), '27.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/src/updates.rs b/sled-agent/src/updates.rs index 6144fd9171..13a1ec7623 100644 --- a/sled-agent/src/updates.rs +++ b/sled-agent/src/updates.rs @@ -127,7 +127,7 @@ impl UpdateManager { let response = nexus .cpapi_artifact_download( - nexus_client::types::KnownArtifactKind::ControlPlane, + &KnownArtifactKind::ControlPlane.to_string(), &artifact.name, &artifact.version.clone().into(), ) diff --git a/tufaceous-lib/src/assemble/manifest.rs b/tufaceous-lib/src/assemble/manifest.rs index 3974aa76b2..8825327c1d 100644 --- a/tufaceous-lib/src/assemble/manifest.rs +++ b/tufaceous-lib/src/assemble/manifest.rs @@ -343,10 +343,66 @@ impl DeserializedManifest { .context("error deserializing manifest") } + pub fn to_toml(&self) -> Result { + toml::to_string(self).context("error serializing manifest to TOML") + } + + /// For fake manifests, applies a set of changes to them. 
+ /// + /// Intended for testing. + pub fn apply_tweaks(&mut self, tweaks: &[ManifestTweak]) -> Result<()> { + for tweak in tweaks { + match tweak { + ManifestTweak::SystemVersion(version) => { + self.system_version = version.clone(); + } + ManifestTweak::ArtifactVersion { kind, version } => { + let entries = + self.artifacts.get_mut(kind).with_context(|| { + format!( + "manifest does not have artifact kind \ + {kind}", + ) + })?; + for entry in entries { + entry.version = version.clone(); + } + } + ManifestTweak::ArtifactContents { kind, size_delta } => { + let entries = + self.artifacts.get_mut(kind).with_context(|| { + format!( + "manifest does not have artifact kind \ + {kind}", + ) + })?; + + for entry in entries { + entry.source.apply_size_delta(*size_delta)?; + } + } + } + } + + Ok(()) + } + /// Returns the fake manifest. pub fn fake() -> Self { Self::from_str(FAKE_MANIFEST_TOML).unwrap() } + + /// Returns a version of the fake manifest with a set of changes applied. + /// + /// This is primarily intended for testing. + pub fn tweaked_fake(tweaks: &[ManifestTweak]) -> Self { + let mut manifest = Self::fake(); + manifest + .apply_tweaks(tweaks) + .expect("builtin fake manifest should accept all tweaks"); + + manifest + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] @@ -380,6 +436,39 @@ pub enum DeserializedArtifactSource { }, } +impl DeserializedArtifactSource { + fn apply_size_delta(&mut self, size_delta: i64) -> Result<()> { + match self { + DeserializedArtifactSource::File { .. } => { + bail!("cannot apply size delta to `file` source") + } + DeserializedArtifactSource::Fake { size } => { + *size = (*size).saturating_add_signed(size_delta); + Ok(()) + } + DeserializedArtifactSource::CompositeHost { phase_1, phase_2 } => { + phase_1.apply_size_delta(size_delta)?; + phase_2.apply_size_delta(size_delta)?; + Ok(()) + } + DeserializedArtifactSource::CompositeRot { + archive_a, + archive_b, + } => { + archive_a.apply_size_delta(size_delta)?; + archive_b.apply_size_delta(size_delta)?; + Ok(()) + } + DeserializedArtifactSource::CompositeControlPlane { zones } => { + for zone in zones { + zone.apply_size_delta(size_delta)?; + } + Ok(()) + } + } + } +} + #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(tag = "kind", rename_all = "snake_case")] pub enum DeserializedFileArtifactSource { @@ -416,6 +505,18 @@ impl DeserializedFileArtifactSource { let entry = CompositeEntry { data: &data, mtime_source }; f(entry) } + + fn apply_size_delta(&mut self, size_delta: i64) -> Result<()> { + match self { + DeserializedFileArtifactSource::File { .. } => { + bail!("cannot apply size delta to `file` source") + } + DeserializedFileArtifactSource::Fake { size } => { + *size = (*size).saturating_add_signed(size_delta); + Ok(()) + } + } + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] @@ -459,6 +560,30 @@ impl DeserializedControlPlaneZoneSource { let entry = CompositeEntry { data: &data, mtime_source }; f(name, entry) } + + fn apply_size_delta(&mut self, size_delta: i64) -> Result<()> { + match self { + DeserializedControlPlaneZoneSource::File { .. } => { + bail!("cannot apply size delta to `file` source") + } + DeserializedControlPlaneZoneSource::Fake { size, .. } => { + (*size) = (*size).saturating_add_signed(size_delta); + Ok(()) + } + } + } +} +/// A change to apply to a manifest. +#[derive(Clone, Debug)] +pub enum ManifestTweak { + /// Update the system version. + SystemVersion(SemverVersion), + + /// Update the versions for this artifact. 
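+    /// Every manifest entry of the given kind is updated (a kind can carry
+    /// more than one entry, hence "versions").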
+    ArtifactVersion { kind: KnownArtifactKind, version: SemverVersion },
+
+    /// Update the contents of this artifact (only supports changing the size).
+    ArtifactContents { kind: KnownArtifactKind, size_delta: i64 },
+}
 
 fn deserialize_byte_size<'de, D>(deserializer: D) -> Result<u64, D::Error>
diff --git a/update-common/Cargo.toml b/update-common/Cargo.toml
index cc2ee86232..37542baa8f 100644
--- a/update-common/Cargo.toml
+++ b/update-common/Cargo.toml
@@ -9,6 +9,7 @@ anyhow.workspace = true
 bytes.workspace = true
 camino.workspace = true
 camino-tempfile.workspace = true
+chrono.workspace = true
 debug-ignore.workspace = true
 display-error-chain.workspace = true
 dropshot.workspace = true
diff --git a/update-common/src/artifacts/artifacts_with_plan.rs b/update-common/src/artifacts/artifacts_with_plan.rs
index 9b579af29a..c2be69e82e 100644
--- a/update-common/src/artifacts/artifacts_with_plan.rs
+++ b/update-common/src/artifacts/artifacts_with_plan.rs
@@ -4,19 +4,28 @@
 use super::ExtractedArtifactDataHandle;
 use super::UpdatePlan;
+use super::UpdatePlanBuildOutput;
 use super::UpdatePlanBuilder;
 use crate::errors::RepositoryError;
 use anyhow::anyhow;
+use bytes::Bytes;
 use camino_tempfile::Utf8TempDir;
 use debug_ignore::DebugIgnore;
+use dropshot::HttpError;
+use futures::Stream;
+use futures::TryStreamExt;
+use omicron_common::api::external::TufRepoDescription;
+use omicron_common::api::external::TufRepoMeta;
 use omicron_common::update::ArtifactHash;
 use omicron_common::update::ArtifactHashId;
 use omicron_common::update::ArtifactId;
+use sha2::{Digest, Sha256};
 use slog::info;
 use slog::Logger;
 use std::collections::BTreeMap;
 use std::collections::HashMap;
 use std::io;
+use tokio::io::AsyncWriteExt;
 use tough::TargetName;
 use tufaceous_lib::ArchiveExtractor;
 use tufaceous_lib::OmicronRepo;
@@ -24,6 +33,9 @@ use tufaceous_lib::OmicronRepo;
 /// A collection of artifacts along with an update plan using those artifacts.
 #[derive(Debug)]
 pub struct ArtifactsWithPlan {
+    // A description of this repository.
+    description: TufRepoDescription,
+
     // Map of top-level artifact IDs (present in the TUF repo) to the actual
     // artifacts we're serving (e.g., a top-level RoT artifact will map to two
     // artifact hashes: one for each of the A and B images).
@@ -51,8 +63,65 @@ pub struct ArtifactsWithPlan {
 }
 
 impl ArtifactsWithPlan {
+    /// Creates a new `ArtifactsWithPlan` from the given stream of `Bytes`.
+    ///
+    /// This method reads the stream representing a TUF repo, and writes it to
+    /// a temporary file. Afterwards, it builds an `ArtifactsWithPlan` from the
+    /// contents of that file.
+    pub async fn from_stream(
+        body: impl Stream<Item = Result<Bytes, HttpError>> + Send,
+        file_name: Option<String>,
+        log: &Logger,
+    ) -> Result<Self, RepositoryError> {
+        // Create a temporary file to store the incoming archive.
+        let tempfile = tokio::task::spawn_blocking(|| {
+            camino_tempfile::tempfile().map_err(RepositoryError::TempFileCreate)
+        })
+        .await
+        .unwrap()?;
+        let mut tempfile =
+            tokio::io::BufWriter::new(tokio::fs::File::from_std(tempfile));
+
+        let mut body = std::pin::pin!(body);
+
+        // Stream the uploaded body into our tempfile.
+        let mut hasher = Sha256::new();
+        while let Some(bytes) = body
+            .try_next()
+            .await
+            .map_err(RepositoryError::ReadChunkFromStream)?
+        {
+            hasher.update(&bytes);
+            tempfile
+                .write_all(&bytes)
+                .await
+                .map_err(RepositoryError::TempFileWrite)?;
+        }
+
+        let repo_hash = ArtifactHash(hasher.finalize().into());
+
+        // Flush writes. We don't need to seek back to the beginning of the file
+        // because extracting the repository will do its own seeking as a part of
+        // unzipping this repo.
+        tempfile.flush().await.map_err(RepositoryError::TempFileFlush)?;
+
+        let tempfile = tempfile.into_inner().into_std().await;
+
+        let artifacts_with_plan = Self::from_zip(
+            io::BufReader::new(tempfile),
+            file_name,
+            repo_hash,
+            log,
+        )
+        .await?;
+
+        Ok(artifacts_with_plan)
+    }
+
     pub async fn from_zip<T>(
         zip_data: T,
+        file_name: Option<String>,
+        repo_hash: ArtifactHash,
         log: &Logger,
     ) -> Result<Self, RepositoryError>
     where
@@ -102,7 +171,7 @@
         // `dir`, but we'll also unpack nested artifacts like the RoT dual A/B
         // archives.
         let mut builder =
-            UpdatePlanBuilder::new(artifacts.system_version, log)?;
+            UpdatePlanBuilder::new(artifacts.system_version.clone(), log)?;
 
         // Make a pass through each artifact in the repo. For each artifact, we
         // do one of the following:
@@ -124,9 +193,7 @@
         // priority - copying small SP artifacts is negligible compared to the
        // work we do to unpack host OS images.
 
-        let mut by_id = BTreeMap::new();
-        let mut by_hash = HashMap::new();
-        for artifact in artifacts.artifacts {
+        for artifact in &artifacts.artifacts {
            let target_name = TargetName::try_from(artifact.target.as_str())
                .map_err(|error| RepositoryError::LocateTarget {
                    target: artifact.target.clone(),
@@ -167,21 +234,44 @@
             })?;
 
             builder
-                .add_artifact(
-                    artifact.into_id(),
-                    artifact_hash,
-                    stream,
-                    &mut by_id,
-                    &mut by_hash,
-                )
+                .add_artifact(artifact.clone().into_id(), artifact_hash, stream)
                 .await?;
         }
 
         // Ensure we know how to apply updates from this set of artifacts; we'll
         // remember the plan we create.
-        let artifacts = builder.build()?;
+        let UpdatePlanBuildOutput { plan, by_id, by_hash, artifacts_meta } =
+            builder.build()?;
 
-        Ok(Self { by_id, by_hash: by_hash.into(), plan: artifacts })
+        let tuf_repository = repository.repo();
+
+        let file_name = file_name.unwrap_or_else(|| {
+            // Just pick a reasonable-sounding file name if we don't have one.
+            format!("system-update-v{}.zip", artifacts.system_version)
+        });
+
+        let repo_meta = TufRepoMeta {
+            hash: repo_hash,
+            targets_role_version: tuf_repository.targets().signed.version.get(),
+            valid_until: tuf_repository
+                .root()
+                .signed
+                .expires
+                .min(tuf_repository.snapshot().signed.expires)
+                .min(tuf_repository.targets().signed.expires)
+                .min(tuf_repository.timestamp().signed.expires),
+            system_version: artifacts.system_version,
+            file_name,
+        };
+        let description =
+            TufRepoDescription { repo: repo_meta, artifacts: artifacts_meta };
+
+        Ok(Self { description, by_id, by_hash: by_hash.into(), plan })
+    }
+
+    /// Returns the `TufRepoDescription` corresponding to this TUF repo.
+    pub fn description(&self) -> &TufRepoDescription {
+        &self.description
     }
 
     pub fn by_id(&self) -> &BTreeMap<ArtifactId, Vec<ArtifactHashId>> {
@@ -233,13 +323,14 @@
 mod tests {
     use super::*;
     use anyhow::{Context, Result};
+    use camino::Utf8Path;
     use camino_tempfile::Utf8TempDir;
     use clap::Parser;
     use omicron_common::{
         api::internal::nexus::KnownArtifactKind, update::ArtifactKind,
     };
     use omicron_test_utils::dev::test_setup_log;
-    use std::collections::BTreeSet;
+    use std::{collections::BTreeSet, time::Duration};
 
     /// Test that `ArtifactsWithPlan` can extract the fake repository generated
     /// by tufaceous.
@@ -253,29 +344,22 @@ mod tests {
         let archive_path = temp_dir.path().join("archive.zip");
 
         // Create the archive.
- let args = tufaceous::Args::try_parse_from([ - "tufaceous", - "assemble", - "../tufaceous/manifests/fake.toml", - archive_path.as_str(), - ]) - .context("error parsing args")?; - - args.exec(&logctx.log) - .await - .context("error executing assemble command")?; + create_fake_archive(&logctx.log, &archive_path).await?; // Now check that it can be read by the archive extractor. - let zip_bytes = std::fs::File::open(&archive_path) - .context("error opening archive.zip")?; - let plan = ArtifactsWithPlan::from_zip(zip_bytes, &logctx.log) - .await - .context("error reading archive.zip")?; + let plan = + build_artifacts_with_plan(&logctx.log, &archive_path).await?; // Check that all known artifact kinds are present in the map. let by_id_kinds: BTreeSet<_> = plan.by_id().keys().map(|id| id.kind.clone()).collect(); let by_hash_kinds: BTreeSet<_> = plan.by_hash().keys().map(|id| id.kind.clone()).collect(); + let artifact_meta_kinds: BTreeSet<_> = plan + .description + .artifacts + .iter() + .map(|meta| meta.id.kind.clone()) + .collect(); // `by_id` should contain one entry for every `KnownArtifactKind`... let mut expected_kinds: BTreeSet<_> = @@ -315,6 +399,10 @@ mod tests { expected_kinds, by_hash_kinds, "expected kinds match by_hash kinds" ); + assert_eq!( + expected_kinds, artifact_meta_kinds, + "expected kinds match artifact_meta kinds" + ); // Every value present in `by_id` should also be a key in `by_hash`. for (id, hash_ids) in plan.by_id() { @@ -327,8 +415,81 @@ mod tests { } } + // + + logctx.cleanup_successful(); + + Ok(()) + } + + /// Test that the archive generated by running `tufaceous assemble` twice + /// has the same artifacts and hashes. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fake_archive_idempotent() -> Result<()> { + let logctx = test_setup_log("test_fake_archive_idempotent"); + let temp_dir = Utf8TempDir::new()?; + let archive_path = temp_dir.path().join("archive1.zip"); + + // Create the archive and build a plan from it. + create_fake_archive(&logctx.log, &archive_path).await?; + let mut plan1 = + build_artifacts_with_plan(&logctx.log, &archive_path).await?; + + // Add a 2 second delay to ensure that if we bake any second-based + // timestamps in, that they end up being different from those in the + // first archive. + tokio::time::sleep(Duration::from_secs(2)).await; + + let archive2_path = temp_dir.path().join("archive2.zip"); + create_fake_archive(&logctx.log, &archive2_path).await?; + let mut plan2 = + build_artifacts_with_plan(&logctx.log, &archive2_path).await?; + + // At the moment, the repo .zip itself doesn't match because it bakes + // in timestamps. However, the artifacts inside should match exactly. 
+ plan1.description.sort_artifacts(); + plan2.description.sort_artifacts(); + + assert_eq!( + plan1.description.artifacts, plan2.description.artifacts, + "artifacts match" + ); + logctx.cleanup_successful(); Ok(()) } + + async fn create_fake_archive( + log: &slog::Logger, + archive_path: &Utf8Path, + ) -> Result<()> { + let args = tufaceous::Args::try_parse_from([ + "tufaceous", + "assemble", + "../tufaceous/manifests/fake.toml", + archive_path.as_str(), + ]) + .context("error parsing args")?; + + args.exec(log).await.context("error executing assemble command")?; + + Ok(()) + } + + async fn build_artifacts_with_plan( + log: &slog::Logger, + archive_path: &Utf8Path, + ) -> Result { + let zip_bytes = std::fs::File::open(&archive_path) + .context("error opening archive.zip")?; + // We could also compute the hash from the file here, but the repo hash + // doesn't matter for the test. + let repo_hash = ArtifactHash([0u8; 32]); + let plan = ArtifactsWithPlan::from_zip(zip_bytes, None, repo_hash, log) + .await + .with_context(|| format!("error reading {archive_path}"))?; + + Ok(plan) + } } diff --git a/update-common/src/artifacts/extracted_artifacts.rs b/update-common/src/artifacts/extracted_artifacts.rs index 06e0e5ec65..5ac4a3a395 100644 --- a/update-common/src/artifacts/extracted_artifacts.rs +++ b/update-common/src/artifacts/extracted_artifacts.rs @@ -106,7 +106,7 @@ pub struct ExtractedArtifacts { impl ExtractedArtifacts { pub fn new(log: &Logger) -> Result { let tempdir = camino_tempfile::Builder::new() - .prefix("wicketd-update-artifacts.") + .prefix("update-artifacts.") .tempdir() .map_err(RepositoryError::TempDirCreate)?; info!( @@ -189,7 +189,7 @@ impl ExtractedArtifacts { &self, ) -> Result { let file = NamedUtf8TempFile::new_in(self.tempdir.path()).map_err( - |error| RepositoryError::TempFileCreate { + |error| RepositoryError::NamedTempFileCreate { path: self.tempdir.path().to_owned(), error, }, diff --git a/update-common/src/artifacts/update_plan.rs b/update-common/src/artifacts/update_plan.rs index 7704d5fe8a..c5b171d648 100644 --- a/update-common/src/artifacts/update_plan.rs +++ b/update-common/src/artifacts/update_plan.rs @@ -2,7 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Constructor for the `UpdatePlan` wicketd uses to drive sled mupdates. +//! Constructor for the `UpdatePlan` wicketd and Nexus use to drive sled +//! mupdates. //! //! This is a "plan" in name only: it is a strict list of which artifacts to //! apply to which components; the ordering and application of the plan lives @@ -20,6 +21,7 @@ use futures::StreamExt; use futures::TryStreamExt; use hubtools::RawHubrisArchive; use omicron_common::api::external::SemverVersion; +use omicron_common::api::external::TufArtifactMeta; use omicron_common::api::internal::nexus::KnownArtifactKind; use omicron_common::update::ArtifactHash; use omicron_common::update::ArtifactHashId; @@ -107,6 +109,11 @@ pub struct UpdatePlanBuilder<'a> { host_phase_2_hash: Option, control_plane_hash: Option, + // The by_id and by_hash maps, and metadata, used in `ArtifactsWithPlan`. 
+ by_id: BTreeMap>, + by_hash: HashMap, + artifacts_meta: Vec, + // extra fields we use to build the plan extracted_artifacts: ExtractedArtifacts, log: &'a Logger, @@ -135,30 +142,27 @@ impl<'a> UpdatePlanBuilder<'a> { host_phase_2_hash: None, control_plane_hash: None, + by_id: BTreeMap::new(), + by_hash: HashMap::new(), + artifacts_meta: Vec::new(), + extracted_artifacts, log, }) } + /// Adds an artifact with these contents to the by_id and by_hash maps. pub async fn add_artifact( &mut self, artifact_id: ArtifactId, artifact_hash: ArtifactHash, stream: impl Stream> + Send, - by_id: &mut BTreeMap>, - by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { // If we don't know this artifact kind, we'll still serve it up by hash, // but we don't do any further processing on it. let Some(artifact_kind) = artifact_id.kind.to_known() else { return self - .add_unknown_artifact( - artifact_id, - artifact_hash, - stream, - by_id, - by_hash, - ) + .add_unknown_artifact(artifact_id, artifact_hash, stream) .await; }; @@ -175,39 +179,25 @@ impl<'a> UpdatePlanBuilder<'a> { artifact_kind, artifact_hash, stream, - by_id, - by_hash, ) .await } KnownArtifactKind::GimletRot | KnownArtifactKind::PscRot | KnownArtifactKind::SwitchRot => { - self.add_rot_artifact( - artifact_id, - artifact_kind, - stream, - by_id, - by_hash, - ) - .await + self.add_rot_artifact(artifact_id, artifact_kind, stream).await } KnownArtifactKind::Host => { - self.add_host_artifact(artifact_id, stream, by_id, by_hash) + self.add_host_artifact(artifact_id, stream) + } + KnownArtifactKind::Trampoline => { + self.add_trampoline_artifact(artifact_id, stream) } - KnownArtifactKind::Trampoline => self.add_trampoline_artifact( - artifact_id, - stream, - by_id, - by_hash, - ), KnownArtifactKind::ControlPlane => { self.add_control_plane_artifact( artifact_id, artifact_hash, stream, - by_id, - by_hash, ) .await } @@ -220,8 +210,6 @@ impl<'a> UpdatePlanBuilder<'a> { artifact_kind: KnownArtifactKind, artifact_hash: ArtifactHash, stream: impl Stream> + Send, - by_id: &mut BTreeMap>, - by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { let sp_map = match artifact_kind { KnownArtifactKind::GimletSp => &mut self.gimlet_sp, @@ -276,10 +264,8 @@ impl<'a> UpdatePlanBuilder<'a> { data: data.clone(), }); - record_extracted_artifact( + self.record_extracted_artifact( artifact_id, - by_id, - by_hash, data, artifact_kind.into(), self.log, @@ -293,8 +279,6 @@ impl<'a> UpdatePlanBuilder<'a> { artifact_id: ArtifactId, artifact_kind: KnownArtifactKind, stream: impl Stream> + Send, - by_id: &mut BTreeMap>, - by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { let (rot_a, rot_a_kind, rot_b, rot_b_kind) = match artifact_kind { KnownArtifactKind::GimletRot => ( @@ -353,18 +337,14 @@ impl<'a> UpdatePlanBuilder<'a> { rot_a.push(ArtifactIdData { id: rot_a_id, data: rot_a_data.clone() }); rot_b.push(ArtifactIdData { id: rot_b_id, data: rot_b_data.clone() }); - record_extracted_artifact( + self.record_extracted_artifact( artifact_id.clone(), - by_id, - by_hash, rot_a_data, rot_a_kind, self.log, )?; - record_extracted_artifact( + self.record_extracted_artifact( artifact_id, - by_id, - by_hash, rot_b_data, rot_b_kind, self.log, @@ -377,8 +357,6 @@ impl<'a> UpdatePlanBuilder<'a> { &mut self, artifact_id: ArtifactId, stream: impl Stream> + Send, - by_id: &mut BTreeMap>, - by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { if self.host_phase_1.is_some() || self.host_phase_2_hash.is_some() { return Err(RepositoryError::DuplicateArtifactKind( @@ -407,18 
+385,14 @@ impl<'a> UpdatePlanBuilder<'a> { Some(ArtifactIdData { id: phase_1_id, data: phase_1_data.clone() }); self.host_phase_2_hash = Some(phase_2_data.hash()); - record_extracted_artifact( + self.record_extracted_artifact( artifact_id.clone(), - by_id, - by_hash, phase_1_data, ArtifactKind::HOST_PHASE_1, self.log, )?; - record_extracted_artifact( + self.record_extracted_artifact( artifact_id, - by_id, - by_hash, phase_2_data, ArtifactKind::HOST_PHASE_2, self.log, @@ -431,8 +405,6 @@ impl<'a> UpdatePlanBuilder<'a> { &mut self, artifact_id: ArtifactId, stream: impl Stream> + Send, - by_id: &mut BTreeMap>, - by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { if self.trampoline_phase_1.is_some() || self.trampoline_phase_2.is_some() @@ -470,18 +442,14 @@ impl<'a> UpdatePlanBuilder<'a> { self.trampoline_phase_2 = Some(ArtifactIdData { id: phase_2_id, data: phase_2_data.clone() }); - record_extracted_artifact( + self.record_extracted_artifact( artifact_id.clone(), - by_id, - by_hash, phase_1_data, ArtifactKind::TRAMPOLINE_PHASE_1, self.log, )?; - record_extracted_artifact( + self.record_extracted_artifact( artifact_id, - by_id, - by_hash, phase_2_data, ArtifactKind::TRAMPOLINE_PHASE_2, self.log, @@ -495,8 +463,6 @@ impl<'a> UpdatePlanBuilder<'a> { artifact_id: ArtifactId, artifact_hash: ArtifactHash, stream: impl Stream> + Send, - by_id: &mut BTreeMap>, - by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { if self.control_plane_hash.is_some() { return Err(RepositoryError::DuplicateArtifactKind( @@ -516,10 +482,8 @@ impl<'a> UpdatePlanBuilder<'a> { self.control_plane_hash = Some(data.hash()); - record_extracted_artifact( + self.record_extracted_artifact( artifact_id, - by_id, - by_hash, data, KnownArtifactKind::ControlPlane.into(), self.log, @@ -533,8 +497,6 @@ impl<'a> UpdatePlanBuilder<'a> { artifact_id: ArtifactId, artifact_hash: ArtifactHash, stream: impl Stream> + Send, - by_id: &mut BTreeMap>, - by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { let artifact_kind = artifact_id.kind.clone(); let artifact_hash_id = @@ -543,10 +505,8 @@ impl<'a> UpdatePlanBuilder<'a> { let data = self.extracted_artifacts.store(artifact_hash_id, stream).await?; - record_extracted_artifact( + self.record_extracted_artifact( artifact_id, - by_id, - by_hash, data, artifact_kind, self.log, @@ -660,7 +620,62 @@ impl<'a> UpdatePlanBuilder<'a> { Ok((image1, image2)) } - pub fn build(self) -> Result { + // Record an artifact in `by_id` and `by_hash`, or fail if either already has an + // entry for this id/hash. 
+ fn record_extracted_artifact( + &mut self, + tuf_repo_artifact_id: ArtifactId, + data: ExtractedArtifactDataHandle, + data_kind: ArtifactKind, + log: &Logger, + ) -> Result<(), RepositoryError> { + use std::collections::hash_map::Entry; + + let artifact_hash_id = + ArtifactHashId { kind: data_kind.clone(), hash: data.hash() }; + + let by_hash_slot = match self.by_hash.entry(artifact_hash_id) { + Entry::Occupied(slot) => { + return Err(RepositoryError::DuplicateHashEntry( + slot.key().clone(), + )); + } + Entry::Vacant(slot) => slot, + }; + + info!( + log, "added artifact"; + "name" => %tuf_repo_artifact_id.name, + "kind" => %by_hash_slot.key().kind, + "version" => %tuf_repo_artifact_id.version, + "hash" => %by_hash_slot.key().hash, + "length" => data.file_size(), + ); + + self.by_id + .entry(tuf_repo_artifact_id.clone()) + .or_default() + .push(by_hash_slot.key().clone()); + + // In the artifacts_meta document, use the expanded artifact ID + // (artifact kind = data_kind, and name and version from + // tuf_repo_artifact_id). + let artifacts_meta_id = ArtifactId { + name: tuf_repo_artifact_id.name, + version: tuf_repo_artifact_id.version, + kind: data_kind, + }; + self.artifacts_meta.push(TufArtifactMeta { + id: artifacts_meta_id, + hash: data.hash(), + size: data.file_size() as u64, + }); + by_hash_slot.insert(data); + + Ok(()) + } + + pub fn build(self) -> Result { // Ensure our multi-board-supporting kinds have at least one board // present. for (kind, no_artifacts) in [ @@ -738,7 +753,7 @@ impl<'a> UpdatePlanBuilder<'a> { } } - Ok(UpdatePlan { + let plan = UpdatePlan { system_version: self.system_version, gimlet_sp: self.gimlet_sp, // checked above gimlet_rot_a: self.gimlet_rot_a, // checked above @@ -770,10 +785,24 @@ impl<'a> UpdatePlanBuilder<'a> { KnownArtifactKind::ControlPlane, ), )?, + }; + Ok(UpdatePlanBuildOutput { + plan, + by_id: self.by_id, + by_hash: self.by_hash, + artifacts_meta: self.artifacts_meta, }) } } +/// The output of [`UpdatePlanBuilder::build`]. +pub struct UpdatePlanBuildOutput { + pub plan: UpdatePlan, + pub by_id: BTreeMap>, + pub by_hash: HashMap, + pub artifacts_meta: Vec, +} + // This function takes and returns `id` to avoid an unnecessary clone; `id` will // be present in either the Ok tuple or the error. fn read_hubris_board_from_archive( @@ -807,48 +836,6 @@ fn read_hubris_board_from_archive( Ok((id, Board(board.to_string()))) } -// Record an artifact in `by_id` and `by_hash`, or fail if either already has an -// entry for this id/hash. 
-fn record_extracted_artifact( - tuf_repo_artifact_id: ArtifactId, - by_id: &mut BTreeMap>, - by_hash: &mut HashMap, - data: ExtractedArtifactDataHandle, - data_kind: ArtifactKind, - log: &Logger, -) -> Result<(), RepositoryError> { - use std::collections::hash_map::Entry; - - let artifact_hash_id = - ArtifactHashId { kind: data_kind, hash: data.hash() }; - - let by_hash_slot = match by_hash.entry(artifact_hash_id) { - Entry::Occupied(slot) => { - return Err(RepositoryError::DuplicateHashEntry( - slot.key().clone(), - )); - } - Entry::Vacant(slot) => slot, - }; - - info!( - log, "added artifact"; - "name" => %tuf_repo_artifact_id.name, - "kind" => %by_hash_slot.key().kind, - "version" => %tuf_repo_artifact_id.version, - "hash" => %by_hash_slot.key().hash, - "length" => data.file_size(), - ); - - by_id - .entry(tuf_repo_artifact_id) - .or_default() - .push(by_hash_slot.key().clone()); - by_hash_slot.insert(data); - - Ok(()) -} - #[cfg(test)] mod tests { use std::collections::BTreeSet; @@ -962,13 +949,11 @@ mod tests { let logctx = test_setup_log("test_update_plan_from_artifacts"); - let mut by_id = BTreeMap::new(); - let mut by_hash = HashMap::new(); let mut plan_builder = UpdatePlanBuilder::new("0.0.0".parse().unwrap(), &logctx.log) .unwrap(); - // Add a couple artifacts with kinds wicketd doesn't understand; it + // Add a couple artifacts with kinds wicketd/nexus don't understand; it // should still ingest and serve them. let mut expected_unknown_artifacts = BTreeSet::new(); @@ -986,8 +971,6 @@ mod tests { id, hash, futures::stream::iter([Ok(Bytes::from(data))]), - &mut by_id, - &mut by_hash, ) .await .unwrap(); @@ -1009,8 +992,6 @@ mod tests { id, hash, futures::stream::iter([Ok(Bytes::from(data))]), - &mut by_id, - &mut by_hash, ) .await .unwrap(); @@ -1038,8 +1019,6 @@ mod tests { id, hash, futures::stream::iter([Ok(Bytes::from(data))]), - &mut by_id, - &mut by_hash, ) .await .unwrap(); @@ -1067,8 +1046,6 @@ mod tests { id, hash, futures::stream::iter([Ok(data.clone())]), - &mut by_id, - &mut by_hash, ) .await .unwrap(); @@ -1095,14 +1072,13 @@ mod tests { id, hash, futures::stream::iter([Ok(data.clone())]), - &mut by_id, - &mut by_hash, ) .await .unwrap(); } - let plan = plan_builder.build().unwrap(); + let UpdatePlanBuildOutput { plan, by_id, .. } = + plan_builder.build().unwrap(); assert_eq!(plan.gimlet_sp.len(), 2); assert_eq!(plan.psc_sp.len(), 2); diff --git a/update-common/src/errors.rs b/update-common/src/errors.rs index 5fba43b944..4d992e70b2 100644 --- a/update-common/src/errors.rs +++ b/update-common/src/errors.rs @@ -21,8 +21,20 @@ pub enum RepositoryError { #[error("error creating temporary directory")] TempDirCreate(#[source] std::io::Error), + #[error("error creating temporary file")] + TempFileCreate(#[source] std::io::Error), + + #[error("error reading chunk off of input stream")] + ReadChunkFromStream(#[source] HttpError), + + #[error("error writing to temporary file")] + TempFileWrite(#[source] std::io::Error), + + #[error("error flushing temporary file")] + TempFileFlush(#[source] std::io::Error), + #[error("error creating temporary file in {path}")] - TempFileCreate { + NamedTempFileCreate { path: Utf8PathBuf, #[source] error: std::io::Error, @@ -138,10 +150,21 @@ impl RepositoryError { // Errors we had that are unrelated to the contents of a repository // uploaded by a client. RepositoryError::TempDirCreate(_) - | RepositoryError::TempFileCreate { .. 
} => { + | RepositoryError::TempFileCreate(_) + | RepositoryError::TempFileWrite(_) + | RepositoryError::TempFileFlush(_) + | RepositoryError::NamedTempFileCreate { .. } => { HttpError::for_unavail(None, message) } + // This error is bubbled up. + RepositoryError::ReadChunkFromStream(error) => HttpError { + status_code: error.status_code, + error_code: error.error_code.clone(), + external_message: error.external_message.clone(), + internal_message: error.internal_message.clone(), + }, + // Errors that are definitely caused by bad repository contents. RepositoryError::DuplicateArtifactKind(_) | RepositoryError::LocateTarget { .. } diff --git a/wicketd/src/artifacts/store.rs b/wicketd/src/artifacts/store.rs index a5f24993a8..01543432a2 100644 --- a/wicketd/src/artifacts/store.rs +++ b/wicketd/src/artifacts/store.rs @@ -3,11 +3,9 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use crate::http_entrypoints::InstallableArtifacts; -use dropshot::HttpError; use omicron_common::api::external::SemverVersion; use omicron_common::update::ArtifactHashId; use slog::Logger; -use std::io; use std::sync::Arc; use std::sync::Mutex; use update_common::artifacts::ArtifactsWithPlan; @@ -32,22 +30,12 @@ impl WicketdArtifactStore { Self { log, artifacts_with_plan: Default::default() } } - pub(crate) async fn put_repository( + pub(crate) fn set_artifacts_with_plan( &self, - data: T, - ) -> Result<(), HttpError> - where - T: io::Read + io::Seek + Send + 'static, - { - slog::debug!(self.log, "adding repository"); - - let log = self.log.clone(); - let new_artifacts = ArtifactsWithPlan::from_zip(data, &log) - .await - .map_err(|error| error.to_http_error())?; - self.replace(new_artifacts); - - Ok(()) + artifacts_with_plan: ArtifactsWithPlan, + ) { + slog::debug!(self.log, "setting artifacts_with_plan"); + self.replace(artifacts_with_plan); } pub(crate) fn system_version_and_artifact_ids( diff --git a/wicketd/src/http_entrypoints.rs b/wicketd/src/http_entrypoints.rs index dbd3e31072..9c1740679f 100644 --- a/wicketd/src/http_entrypoints.rs +++ b/wicketd/src/http_entrypoints.rs @@ -25,7 +25,6 @@ use dropshot::Path; use dropshot::RequestContext; use dropshot::StreamingBody; use dropshot::TypedBody; -use futures::TryStreamExt; use gateway_client::types::IgnitionCommand; use gateway_client::types::SpIdentifier; use gateway_client::types::SpType; @@ -44,11 +43,9 @@ use sled_hardware::Baseboard; use slog::o; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::io; use std::net::IpAddr; use std::net::Ipv6Addr; use std::time::Duration; -use tokio::io::AsyncWriteExt; use wicket_common::rack_setup::PutRssUserConfigInsensitive; use wicket_common::update_events::EventReport; use wicket_common::WICKETD_TIMEOUT; @@ -570,44 +567,7 @@ async fn put_repository( ) -> Result { let rqctx = rqctx.context(); - // Create a temporary file to store the incoming archive. - let tempfile = tokio::task::spawn_blocking(|| { - camino_tempfile::tempfile().map_err(|err| { - HttpError::for_unavail( - None, - format!("failed to create temp file: {err}"), - ) - }) - }) - .await - .unwrap()?; - let mut tempfile = - tokio::io::BufWriter::new(tokio::fs::File::from_std(tempfile)); - - let mut body = std::pin::pin!(body.into_stream()); - - // Stream the uploaded body into our tempfile. - while let Some(bytes) = body.try_next().await? { - tempfile.write_all(&bytes).await.map_err(|err| { - HttpError::for_unavail( - None, - format!("failed to write to temp file: {err}"), - ) - })?; - } - - // Flush writes. 
We don't need to seek back to the beginning of the file - // because extracting the repository will do its own seeking as a part of - // unzipping this repo. - tempfile.flush().await.map_err(|err| { - HttpError::for_unavail( - None, - format!("failed to flush temp file: {err}"), - ) - })?; - - let tempfile = tempfile.into_inner().into_std().await; - rqctx.update_tracker.put_repository(io::BufReader::new(tempfile)).await?; + rqctx.update_tracker.put_repository(body.into_stream()).await?; Ok(HttpResponseUpdatedNoContent()) } diff --git a/wicketd/src/update_tracker.rs b/wicketd/src/update_tracker.rs index 823a7964de..eec3ee5868 100644 --- a/wicketd/src/update_tracker.rs +++ b/wicketd/src/update_tracker.rs @@ -18,8 +18,10 @@ use anyhow::bail; use anyhow::ensure; use anyhow::Context; use base64::Engine; +use bytes::Bytes; use display_error_chain::DisplayErrorChain; use dropshot::HttpError; +use futures::Stream; use futures::TryFutureExt; use gateway_client::types::HostPhase2Progress; use gateway_client::types::HostPhase2RecoveryImageId; @@ -48,7 +50,6 @@ use slog::Logger; use std::collections::btree_map::Entry; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::io; use std::net::SocketAddrV6; use std::sync::atomic::AtomicBool; use std::sync::Arc; @@ -64,6 +65,7 @@ use tokio::sync::Mutex; use tokio::task::JoinHandle; use tokio_util::io::StreamReader; use update_common::artifacts::ArtifactIdData; +use update_common::artifacts::ArtifactsWithPlan; use update_common::artifacts::UpdatePlan; use update_engine::events::ProgressUnits; use update_engine::AbortHandle; @@ -342,15 +344,21 @@ impl UpdateTracker { } /// Updates the repository stored inside the update tracker. - pub(crate) async fn put_repository( + pub(crate) async fn put_repository( &self, - data: T, - ) -> Result<(), HttpError> - where - T: io::Read + io::Seek + Send + 'static, - { + stream: impl Stream> + Send + 'static, + ) -> Result<(), HttpError> { + // Build the ArtifactsWithPlan from the stream. + let artifacts_with_plan = ArtifactsWithPlan::from_stream( + stream, + // We don't have a good file name here because file contents are + // uploaded over stdin, so let ArtifactsWithPlan pick the name. + None, &self.log, + ) + .await + .map_err(|error| error.to_http_error())?; let mut update_data = self.sp_update_data.lock().await; - update_data.put_repository(data).await + update_data.set_artifacts_with_plan(artifacts_with_plan).await } /// Gets a list of artifacts stored in the update repository. @@ -725,10 +733,10 @@ impl UpdateTrackerData { } } - async fn put_repository(&mut self, data: T) -> Result<(), HttpError> - where - T: io::Read + io::Seek + Send + 'static, - { + async fn set_artifacts_with_plan( + &mut self, + artifacts_with_plan: ArtifactsWithPlan, + ) -> Result<(), HttpError> { // Are there any updates currently running? If so, then reject the new // repository. let running_sps = self @@ -745,8 +753,8 @@ impl UpdateTrackerData { )); } - // Put the repository into the artifact store. - self.artifact_store.put_repository(data).await?; + // Set the new artifacts_with_plan. + self.artifact_store.set_artifacts_with_plan(artifacts_with_plan); // Reset all running data: a new repository means starting afresh. self.sp_update_data.clear();
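
A note on the streaming path added in `ArtifactsWithPlan::from_stream`: the upload is hashed and spooled to disk in a single pass, so the archive never has to fit in memory, and the SHA-256 of the raw zip doubles as the repository hash recorded in `TufRepoMeta`. A minimal sketch of that pattern, with errors flattened to `std::io::Error` where the real code wraps each failure in a dedicated `RepositoryError` variant:

```rust
use bytes::Bytes;
use futures::{Stream, TryStreamExt};
use sha2::{Digest, Sha256};
use std::fs::File;
use tokio::io::AsyncWriteExt;

// Sketch only: errors are flattened to std::io::Error here; the real code
// maps each failure to a specific RepositoryError variant instead.
async fn spool_and_hash(
    body: impl Stream<Item = Result<Bytes, std::io::Error>> + Send,
) -> std::io::Result<(File, [u8; 32])> {
    // Unnamed temp file, created off the async executor via spawn_blocking
    // just like from_stream does, since file creation is synchronous I/O.
    let tempfile = tokio::task::spawn_blocking(camino_tempfile::tempfile)
        .await
        .expect("spawn_blocking failed")?;
    let mut out =
        tokio::io::BufWriter::new(tokio::fs::File::from_std(tempfile));

    let mut body = std::pin::pin!(body);
    let mut hasher = Sha256::new();
    while let Some(chunk) = body.try_next().await? {
        // One pass: hash each chunk and write it through the buffered file.
        hasher.update(&chunk);
        out.write_all(&chunk).await?;
    }
    // Flush buffered writes; extraction re-seeks on its own, so there is no
    // need to rewind the file here.
    out.flush().await?;
    Ok((out.into_inner().into_std().await, hasher.finalize().into()))
}
```

The `spawn_blocking` call mirrors the real code's design choice: temp file creation touches the filesystem synchronously, so it is kept off the async executor threads.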
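For callers that already hold the archive in memory (mostly tests), the same entry point can be driven with a single-chunk stream. A hypothetical helper, assuming `ArtifactsWithPlan` and `RepositoryError` are importable from `update-common` as laid out in this diff; the `"test.zip"` file name is illustrative only:

```rust
use bytes::Bytes;
use dropshot::HttpError;
use futures::stream;
use slog::Logger;
use update_common::artifacts::ArtifactsWithPlan;
use update_common::errors::RepositoryError;

// Hypothetical convenience: wrap an in-memory archive in a single-chunk
// stream. Real callers pass the upload body through chunk by chunk.
async fn plan_from_bytes(
    zip_bytes: Vec<u8>,
    log: &Logger,
) -> Result<ArtifactsWithPlan, RepositoryError> {
    let body = stream::iter([Ok::<_, HttpError>(Bytes::from(zip_bytes))]);
    // Passing None instead of a file name lets the library derive one from
    // the system version, as from_zip does.
    ArtifactsWithPlan::from_stream(body, Some("test.zip".to_string()), log)
        .await
}
```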
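On the `valid_until` computation in `from_zip`: a TUF repository is only usable while all four top-level roles (root, snapshot, targets, timestamp) are unexpired, which is why the code takes the minimum of their expiration times. This is also what pulls `chrono` into `update-common`'s dependencies. As a sketch:

```rust
use chrono::{DateTime, Utc};

// The repo is only as valid as its soonest-expiring role, so the overall
// validity window ends at the earliest of the four expirations.
fn repo_valid_until(
    root: DateTime<Utc>,
    snapshot: DateTime<Utc>,
    targets: DateTime<Utc>,
    timestamp: DateTime<Utc>,
) -> DateTime<Utc> {
    root.min(snapshot).min(targets).min(timestamp)
}
```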
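The move of `record_extracted_artifact` from a free function to a method on `UpdatePlanBuilder` preserves its ordering guarantee: the `by_hash` slot is claimed first, so a duplicate hash fails before `by_id` or `artifacts_meta` are touched and no map is left half-updated. A stripped-down sketch of that ordering with placeholder types (the real code keys on `ArtifactId` and `ArtifactHashId` and returns `RepositoryError::DuplicateHashEntry`):

```rust
use std::collections::hash_map::Entry;
use std::collections::{BTreeMap, HashMap};

// Placeholder stand-ins for ArtifactId / ArtifactHashId / the data handle.
type Id = (String, String); // (name, version)
type HashId = (String, [u8; 32]); // (kind, sha256 hash)

struct Maps {
    by_id: BTreeMap<Id, Vec<HashId>>,
    by_hash: HashMap<HashId, Vec<u8>>,
}

impl Maps {
    fn record(
        &mut self,
        id: Id,
        hash_id: HashId,
        data: Vec<u8>,
    ) -> Result<(), String> {
        // Claim the by_hash slot first: on a duplicate we bail out before
        // by_id is touched, so neither map is left half-updated.
        let slot = match self.by_hash.entry(hash_id) {
            Entry::Occupied(slot) => {
                return Err(format!("duplicate hash entry: {:?}", slot.key()));
            }
            Entry::Vacant(slot) => slot,
        };
        self.by_id.entry(id).or_default().push(slot.key().clone());
        slot.insert(data);
        Ok(())
    }
}
```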