diff --git a/Cargo.lock b/Cargo.lock index 004917e5a80..30d0308ec18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12972,7 +12972,6 @@ dependencies = [ "serde", "serde_human_bytes", "serde_json", - "sha3", "sled-agent-types-versions", "sled-hardware-types", "slog", diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index d9d731bffd6..2bf479e3f5e 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -57,7 +57,7 @@ use sled_agent_types::early_networking::{ }; use sled_agent_types::instance::{ InstanceEnsureBody, InstanceExternalIpBody, InstanceMulticastMembership, - VmmPutStateResponse, VmmSpecExt, VmmStateRequested, VmmUnregisterResponse, + VmmPutStateResponse, VmmStateRequested, VmmUnregisterResponse, }; use sled_agent_types::inventory::{ ConfigReconcilerInventory, ConfigReconcilerInventoryStatus, diff --git a/sled-agent/types/Cargo.toml b/sled-agent/types/Cargo.toml index c0f1dc38b3c..18e8cef9022 100644 --- a/sled-agent/types/Cargo.toml +++ b/sled-agent/types/Cargo.toml @@ -24,7 +24,6 @@ schemars.workspace = true serde.workspace = true serde_human_bytes.workspace = true serde_json.workspace = true -sha3.workspace = true sled-agent-types-versions.workspace = true sled-hardware-types.workspace = true slog.workspace = true diff --git a/sled-agent/types/versions/src/add_dual_stack_external_ip_config/inventory.rs b/sled-agent/types/versions/src/add_dual_stack_external_ip_config/inventory.rs index b44725452a1..d71fba17315 100644 --- a/sled-agent/types/versions/src/add_dual_stack_external_ip_config/inventory.rs +++ b/sled-agent/types/versions/src/add_dual_stack_external_ip_config/inventory.rs @@ -10,8 +10,6 @@ use chrono::{DateTime, Utc}; use iddqd::IdOrdItem; use iddqd::IdOrdMap; use iddqd::id_upcast; -use omicron_common::disk::{DatasetKind, DatasetName}; -use omicron_common::ledger::Ledgerable; use omicron_common::{ api::{ external::{ByteCount, Generation}, @@ -30,8 +28,7 @@ use crate::v1::inventory::{ BootPartitionContents, ConfigReconcilerInventoryResult, HostPhase2DesiredSlots, InventoryDataset, InventoryDisk, InventoryZpool, OmicronZoneDataset, OmicronZoneImageSource, OrphanedDataset, - RemoveMupdateOverrideBootSuccessInventory, RemoveMupdateOverrideInventory, - SledRole, ZoneImageResolverInventory, ZoneKind, + RemoveMupdateOverrideInventory, SledRole, ZoneImageResolverInventory, }; use crate::v10; use sled_hardware_types::{Baseboard, SledCpuFamily}; @@ -74,107 +71,6 @@ pub struct ConfigReconcilerInventory { pub remove_mupdate_override: Option, } -impl ConfigReconcilerInventory { - /// Iterate over all running zones as reported by the last reconciliation - /// result. - /// - /// This includes zones that are both present in `last_reconciled_config` - /// and whose status in `zones` indicates "successfully running". - pub fn running_omicron_zones( - &self, - ) -> impl Iterator { - self.zones.iter().filter_map(|(zone_id, result)| match result { - ConfigReconcilerInventoryResult::Ok => { - self.last_reconciled_config.zones.get(zone_id) - } - ConfigReconcilerInventoryResult::Err { .. } => None, - }) - } - - /// Iterate over all zones contained in the most-recently-reconciled sled - /// config and report their status as of that reconciliation. - pub fn reconciled_omicron_zones( - &self, - ) -> impl Iterator - { - // `self.zones` may contain zone IDs that aren't present in - // `last_reconciled_config` at all, if we failed to _shut down_ zones - // that are no longer present in the config. 
We use `filter_map` to - // strip those out, and only report on the configured zones. - self.zones.iter().filter_map(|(zone_id, result)| { - let config = self.last_reconciled_config.zones.get(zone_id)?; - Some((config, result)) - }) - } - - /// Given a sled config, produce a reconciler result that sled-agent could - /// have emitted if reconciliation succeeded. - /// - /// This method should only be used by tests and dev tools; real code should - /// look at the actual `last_reconciliation` value from the parent - /// [`Inventory`]. - pub fn debug_assume_success(config: OmicronSledConfig) -> Self { - let mut ret = Self { - // These fields will be filled in by `debug_update_assume_success`. - last_reconciled_config: OmicronSledConfig::default(), - external_disks: BTreeMap::new(), - datasets: BTreeMap::new(), - orphaned_datasets: IdOrdMap::new(), - zones: BTreeMap::new(), - remove_mupdate_override: None, - - // These fields will not. - boot_partitions: BootPartitionContents::debug_assume_success(), - }; - - ret.debug_update_assume_success(config); - - ret - } - - /// Given a sled config, update an existing reconciler result to simulate an - /// output that sled-agent could have emitted if reconciliation succeeded. - /// - /// This method should only be used by tests and dev tools; real code should - /// look at the actual `last_reconciliation` value from the parent - /// [`Inventory`]. - pub fn debug_update_assume_success(&mut self, config: OmicronSledConfig) { - let external_disks = config - .disks - .iter() - .map(|d| (d.id, ConfigReconcilerInventoryResult::Ok)) - .collect(); - let datasets = config - .datasets - .iter() - .map(|d| (d.id, ConfigReconcilerInventoryResult::Ok)) - .collect(); - let zones = config - .zones - .iter() - .map(|z| (z.id, ConfigReconcilerInventoryResult::Ok)) - .collect(); - let remove_mupdate_override = - config.remove_mupdate_override.map(|_| { - RemoveMupdateOverrideInventory { - boot_disk_result: Ok( - RemoveMupdateOverrideBootSuccessInventory::Removed, - ), - non_boot_message: "mupdate override successfully removed \ - on non-boot disks" - .to_owned(), - } - }); - - self.last_reconciled_config = config; - self.external_disks = external_disks; - self.datasets = datasets; - self.orphaned_datasets = IdOrdMap::new(); - self.zones = zones; - self.remove_mupdate_override = remove_mupdate_override; - } -} - /// Status of the sled-agent-config-reconciler task. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, JsonSchema, Serialize)] #[serde(tag = "status", rename_all = "snake_case")] @@ -216,32 +112,6 @@ pub struct OmicronSledConfig { pub host_phase_2: HostPhase2DesiredSlots, } -impl Default for OmicronSledConfig { - fn default() -> Self { - Self { - generation: Generation::new(), - disks: IdOrdMap::default(), - datasets: IdOrdMap::default(), - zones: IdOrdMap::default(), - remove_mupdate_override: None, - host_phase_2: HostPhase2DesiredSlots::current_contents(), - } - } -} - -impl Ledgerable for OmicronSledConfig { - fn is_newer_than(&self, other: &Self) -> bool { - self.generation > other.generation - } - - fn generation_bump(&mut self) { - // DO NOTHING! 
- // - // Generation bumps must only ever come from nexus and will be encoded - // in the struct itself - } -} - /// Describes the set of Omicron-managed zones running on a sled #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, @@ -261,11 +131,6 @@ pub struct OmicronZonesConfig { pub zones: Vec, } -impl OmicronZonesConfig { - /// Generation 1 of `OmicronZonesConfig` is always the set of no zones. - pub const INITIAL_GENERATION: Generation = Generation::from_u32(1); -} - /// Describes one Omicron-managed zone running on a sled #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, @@ -295,27 +160,6 @@ impl IdOrdItem for OmicronZoneConfig { id_upcast!(); } -impl OmicronZoneConfig { - /// Returns the underlay IP address associated with this zone. - /// - /// Assumes all zone have exactly one underlay IP address (which is - /// currently true). - pub fn underlay_ip(&self) -> Ipv6Addr { - self.zone_type.underlay_ip() - } - - pub fn zone_name(&self) -> String { - illumos_utils::running_zone::InstalledZone::get_zone_name( - self.zone_type.kind().zone_prefix(), - Some(self.id), - ) - } - - pub fn dataset_name(&self) -> Option { - self.zone_type.dataset_name() - } -} - /// Describes what kind of zone this is (i.e., what component is running in it) /// as well as any type-specific configuration #[derive( @@ -415,219 +259,6 @@ pub enum OmicronZoneType { }, } -impl OmicronZoneType { - /// Returns the [`ZoneKind`] corresponding to this variant. - pub fn kind(&self) -> ZoneKind { - match self { - OmicronZoneType::BoundaryNtp { .. } => ZoneKind::BoundaryNtp, - OmicronZoneType::Clickhouse { .. } => ZoneKind::Clickhouse, - OmicronZoneType::ClickhouseKeeper { .. } => { - ZoneKind::ClickhouseKeeper - } - OmicronZoneType::ClickhouseServer { .. } => { - ZoneKind::ClickhouseServer - } - OmicronZoneType::CockroachDb { .. } => ZoneKind::CockroachDb, - OmicronZoneType::Crucible { .. } => ZoneKind::Crucible, - OmicronZoneType::CruciblePantry { .. } => ZoneKind::CruciblePantry, - OmicronZoneType::ExternalDns { .. } => ZoneKind::ExternalDns, - OmicronZoneType::InternalDns { .. } => ZoneKind::InternalDns, - OmicronZoneType::InternalNtp { .. } => ZoneKind::InternalNtp, - OmicronZoneType::Nexus { .. } => ZoneKind::Nexus, - OmicronZoneType::Oximeter { .. } => ZoneKind::Oximeter, - } - } - - /// Does this zone require time synchronization before it is initialized?" - /// - /// This function is somewhat conservative - the set of services - /// that can be launched before timesync has completed is intentionally kept - /// small, since it would be easy to add a service that expects time to be - /// reasonably synchronized. - pub fn requires_timesync(&self) -> bool { - match self { - // These zones can be initialized and started before time has been - // synchronized. For the NTP zones, this should be self-evident -- - // we need the NTP zone to actually perform time synchronization! - // - // The DNS zone is a bit of an exception here, since the NTP zone - // itself may rely on DNS lookups as a dependency. - OmicronZoneType::BoundaryNtp { .. } - | OmicronZoneType::InternalNtp { .. } - | OmicronZoneType::InternalDns { .. } => false, - _ => true, - } - } - - /// Returns the underlay IP address associated with this zone. - /// - /// Assumes all zone have exactly one underlay IP address (which is - /// currently true). - pub fn underlay_ip(&self) -> Ipv6Addr { - match self { - OmicronZoneType::BoundaryNtp { address, .. } - | OmicronZoneType::Clickhouse { address, .. 
} - | OmicronZoneType::ClickhouseKeeper { address, .. } - | OmicronZoneType::ClickhouseServer { address, .. } - | OmicronZoneType::CockroachDb { address, .. } - | OmicronZoneType::Crucible { address, .. } - | OmicronZoneType::CruciblePantry { address } - | OmicronZoneType::ExternalDns { http_address: address, .. } - | OmicronZoneType::InternalNtp { address } - | OmicronZoneType::Nexus { internal_address: address, .. } - | OmicronZoneType::Oximeter { address } => *address.ip(), - OmicronZoneType::InternalDns { - http_address: address, - dns_address, - .. - } => { - // InternalDns is the only variant that carries two - // `SocketAddrV6`s that are both on the underlay network. We - // expect these to have the same IP address. - debug_assert_eq!(address.ip(), dns_address.ip()); - *address.ip() - } - } - } - - /// Identifies whether this is an NTP zone - pub fn is_ntp(&self) -> bool { - match self { - OmicronZoneType::BoundaryNtp { .. } - | OmicronZoneType::InternalNtp { .. } => true, - - OmicronZoneType::Clickhouse { .. } - | OmicronZoneType::ClickhouseKeeper { .. } - | OmicronZoneType::ClickhouseServer { .. } - | OmicronZoneType::CockroachDb { .. } - | OmicronZoneType::Crucible { .. } - | OmicronZoneType::CruciblePantry { .. } - | OmicronZoneType::ExternalDns { .. } - | OmicronZoneType::InternalDns { .. } - | OmicronZoneType::Nexus { .. } - | OmicronZoneType::Oximeter { .. } => false, - } - } - - /// Identifies whether this is a boundary NTP zone - pub fn is_boundary_ntp(&self) -> bool { - matches!(self, OmicronZoneType::BoundaryNtp { .. }) - } - - /// Identifies whether this is a Nexus zone - pub fn is_nexus(&self) -> bool { - match self { - OmicronZoneType::Nexus { .. } => true, - - OmicronZoneType::BoundaryNtp { .. } - | OmicronZoneType::InternalNtp { .. } - | OmicronZoneType::Clickhouse { .. } - | OmicronZoneType::ClickhouseKeeper { .. } - | OmicronZoneType::ClickhouseServer { .. } - | OmicronZoneType::CockroachDb { .. } - | OmicronZoneType::Crucible { .. } - | OmicronZoneType::CruciblePantry { .. } - | OmicronZoneType::ExternalDns { .. } - | OmicronZoneType::InternalDns { .. } - | OmicronZoneType::Oximeter { .. } => false, - } - } - - /// Identifies whether this a Crucible (not Crucible pantry) zone - pub fn is_crucible(&self) -> bool { - match self { - OmicronZoneType::Crucible { .. } => true, - - OmicronZoneType::BoundaryNtp { .. } - | OmicronZoneType::InternalNtp { .. } - | OmicronZoneType::Clickhouse { .. } - | OmicronZoneType::ClickhouseKeeper { .. } - | OmicronZoneType::ClickhouseServer { .. } - | OmicronZoneType::CockroachDb { .. } - | OmicronZoneType::CruciblePantry { .. } - | OmicronZoneType::ExternalDns { .. } - | OmicronZoneType::InternalDns { .. } - | OmicronZoneType::Nexus { .. } - | OmicronZoneType::Oximeter { .. } => false, - } - } - - /// This zone's external IP - pub fn external_ip(&self) -> Option { - match self { - OmicronZoneType::Nexus { external_ip, .. } => Some(*external_ip), - OmicronZoneType::ExternalDns { dns_address, .. } => { - Some(dns_address.ip()) - } - OmicronZoneType::BoundaryNtp { snat_cfg, .. } => Some(snat_cfg.ip), - - OmicronZoneType::InternalNtp { .. } - | OmicronZoneType::Clickhouse { .. } - | OmicronZoneType::ClickhouseKeeper { .. } - | OmicronZoneType::ClickhouseServer { .. } - | OmicronZoneType::CockroachDb { .. } - | OmicronZoneType::Crucible { .. } - | OmicronZoneType::CruciblePantry { .. } - | OmicronZoneType::InternalDns { .. } - | OmicronZoneType::Oximeter { .. 
} => None, - } - } - - /// The service vNIC providing external connectivity to this zone - pub fn service_vnic(&self) -> Option<&NetworkInterface> { - match self { - OmicronZoneType::Nexus { nic, .. } - | OmicronZoneType::ExternalDns { nic, .. } - | OmicronZoneType::BoundaryNtp { nic, .. } => Some(nic), - - OmicronZoneType::InternalNtp { .. } - | OmicronZoneType::Clickhouse { .. } - | OmicronZoneType::ClickhouseKeeper { .. } - | OmicronZoneType::ClickhouseServer { .. } - | OmicronZoneType::CockroachDb { .. } - | OmicronZoneType::Crucible { .. } - | OmicronZoneType::CruciblePantry { .. } - | OmicronZoneType::InternalDns { .. } - | OmicronZoneType::Oximeter { .. } => None, - } - } - - /// If this kind of zone has an associated dataset, return the dataset's - /// name. Otherwise, return `None`. - pub fn dataset_name(&self) -> Option { - let (dataset, dataset_kind) = match self { - OmicronZoneType::BoundaryNtp { .. } - | OmicronZoneType::InternalNtp { .. } - | OmicronZoneType::Nexus { .. } - | OmicronZoneType::Oximeter { .. } - | OmicronZoneType::CruciblePantry { .. } => None, - OmicronZoneType::Clickhouse { dataset, .. } => { - Some((dataset, DatasetKind::Clickhouse)) - } - OmicronZoneType::ClickhouseKeeper { dataset, .. } => { - Some((dataset, DatasetKind::ClickhouseKeeper)) - } - OmicronZoneType::ClickhouseServer { dataset, .. } => { - Some((dataset, DatasetKind::ClickhouseServer)) - } - OmicronZoneType::CockroachDb { dataset, .. } => { - Some((dataset, DatasetKind::Cockroach)) - } - OmicronZoneType::Crucible { dataset, .. } => { - Some((dataset, DatasetKind::Crucible)) - } - OmicronZoneType::ExternalDns { dataset, .. } => { - Some((dataset, DatasetKind::ExternalDns)) - } - OmicronZoneType::InternalDns { dataset, .. } => { - Some((dataset, DatasetKind::InternalDns)) - } - }?; - - Some(DatasetName::new(dataset.pool_name, dataset_kind)) - } -} - fn default_nexus_lockstep_port() -> u16 { omicron_common::address::NEXUS_LOCKSTEP_PORT } diff --git a/sled-agent/types/versions/src/add_dual_stack_shared_network_interfaces/inventory.rs b/sled-agent/types/versions/src/add_dual_stack_shared_network_interfaces/inventory.rs index e986bcb130e..99b63db54e7 100644 --- a/sled-agent/types/versions/src/add_dual_stack_shared_network_interfaces/inventory.rs +++ b/sled-agent/types/versions/src/add_dual_stack_shared_network_interfaces/inventory.rs @@ -10,7 +10,6 @@ use chrono::{DateTime, Utc}; use iddqd::IdOrdItem; use iddqd::IdOrdMap; use iddqd::id_upcast; -use omicron_common::ledger::Ledgerable; use omicron_common::{ api::{ external::{self, ByteCount, Generation}, @@ -115,32 +114,6 @@ pub struct OmicronSledConfig { pub host_phase_2: HostPhase2DesiredSlots, } -impl Default for OmicronSledConfig { - fn default() -> Self { - Self { - generation: Generation::new(), - disks: IdOrdMap::default(), - datasets: IdOrdMap::default(), - zones: IdOrdMap::default(), - remove_mupdate_override: None, - host_phase_2: HostPhase2DesiredSlots::current_contents(), - } - } -} - -impl Ledgerable for OmicronSledConfig { - fn is_newer_than(&self, other: &Self) -> bool { - self.generation > other.generation - } - - fn generation_bump(&mut self) { - // DO NOTHING! 
- // - // Generation bumps must only ever come from nexus and will be encoded - // in the struct itself - } -} - /// Describes the set of Omicron-managed zones running on a sled #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, @@ -160,11 +133,6 @@ pub struct OmicronZonesConfig { pub zones: Vec, } -impl OmicronZonesConfig { - /// Generation 1 of `OmicronZonesConfig` is always the set of no zones. - pub const INITIAL_GENERATION: Generation = Generation::from_u32(1); -} - /// Describes one Omicron-managed zone running on a sled #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, diff --git a/sled-agent/types/versions/src/impls/disk.rs b/sled-agent/types/versions/src/impls/disk.rs new file mode 100644 index 00000000000..1520c61e6a8 --- /dev/null +++ b/sled-agent/types/versions/src/impls/disk.rs @@ -0,0 +1,17 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::latest::disk::DiskStateRequested; + +impl DiskStateRequested { + /// Returns whether the requested state is attached to an Instance or not. + pub fn is_attached(&self) -> bool { + match self { + DiskStateRequested::Detached => false, + DiskStateRequested::Destroyed => false, + DiskStateRequested::Faulted => false, + DiskStateRequested::Attached(_) => true, + } + } +} diff --git a/sled-agent/types/versions/src/impls/early_networking.rs b/sled-agent/types/versions/src/impls/early_networking.rs new file mode 100644 index 00000000000..4f9b88dc034 --- /dev/null +++ b/sled-agent/types/versions/src/impls/early_networking.rs @@ -0,0 +1,137 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementations for early networking types. + +use std::str::FromStr; + +use bootstore::schemes::v0 as bootstore; +use slog::{Logger, warn}; + +use crate::latest::early_networking::{ + EarlyNetworkConfig, EarlyNetworkConfigBody, +}; +// This is an exception to the rule that we only use the latest version, since +// the back_compat module is only defined for v1. +use crate::v1::early_networking::back_compat; + +impl FromStr for EarlyNetworkConfig { + type Err = String; + + fn from_str(value: &str) -> Result { + #[derive(serde::Deserialize)] + struct ShadowConfig { + generation: u64, + schema_version: u32, + body: EarlyNetworkConfigBody, + } + + let v2_err = match serde_json::from_str::(&value) { + Ok(cfg) => { + return Ok(EarlyNetworkConfig { + generation: cfg.generation, + schema_version: cfg.schema_version, + body: cfg.body, + }); + } + Err(e) => format!("unable to parse EarlyNetworkConfig: {e:?}"), + }; + // If we fail to parse the config as any known version, we return the + // error corresponding to the parse failure of the newest schema. + serde_json::from_str::(&value) + .map(|v1| EarlyNetworkConfig { + generation: v1.generation, + schema_version: Self::schema_version(), + body: v1.body.into(), + }) + .map_err(|_| v2_err) + } +} + +impl EarlyNetworkConfig { + pub fn schema_version() -> u32 { + 2 + } + + // Note: This currently only converts between v0 and v1 or deserializes v1 of + // `EarlyNetworkConfig`. 
+ pub fn deserialize_bootstore_config( + log: &Logger, + config: &bootstore::NetworkConfig, + ) -> Result { + // Try to deserialize the latest version of the data structure (v2). If + // that succeeds we are done. + let v2_error = + match serde_json::from_slice::(&config.blob) { + Ok(val) => return Ok(val), + Err(error) => { + // Log this error and continue trying to deserialize older + // versions. + warn!( + log, + "Failed to deserialize EarlyNetworkConfig \ + as v2, trying next as v1: {}", + error, + ); + error + } + }; + + match serde_json::from_slice::( + &config.blob, + ) { + Ok(v1) => { + // Convert from v1 to v2 + return Ok(EarlyNetworkConfig { + generation: v1.generation, + schema_version: EarlyNetworkConfig::schema_version(), + body: v1.body.into(), + }); + } + Err(error) => { + // Log this error. + warn!( + log, + "Failed to deserialize EarlyNetworkConfig \ + as v1, trying next as v0: {}", + error + ); + } + }; + + match serde_json::from_slice::( + &config.blob, + ) { + Ok(val) => { + // Convert from v0 to v2 + return Ok(EarlyNetworkConfig { + generation: val.generation, + schema_version: 2, + body: EarlyNetworkConfigBody { + ntp_servers: val.ntp_servers, + rack_network_config: val.rack_network_config.map( + |v0_config| { + back_compat::RackNetworkConfigV0::to_v2( + val.rack_subnet, + v0_config, + ) + }, + ), + }, + }); + } + Err(error) => { + // Log this error. + warn!( + log, + "Failed to deserialize EarlyNetworkConfig as v0: {}", error, + ); + } + }; + + // If we fail to parse the config as any known version, we return the + // error corresponding to the parse failure of the newest schema. + Err(v2_error) + } +} diff --git a/sled-agent/types/versions/src/impls/instance.rs b/sled-agent/types/versions/src/impls/instance.rs new file mode 100644 index 00000000000..3596b561789 --- /dev/null +++ b/sled-agent/types/versions/src/impls/instance.rs @@ -0,0 +1,76 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::latest::instance::{VmmSpec, VmmStateRequested}; +use propolis_api_types::instance_spec::{ + SpecKey, + components::backends::{ + CrucibleStorageBackend, FileStorageBackend, VirtioNetworkBackend, + }, + v0::ComponentV0, +}; + +impl std::fmt::Display for VmmStateRequested { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.label()) + } +} + +impl VmmStateRequested { + fn label(&self) -> &str { + match self { + VmmStateRequested::MigrationTarget(_) => "migrating in", + VmmStateRequested::Running => "running", + VmmStateRequested::Stopped => "stopped", + VmmStateRequested::Reboot => "reboot", + } + } +} + +impl VmmSpec { + pub fn crucible_backends( + &self, + ) -> impl Iterator { + self.0.components.iter().filter_map( + |(key, component)| match component { + ComponentV0::CrucibleStorageBackend(be) => Some((key, be)), + _ => None, + }, + ) + } + + pub fn viona_backends( + &self, + ) -> impl Iterator { + self.0.components.iter().filter_map( + |(key, component)| match component { + ComponentV0::VirtioNetworkBackend(be) => Some((key, be)), + _ => None, + }, + ) + } + + pub fn file_backends( + &self, + ) -> impl Iterator { + self.0.components.iter().filter_map( + |(key, component)| match component { + ComponentV0::FileStorageBackend(be) => Some((key, be)), + _ => None, + }, + ) + } +} + +impl VmmStateRequested { + /// Returns true if the state represents a stopped Instance. 
+ pub fn is_stopped(&self) -> bool { + match self { + VmmStateRequested::MigrationTarget(_) => false, + VmmStateRequested::Running => false, + VmmStateRequested::Stopped => true, + VmmStateRequested::Reboot => false, + } + } +} diff --git a/sled-agent/types/versions/src/impls/inventory.rs b/sled-agent/types/versions/src/impls/inventory.rs new file mode 100644 index 00000000000..b5a767e09b4 --- /dev/null +++ b/sled-agent/types/versions/src/impls/inventory.rs @@ -0,0 +1,883 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::collections::BTreeMap; +use std::fmt::{self, Write}; +use std::net::{IpAddr, Ipv6Addr}; + +use camino::Utf8PathBuf; +use iddqd::IdOrdMap; +use indent_write::fmt::IndentWriter; +use omicron_common::api::external::Generation; +use omicron_common::api::internal::shared::NetworkInterface; +use omicron_common::disk::{DatasetKind, DatasetName, M2Slot}; +use omicron_common::ledger::Ledgerable; +use omicron_common::update::{ArtifactId, OmicronInstallManifestSource}; +use omicron_uuid_kinds::MupdateUuid; +use tufaceous_artifact::{ArtifactHash, KnownArtifactKind}; + +use crate::latest::inventory::{ + BootImageHeader, BootPartitionContents, BootPartitionDetails, + ConfigReconcilerInventory, ConfigReconcilerInventoryResult, + HostPhase2DesiredContents, HostPhase2DesiredSlots, ManifestBootInventory, + ManifestInventory, ManifestNonBootInventory, MupdateOverrideBootInventory, + MupdateOverrideInventory, MupdateOverrideNonBootInventory, + OmicronSledConfig, OmicronZoneConfig, OmicronZoneImageSource, + OmicronZoneType, OmicronZonesConfig, + RemoveMupdateOverrideBootSuccessInventory, RemoveMupdateOverrideInventory, + ZoneArtifactInventory, ZoneImageResolverInventory, ZoneKind, +}; + +impl ZoneKind { + /// The NTP prefix used for both BoundaryNtp and InternalNtp zones and services. + pub const NTP_PREFIX: &str = "ntp"; + + /// Return a string that is used to construct **zone names**. This string + /// is guaranteed to be stable over time. + pub fn zone_prefix(self) -> &'static str { + match self { + ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => Self::NTP_PREFIX, + ZoneKind::Clickhouse => "clickhouse", + ZoneKind::ClickhouseKeeper => "clickhouse_keeper", + ZoneKind::ClickhouseServer => "clickhouse_server", + ZoneKind::CockroachDb => "cockroachdb", + ZoneKind::Crucible => "crucible", + ZoneKind::CruciblePantry => "crucible_pantry", + ZoneKind::ExternalDns => "external_dns", + ZoneKind::InternalDns => "internal_dns", + ZoneKind::Nexus => "nexus", + ZoneKind::Oximeter => "oximeter", + } + } + + /// Return a string that identifies **zone image filenames** in the install + /// dataset. + /// + /// This method is exactly equivalent to `format!("{}.tar.gz", + /// self.zone_prefix())`, but returns `&'static str`s. A unit test ensures + /// they stay consistent. 
+ pub fn artifact_in_install_dataset(self) -> &'static str { + match self { + ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => "ntp.tar.gz", + ZoneKind::Clickhouse => "clickhouse.tar.gz", + ZoneKind::ClickhouseKeeper => "clickhouse_keeper.tar.gz", + ZoneKind::ClickhouseServer => "clickhouse_server.tar.gz", + ZoneKind::CockroachDb => "cockroachdb.tar.gz", + ZoneKind::Crucible => "crucible.tar.gz", + ZoneKind::CruciblePantry => "crucible_pantry.tar.gz", + ZoneKind::ExternalDns => "external_dns.tar.gz", + ZoneKind::InternalDns => "internal_dns.tar.gz", + ZoneKind::Nexus => "nexus.tar.gz", + ZoneKind::Oximeter => "oximeter.tar.gz", + } + } + + /// Return a string that is used to construct **SMF service names**. This + /// string is guaranteed to be stable over time. + pub fn service_prefix(self) -> &'static str { + match self { + ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => Self::NTP_PREFIX, + ZoneKind::Clickhouse => "clickhouse", + ZoneKind::ClickhouseKeeper => "clickhouse_keeper", + ZoneKind::ClickhouseServer => "clickhouse_server", + ZoneKind::CockroachDb => "cockroachdb", + ZoneKind::Crucible => "crucible", + ZoneKind::CruciblePantry => "crucible/pantry", + ZoneKind::ExternalDns => "external_dns", + ZoneKind::InternalDns => "internal_dns", + ZoneKind::Nexus => "nexus", + ZoneKind::Oximeter => "oximeter", + } + } + + /// Return a string suitable for use **in `Name` instances**. This string + /// is guaranteed to be stable over time. + /// + /// This string uses dashes rather than underscores, as required by `Name`. + pub fn name_prefix(self) -> &'static str { + match self { + ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => Self::NTP_PREFIX, + ZoneKind::Clickhouse => "clickhouse", + ZoneKind::ClickhouseKeeper => "clickhouse-keeper", + ZoneKind::ClickhouseServer => "clickhouse-server", + ZoneKind::CockroachDb => "cockroach", + ZoneKind::Crucible => "crucible", + ZoneKind::CruciblePantry => "crucible-pantry", + ZoneKind::ExternalDns => "external-dns", + ZoneKind::InternalDns => "internal-dns", + ZoneKind::Nexus => "nexus", + ZoneKind::Oximeter => "oximeter", + } + } + + /// Return a string that is used for reporting and error messages. This is + /// **not guaranteed** to be stable. + /// + /// If you're displaying a user-friendly message, prefer this method. + pub fn report_str(self) -> &'static str { + match self { + ZoneKind::BoundaryNtp => "boundary_ntp", + ZoneKind::Clickhouse => "clickhouse", + ZoneKind::ClickhouseKeeper => "clickhouse_keeper", + ZoneKind::ClickhouseServer => "clickhouse_server", + ZoneKind::CockroachDb => "cockroach_db", + ZoneKind::Crucible => "crucible", + ZoneKind::CruciblePantry => "crucible_pantry", + ZoneKind::ExternalDns => "external_dns", + ZoneKind::InternalDns => "internal_dns", + ZoneKind::InternalNtp => "internal_ntp", + ZoneKind::Nexus => "nexus", + ZoneKind::Oximeter => "oximeter", + } + } + + /// Return a string used as an artifact name for control-plane zones. + /// This is **not guaranteed** to be stable. + /// + /// These strings match the `ArtifactId::name`s Nexus constructs when + /// unpacking the composite control-plane artifact in a TUF repo. Currently, + /// these are chosen by reading the `pkg` value of the `oxide.json` object + /// inside each zone image tarball. 
+ pub fn artifact_id_name(self) -> &'static str { + match self { + ZoneKind::BoundaryNtp => "ntp", + ZoneKind::Clickhouse => "clickhouse", + ZoneKind::ClickhouseKeeper => "clickhouse_keeper", + ZoneKind::ClickhouseServer => "clickhouse_server", + ZoneKind::CockroachDb => "cockroachdb", + ZoneKind::Crucible => "crucible-zone", + ZoneKind::CruciblePantry => "crucible-pantry-zone", + ZoneKind::ExternalDns => "external-dns", + ZoneKind::InternalDns => "internal-dns", + ZoneKind::InternalNtp => "ntp", + ZoneKind::Nexus => "nexus", + ZoneKind::Oximeter => "oximeter", + } + } + + /// Return true if an artifact represents a control plane zone image + /// of this kind. + pub fn is_control_plane_zone_artifact( + self, + artifact_id: &ArtifactId, + ) -> bool { + artifact_id + .kind + .to_known() + .map(|kind| matches!(kind, KnownArtifactKind::Zone)) + .unwrap_or(false) + && artifact_id.name == self.artifact_id_name() + } + + /// Map an artifact ID name to the corresponding file name in the install + /// dataset. + /// + /// We don't allow mapping artifact ID names to `ZoneKind` because the map + /// isn't bijective -- both internal and boundary NTP zones use the same + /// `ntp` artifact. But the artifact ID name and the name in the install + /// dataset do form a bijective map. + pub fn artifact_id_name_to_install_dataset_file( + artifact_id_name: &str, + ) -> Option<&'static str> { + let zone_kind = match artifact_id_name { + "ntp" => ZoneKind::BoundaryNtp, + "clickhouse" => ZoneKind::Clickhouse, + "clickhouse_keeper" => ZoneKind::ClickhouseKeeper, + "clickhouse_server" => ZoneKind::ClickhouseServer, + "cockroachdb" => ZoneKind::CockroachDb, + "crucible-zone" => ZoneKind::Crucible, + "crucible-pantry-zone" => ZoneKind::CruciblePantry, + "external-dns" => ZoneKind::ExternalDns, + "internal-dns" => ZoneKind::InternalDns, + "nexus" => ZoneKind::Nexus, + "oximeter" => ZoneKind::Oximeter, + _ => return None, + }; + Some(zone_kind.artifact_in_install_dataset()) + } +} + +impl OmicronZoneType { + /// Returns the [`ZoneKind`] corresponding to this variant. + pub fn kind(&self) -> ZoneKind { + match self { + OmicronZoneType::BoundaryNtp { .. } => ZoneKind::BoundaryNtp, + OmicronZoneType::Clickhouse { .. } => ZoneKind::Clickhouse, + OmicronZoneType::ClickhouseKeeper { .. } => { + ZoneKind::ClickhouseKeeper + } + OmicronZoneType::ClickhouseServer { .. } => { + ZoneKind::ClickhouseServer + } + OmicronZoneType::CockroachDb { .. } => ZoneKind::CockroachDb, + OmicronZoneType::Crucible { .. } => ZoneKind::Crucible, + OmicronZoneType::CruciblePantry { .. } => ZoneKind::CruciblePantry, + OmicronZoneType::ExternalDns { .. } => ZoneKind::ExternalDns, + OmicronZoneType::InternalDns { .. } => ZoneKind::InternalDns, + OmicronZoneType::InternalNtp { .. } => ZoneKind::InternalNtp, + OmicronZoneType::Nexus { .. } => ZoneKind::Nexus, + OmicronZoneType::Oximeter { .. } => ZoneKind::Oximeter, + } + } + + /// Does this zone require time synchronization before it is initialized? + /// + /// This function is somewhat conservative - the set of services + /// that can be launched before timesync has completed is intentionally kept + /// small, since it would be easy to add a service that expects time to be + /// reasonably synchronized. + pub fn requires_timesync(&self) -> bool { + match self { + OmicronZoneType::BoundaryNtp { .. } + | OmicronZoneType::InternalNtp { .. } + | OmicronZoneType::InternalDns { .. } => false, + _ => true, + } + } + + /// Returns the underlay IP address associated with this zone. 
+ /// + /// Assumes all zones have exactly one underlay IP address (which is + /// currently true). + pub fn underlay_ip(&self) -> Ipv6Addr { + match self { + OmicronZoneType::BoundaryNtp { address, .. } + | OmicronZoneType::Clickhouse { address, .. } + | OmicronZoneType::ClickhouseKeeper { address, .. } + | OmicronZoneType::ClickhouseServer { address, .. } + | OmicronZoneType::CockroachDb { address, .. } + | OmicronZoneType::Crucible { address, .. } + | OmicronZoneType::CruciblePantry { address } + | OmicronZoneType::ExternalDns { http_address: address, .. } + | OmicronZoneType::InternalNtp { address } + | OmicronZoneType::Nexus { internal_address: address, .. } + | OmicronZoneType::Oximeter { address } => *address.ip(), + OmicronZoneType::InternalDns { + http_address: address, + dns_address, + .. + } => { + debug_assert_eq!(address.ip(), dns_address.ip()); + *address.ip() + } + } + } + + /// Identifies whether this is an NTP zone. + pub fn is_ntp(&self) -> bool { + matches!( + self, + OmicronZoneType::BoundaryNtp { .. } + | OmicronZoneType::InternalNtp { .. } + ) + } + + /// Identifies whether this is a boundary NTP zone. + pub fn is_boundary_ntp(&self) -> bool { + matches!(self, OmicronZoneType::BoundaryNtp { .. }) + } + + /// Identifies whether this is a Nexus zone. + pub fn is_nexus(&self) -> bool { + matches!(self, OmicronZoneType::Nexus { .. }) + } + + /// Identifies whether this is a Crucible (not Crucible pantry) zone. + pub fn is_crucible(&self) -> bool { + matches!(self, OmicronZoneType::Crucible { .. }) + } + + /// This zone's external IP. + pub fn external_ip(&self) -> Option { + match self { + OmicronZoneType::Nexus { external_ip, .. } => Some(*external_ip), + OmicronZoneType::ExternalDns { dns_address, .. } => { + Some(dns_address.ip()) + } + OmicronZoneType::BoundaryNtp { snat_cfg, .. } => Some(snat_cfg.ip), + _ => None, + } + } + + /// The service vNIC providing external connectivity to this zone. + pub fn service_vnic(&self) -> Option<&NetworkInterface> { + match self { + OmicronZoneType::Nexus { nic, .. } + | OmicronZoneType::ExternalDns { nic, .. } + | OmicronZoneType::BoundaryNtp { nic, .. } => Some(nic), + _ => None, + } + } + + /// If this kind of zone has an associated dataset, return the dataset's + /// name. Otherwise, return `None`. + pub fn dataset_name(&self) -> Option { + let (dataset, dataset_kind) = match self { + OmicronZoneType::BoundaryNtp { .. } + | OmicronZoneType::InternalNtp { .. } + | OmicronZoneType::Nexus { .. } + | OmicronZoneType::Oximeter { .. } + | OmicronZoneType::CruciblePantry { .. } => None, + OmicronZoneType::Clickhouse { dataset, .. } => { + Some((dataset, DatasetKind::Clickhouse)) + } + OmicronZoneType::ClickhouseKeeper { dataset, .. } => { + Some((dataset, DatasetKind::ClickhouseKeeper)) + } + OmicronZoneType::ClickhouseServer { dataset, .. } => { + Some((dataset, DatasetKind::ClickhouseServer)) + } + OmicronZoneType::CockroachDb { dataset, .. } => { + Some((dataset, DatasetKind::Cockroach)) + } + OmicronZoneType::Crucible { dataset, .. } => { + Some((dataset, DatasetKind::Crucible)) + } + OmicronZoneType::ExternalDns { dataset, .. } => { + Some((dataset, DatasetKind::ExternalDns)) + } + OmicronZoneType::InternalDns { dataset, .. } => { + Some((dataset, DatasetKind::InternalDns)) + } + }?; + Some(DatasetName::new(dataset.pool_name, dataset_kind)) + } +} + +impl OmicronZonesConfig { + /// Generation 1 of `OmicronZonesConfig` is always the set of no zones. 
+ pub const INITIAL_GENERATION: Generation = Generation::from_u32(1); +} + +impl OmicronZoneConfig { + /// Returns the underlay IP address associated with this zone. + /// + /// Assumes all zones have exactly one underlay IP address (which is + /// currently true). + pub fn underlay_ip(&self) -> Ipv6Addr { + self.zone_type.underlay_ip() + } + + /// Returns the zone name for this zone configuration. + pub fn zone_name(&self) -> String { + illumos_utils::running_zone::InstalledZone::get_zone_name( + self.zone_type.kind().zone_prefix(), + Some(self.id), + ) + } + + /// If this kind of zone has an associated dataset, return the dataset's + /// name. Otherwise, return `None`. + pub fn dataset_name(&self) -> Option { + self.zone_type.dataset_name() + } +} + +impl ConfigReconcilerInventory { + /// Iterate over all running zones as reported by the last reconciliation + /// result. + /// + /// This includes zones that are both present in `last_reconciled_config` + /// and whose status in `zones` indicates "successfully running". + pub fn running_omicron_zones( + &self, + ) -> impl Iterator { + self.zones.iter().filter_map(|(zone_id, result)| match result { + ConfigReconcilerInventoryResult::Ok => { + self.last_reconciled_config.zones.get(zone_id) + } + ConfigReconcilerInventoryResult::Err { .. } => None, + }) + } + + /// Iterate over all zones contained in the most-recently-reconciled sled + /// config and report their status as of that reconciliation. + pub fn reconciled_omicron_zones( + &self, + ) -> impl Iterator + { + self.zones.iter().filter_map(|(zone_id, result)| { + let config = self.last_reconciled_config.zones.get(zone_id)?; + Some((config, result)) + }) + } + + /// Given a sled config, produce a reconciler result that sled-agent could + /// have emitted if reconciliation succeeded. + /// + /// This method should only be used by tests and dev tools; real code should + /// look at the actual `last_reconciliation` value from the parent + /// [`Inventory`](crate::latest::inventory::Inventory). + pub fn debug_assume_success(config: OmicronSledConfig) -> Self { + let mut ret = ConfigReconcilerInventory { + last_reconciled_config: OmicronSledConfig::default(), + external_disks: BTreeMap::new(), + datasets: BTreeMap::new(), + orphaned_datasets: IdOrdMap::new(), + zones: BTreeMap::new(), + remove_mupdate_override: None, + boot_partitions: BootPartitionContents::debug_assume_success(), + }; + ret.debug_update_assume_success(config); + ret + } + + /// Given a sled config, update an existing reconciler result to simulate an + /// output that sled-agent could have emitted if reconciliation succeeded. + /// + /// This method should only be used by tests and dev tools; real code should + /// look at the actual `last_reconciliation` value from the parent + /// [`Inventory`](crate::latest::inventory::Inventory). 
+ pub fn debug_update_assume_success(&mut self, config: OmicronSledConfig) { + let external_disks = config + .disks + .iter() + .map(|d| (d.id, ConfigReconcilerInventoryResult::Ok)) + .collect(); + let datasets = config + .datasets + .iter() + .map(|d| (d.id, ConfigReconcilerInventoryResult::Ok)) + .collect(); + let zones = config + .zones + .iter() + .map(|z| (z.id, ConfigReconcilerInventoryResult::Ok)) + .collect(); + let remove_mupdate_override = + config.remove_mupdate_override.map(|_| { + RemoveMupdateOverrideInventory { + boot_disk_result: Ok( + RemoveMupdateOverrideBootSuccessInventory::Removed, + ), + non_boot_message: + "mupdate override successfully removed on non-boot disks" + .to_owned(), + } + }); + + self.last_reconciled_config = config; + self.external_disks = external_disks; + self.datasets = datasets; + self.orphaned_datasets = IdOrdMap::new(); + self.zones = zones; + self.remove_mupdate_override = remove_mupdate_override; + } +} + +impl HostPhase2DesiredContents { + /// The artifact hash described by `self`, if it has one. + pub fn artifact_hash(&self) -> Option { + match self { + Self::CurrentContents => None, + Self::Artifact { hash } => Some(*hash), + } + } +} + +impl OmicronZoneImageSource { + /// Return the artifact hash used for the zone image, if the zone's image + /// source is from the artifact store. + pub fn artifact_hash(&self) -> Option { + if let OmicronZoneImageSource::Artifact { hash } = self { + Some(*hash) + } else { + None + } + } +} + +impl BootPartitionContents { + /// Returns the slot details for the given M.2 slot. + pub fn slot_details( + &self, + slot: M2Slot, + ) -> &Result { + match slot { + M2Slot::A => &self.slot_a, + M2Slot::B => &self.slot_b, + } + } + + /// Returns a fake `BootPartitionContents` for testing. + pub fn debug_assume_success() -> BootPartitionContents { + BootPartitionContents { + boot_disk: Ok(M2Slot::A), + slot_a: Ok(BootPartitionDetails { + header: BootImageHeader { + flags: 0, + data_size: 1000, + image_size: 1000, + target_size: 1000, + sha256: [0; 32], + image_name: "fake from debug_assume_success()".to_string(), + }, + artifact_hash: ArtifactHash([0x0a; 32]), + artifact_size: 1000, + }), + slot_b: Ok(BootPartitionDetails { + header: BootImageHeader { + flags: 0, + data_size: 1000, + image_size: 1000, + target_size: 1000, + sha256: [1; 32], + image_name: "fake from debug_assume_success()".to_string(), + }, + artifact_hash: ArtifactHash([0x0b; 32]), + artifact_size: 1000, + }), + } + } +} + +impl ZoneImageResolverInventory { + /// Returns a new, fake inventory for tests. + pub fn new_fake() -> ZoneImageResolverInventory { + ZoneImageResolverInventory { + zone_manifest: ManifestInventory::new_fake(), + mupdate_override: MupdateOverrideInventory::new_fake(), + } + } +} + +impl ManifestInventory { + /// Returns a new, empty inventory for tests. + pub fn new_fake() -> ManifestInventory { + ManifestInventory { + boot_disk_path: Utf8PathBuf::from("/fake/path/install/zones.json"), + boot_inventory: Ok(ManifestBootInventory::new_fake()), + non_boot_status: IdOrdMap::new(), + } + } +} + +impl ManifestBootInventory { + /// Returns a new, empty inventory for tests. + /// + /// For a more representative selection of real zones, see `representative` + /// in `nexus-inventory`. 
+ pub fn new_fake() -> ManifestBootInventory { + ManifestBootInventory { + source: OmicronInstallManifestSource::Installinator { + mupdate_id: MupdateUuid::nil(), + }, + artifacts: IdOrdMap::new(), + } + } +} + +impl MupdateOverrideInventory { + /// Returns a new, empty inventory for tests. + pub fn new_fake() -> MupdateOverrideInventory { + MupdateOverrideInventory { + boot_disk_path: Utf8PathBuf::from( + "/fake/path/install/mupdate_override.json", + ), + boot_override: Ok(None), + non_boot_status: IdOrdMap::new(), + } + } +} + +/// Display helper for [`ZoneImageResolverInventory`]. +pub struct ZoneImageResolverInventoryDisplay<'a> { + inner: &'a ZoneImageResolverInventory, +} + +impl fmt::Display for ZoneImageResolverInventoryDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ZoneImageResolverInventory { zone_manifest, mupdate_override } = + self.inner; + writeln!(f, "zone manifest:")?; + let mut indented = IndentWriter::new(" ", f); + write!(indented, "{}", zone_manifest.display())?; + let f = indented.into_inner(); + writeln!(f, "mupdate override:")?; + let mut indented = IndentWriter::new(" ", f); + write!(indented, "{}", mupdate_override.display())?; + Ok(()) + } +} + +impl ZoneImageResolverInventory { + /// Returns a displayer for this inventory. + pub fn display(&self) -> ZoneImageResolverInventoryDisplay<'_> { + ZoneImageResolverInventoryDisplay { inner: self } + } +} + +/// Display helper for [`ManifestInventory`]. +pub struct ManifestInventoryDisplay<'a> { + inner: &'a ManifestInventory, +} + +impl fmt::Display for ManifestInventoryDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut f = f; + let ManifestInventory { + boot_disk_path, + boot_inventory, + non_boot_status, + } = self.inner; + writeln!(f, "path on boot disk: {}", boot_disk_path)?; + match boot_inventory { + Ok(boot_inventory) => { + writeln!(f, "boot disk inventory:")?; + let mut indented = IndentWriter::new(" ", f); + write!(indented, "{}", boot_inventory.display())?; + f = indented.into_inner(); + } + Err(error) => { + writeln!( + f, + "error obtaining zone manifest on boot disk: {error}" + )?; + } + } + if non_boot_status.is_empty() { + writeln!(f, "no non-boot disks")?; + } else { + writeln!(f, "non-boot disk status:")?; + for non_boot in non_boot_status { + let mut indented = IndentWriter::new_skip_initial(" ", f); + writeln!(indented, " - {}", non_boot.display())?; + f = indented.into_inner(); + } + } + Ok(()) + } +} + +impl ManifestInventory { + /// Returns a displayer for this inventory. + pub fn display(&self) -> ManifestInventoryDisplay<'_> { + ManifestInventoryDisplay { inner: self } + } +} + +/// Display helper for [`ManifestBootInventory`]. +pub struct ManifestBootInventoryDisplay<'a> { + inner: &'a ManifestBootInventory, +} + +impl fmt::Display for ManifestBootInventoryDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut f = f; + let ManifestBootInventory { source, artifacts } = self.inner; + writeln!(f, "manifest generated by {}", source)?; + if artifacts.is_empty() { + writeln!( + f, + "no artifacts in install dataset (this should only be seen in simulated systems)" + )?; + } else { + writeln!(f, "artifacts in install dataset:")?; + for artifact in artifacts { + let mut indented = IndentWriter::new_skip_initial(" ", f); + writeln!(indented, " - {}", artifact.display())?; + f = indented.into_inner(); + } + } + Ok(()) + } +} + +impl ManifestBootInventory { + /// Returns a displayer for this inventory. 
+ pub fn display(&self) -> ManifestBootInventoryDisplay<'_> { + ManifestBootInventoryDisplay { inner: self } + } +} + +/// Display helper for [`ZoneArtifactInventory`]. +pub struct ZoneArtifactInventoryDisplay<'a> { + inner: &'a ZoneArtifactInventory, +} + +impl fmt::Display for ZoneArtifactInventoryDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ZoneArtifactInventory { + file_name, + path: _, + expected_size, + expected_hash, + status, + } = self.inner; + write!( + f, + "{file_name} (expected {expected_size} bytes with hash {expected_hash}): " + )?; + match status { + Ok(()) => write!(f, "ok"), + Err(message) => write!(f, "error: {message}"), + } + } +} + +impl ZoneArtifactInventory { + /// Returns a displayer for this inventory. + pub fn display(&self) -> ZoneArtifactInventoryDisplay<'_> { + ZoneArtifactInventoryDisplay { inner: self } + } +} + +/// Display helper for [`ManifestNonBootInventory`]. +pub struct ManifestNonBootInventoryDisplay<'a> { + inner: &'a ManifestNonBootInventory, +} + +impl fmt::Display for ManifestNonBootInventoryDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ManifestNonBootInventory { zpool_id: _, path, is_valid, message } = + self.inner; + write!( + f, + "{path} ({}): {message}", + if *is_valid { "valid" } else { "invalid" } + ) + } +} + +impl ManifestNonBootInventory { + /// Returns a displayer for this inventory. + pub fn display(&self) -> ManifestNonBootInventoryDisplay<'_> { + ManifestNonBootInventoryDisplay { inner: self } + } +} + +/// Display helper for [`MupdateOverrideInventory`]. +pub struct MupdateOverrideInventoryDisplay<'a> { + inner: &'a MupdateOverrideInventory, +} + +impl fmt::Display for MupdateOverrideInventoryDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut f = f; + let MupdateOverrideInventory { + boot_disk_path, + boot_override, + non_boot_status, + } = self.inner; + writeln!(f, "path on boot disk: {boot_disk_path}")?; + match boot_override { + Ok(Some(boot_override)) => { + writeln!( + f, + "override on boot disk: {}", + boot_override.display() + )?; + } + Ok(None) => { + writeln!(f, "no override on boot disk")?; + } + Err(error) => { + writeln!(f, "error obtaining override on boot disk: {error}")?; + } + } + if non_boot_status.is_empty() { + writeln!(f, "no non-boot disks")?; + } else { + writeln!(f, "non-boot disk status:")?; + for non_boot in non_boot_status { + let mut indented = IndentWriter::new_skip_initial(" ", f); + writeln!(indented, " - {}", non_boot.display())?; + f = indented.into_inner(); + } + } + Ok(()) + } +} + +impl MupdateOverrideInventory { + /// Returns a displayer for this inventory. + pub fn display(&self) -> MupdateOverrideInventoryDisplay<'_> { + MupdateOverrideInventoryDisplay { inner: self } + } +} + +/// Display helper for [`MupdateOverrideBootInventory`]. +pub struct MupdateOverrideBootInventoryDisplay<'a> { + inner: &'a MupdateOverrideBootInventory, +} + +impl fmt::Display for MupdateOverrideBootInventoryDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let MupdateOverrideBootInventory { mupdate_override_id } = self.inner; + write!(f, "{}", mupdate_override_id) + } +} + +impl MupdateOverrideBootInventory { + /// Returns a displayer for this inventory. + pub fn display(&self) -> MupdateOverrideBootInventoryDisplay<'_> { + MupdateOverrideBootInventoryDisplay { inner: self } + } +} + +/// Display helper for [`MupdateOverrideNonBootInventory`]. 
+pub struct MupdateOverrideNonBootInventoryDisplay<'a> { + inner: &'a MupdateOverrideNonBootInventory, +} + +impl fmt::Display for MupdateOverrideNonBootInventoryDisplay<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let MupdateOverrideNonBootInventory { + zpool_id: _, + path, + is_valid, + message, + } = self.inner; + write!( + f, + "{path} ({}): {message}", + if *is_valid { "valid" } else { "invalid" } + ) + } +} + +impl MupdateOverrideNonBootInventory { + /// Returns a displayer for this inventory. + pub fn display(&self) -> MupdateOverrideNonBootInventoryDisplay<'_> { + MupdateOverrideNonBootInventoryDisplay { inner: self } + } +} + +impl HostPhase2DesiredSlots { + /// Return a `HostPhase2DesiredSlots` with both slots set to + /// [`HostPhase2DesiredContents::CurrentContents`]; i.e., "make no changes + /// to the current contents of either slot". + pub const fn current_contents() -> Self { + Self { + slot_a: HostPhase2DesiredContents::CurrentContents, + slot_b: HostPhase2DesiredContents::CurrentContents, + } + } +} + +impl Default for OmicronSledConfig { + fn default() -> Self { + Self { + generation: Generation::new(), + disks: IdOrdMap::default(), + datasets: IdOrdMap::default(), + zones: IdOrdMap::default(), + remove_mupdate_override: None, + host_phase_2: HostPhase2DesiredSlots::current_contents(), + } + } +} + +impl Ledgerable for OmicronSledConfig { + fn is_newer_than(&self, other: &Self) -> bool { + self.generation > other.generation + } + + fn generation_bump(&mut self) { + // DO NOTHING! + // + // Generation bumps must only ever come from nexus and will be encoded + // in the struct itself + } +} diff --git a/sled-agent/types/versions/src/impls/mod.rs b/sled-agent/types/versions/src/impls/mod.rs new file mode 100644 index 00000000000..f889334211b --- /dev/null +++ b/sled-agent/types/versions/src/impls/mod.rs @@ -0,0 +1,12 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Functional code for the latest versions of the types in this crate. + +mod disk; +mod early_networking; +mod instance; +pub(crate) mod inventory; +mod sled; +mod zone_bundle; diff --git a/sled-agent/types/versions/src/impls/sled.rs b/sled-agent/types/versions/src/impls/sled.rs new file mode 100644 index 00000000000..e86d55b2fb8 --- /dev/null +++ b/sled-agent/types/versions/src/impls/sled.rs @@ -0,0 +1,36 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementations for sled types. + +use std::net::{Ipv6Addr, SocketAddrV6}; + +use omicron_common::address; +use sha3::{Digest, Sha3_256}; + +use crate::latest::sled::StartSledAgentRequest; + +impl StartSledAgentRequest { + /// Returns the sled's address. + pub fn sled_address(&self) -> SocketAddrV6 { + address::get_sled_address(self.body.subnet) + } + + /// Returns the switch zone's IP address. + pub fn switch_zone_ip(&self) -> Ipv6Addr { + address::get_switch_zone_address(self.body.subnet) + } + + /// Compute the sha3_256 digest of `self.rack_id` to use as a `salt` + /// for disk encryption. We don't want to include other values that are + /// consistent across sleds as it would prevent us from moving drives + /// between sleds. 
+ pub fn hash_rack_id(&self) -> [u8; 32] { + // We know the unwrap succeeds as a Sha3_256 digest is 32 bytes + Sha3_256::digest(self.body.rack_id.as_bytes()) + .as_slice() + .try_into() + .unwrap() + } +} diff --git a/sled-agent/types/versions/src/impls/zone_bundle.rs b/sled-agent/types/versions/src/impls/zone_bundle.rs new file mode 100644 index 00000000000..3f2e8b10ada --- /dev/null +++ b/sled-agent/types/versions/src/impls/zone_bundle.rs @@ -0,0 +1,303 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementations for zone bundle types. + +use std::cmp::Ordering; +use std::collections::HashSet; +use std::time::Duration; + +use chrono::Utc; +use uuid::Uuid; + +use crate::latest::zone_bundle::{ + CleanupPeriod, CleanupPeriodCreateError, PriorityDimension, PriorityOrder, + PriorityOrderCreateError, StorageLimit, StorageLimitCreateError, + ZoneBundleCause, ZoneBundleId, ZoneBundleMetadata, +}; + +impl ZoneBundleMetadata { + pub const VERSION: u8 = 0; + + /// Create a new set of metadata for the provided zone. + pub fn new(zone_name: &str, cause: ZoneBundleCause) -> Self { + Self { + id: ZoneBundleId { + zone_name: zone_name.to_string(), + bundle_id: Uuid::new_v4(), + }, + time_created: Utc::now(), + version: Self::VERSION, + cause, + } + } +} + +impl std::ops::Deref for PriorityOrder { + type Target = [PriorityDimension; PriorityOrder::EXPECTED_SIZE]; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Default for PriorityOrder { + fn default() -> Self { + Self::DEFAULT + } +} + +impl std::fmt::Display for PriorityOrderCreateError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PriorityOrderCreateError::WrongDimensionCount(n) => { + write!( + f, + "expected exactly {} dimensions, found {}", + PriorityOrder::EXPECTED_SIZE, + n + ) + } + PriorityOrderCreateError::DuplicateFound(dim) => { + write!( + f, + "duplicate element found in priority ordering: {:?}", + dim + ) + } + } + } +} + +impl std::error::Error for PriorityOrderCreateError {} + +impl PriorityOrder { + // NOTE: Must match the number of variants in `PriorityDimension`. + pub(crate) const EXPECTED_SIZE: usize = 2; + const DEFAULT: Self = + Self([PriorityDimension::Cause, PriorityDimension::Time]); + + /// Construct a new priority order. + /// + /// This requires that each dimension appear exactly once. + pub fn new( + dims: &[PriorityDimension], + ) -> Result { + if dims.len() != Self::EXPECTED_SIZE { + return Err(PriorityOrderCreateError::WrongDimensionCount( + dims.len(), + )); + } + let mut seen = HashSet::new(); + for dim in dims.iter() { + if !seen.insert(dim) { + return Err(PriorityOrderCreateError::DuplicateFound(*dim)); + } + } + Ok(Self(dims.try_into().unwrap())) + } + + /// Get the priority order as a slice. + pub fn as_slice(&self) -> &[PriorityDimension] { + &self.0 + } + + /// Compare two zone bundle metadata according to this priority order. 
+ pub fn compare_metadata( + &self, + lhs: &ZoneBundleMetadata, + rhs: &ZoneBundleMetadata, + ) -> Ordering { + // PriorityOrder implements Deref to the array, so self.iter() works + for dim in self.iter() { + let ord = match dim { + PriorityDimension::Cause => lhs.cause.cmp(&rhs.cause), + PriorityDimension::Time => { + lhs.time_created.cmp(&rhs.time_created) + } + }; + if matches!(ord, Ordering::Equal) { + continue; + } + return ord; + } + Ordering::Equal + } +} + +impl Default for CleanupPeriod { + fn default() -> Self { + Self(Duration::from_secs(600)) + } +} + +impl std::fmt::Display for CleanupPeriodCreateError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "invalid cleanup period ({:?}): must be between {:?} and {:?}, inclusive", + self.0, + CleanupPeriod::MIN.as_duration(), + CleanupPeriod::MAX.as_duration(), + ) + } +} + +impl std::error::Error for CleanupPeriodCreateError {} + +impl CleanupPeriod { + /// The minimum supported cleanup period. + pub const MIN: Self = Self(Duration::from_secs(60)); + + /// The maximum supported cleanup period. + pub const MAX: Self = Self(Duration::from_secs(60 * 60 * 24)); + + /// Construct a new cleanup period, checking that it's valid. + pub fn new(duration: Duration) -> Result { + if duration >= Self::MIN.as_duration() + && duration <= Self::MAX.as_duration() + { + Ok(Self(duration)) + } else { + Err(CleanupPeriodCreateError(duration)) + } + } + + /// Return the period as a duration. + pub const fn as_duration(&self) -> Duration { + self.0 + } +} + +impl TryFrom for CleanupPeriod { + type Error = CleanupPeriodCreateError; + + fn try_from(duration: Duration) -> Result { + Self::new(duration) + } +} + +impl std::fmt::Debug for CleanupPeriod { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl std::fmt::Display for StorageLimit { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}%", self.as_u8()) + } +} + +impl Default for StorageLimit { + fn default() -> Self { + StorageLimit(25) + } +} + +impl std::fmt::Display for StorageLimitCreateError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "invalid storage limit ({}): must be expressed as a percentage in ({}, {}]", + self.0, + StorageLimit::MIN.0, + StorageLimit::MAX.0, + ) + } +} + +impl std::error::Error for StorageLimitCreateError {} + +impl StorageLimit { + /// Minimum percentage of dataset quota supported. + pub const MIN: Self = Self(0); + + /// Maximum percentage of dataset quota supported. + pub const MAX: Self = Self(50); + + /// Construct a new limit allowed for zone bundles. + /// + /// This should be expressed as a percentage, in the range (Self::MIN, + /// Self::MAX]. + pub const fn new(percentage: u8) -> Result { + if percentage > Self::MIN.0 && percentage <= Self::MAX.0 { + Ok(Self(percentage)) + } else { + Err(StorageLimitCreateError(percentage)) + } + } + + /// Return the contained quota percentage. + pub const fn as_u8(&self) -> u8 { + self.0 + } + + // Compute the number of bytes available from a dataset quota, in bytes. 
+ pub const fn bytes_available(&self, dataset_quota: u64) -> u64 { + (dataset_quota * self.as_u8() as u64) / 100 + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::latest::zone_bundle::{StorageLimit, ZoneBundleCause}; + + #[test] + fn test_sort_zone_bundle_cause() { + use ZoneBundleCause::*; + let mut original = [Other, TerminatedInstance, UnexpectedZone]; + let expected = [Other, UnexpectedZone, TerminatedInstance]; + original.sort(); + assert_eq!(original, expected); + } + + #[test] + fn test_priority_dimension() { + assert!(PriorityOrder::new(&[]).is_err()); + assert!(PriorityOrder::new(&[PriorityDimension::Cause]).is_err()); + assert!( + PriorityOrder::new(&[ + PriorityDimension::Cause, + PriorityDimension::Cause + ]) + .is_err() + ); + assert!( + PriorityOrder::new(&[ + PriorityDimension::Cause, + PriorityDimension::Cause, + PriorityDimension::Time + ]) + .is_err() + ); + + assert!( + PriorityOrder::new(&[ + PriorityDimension::Cause, + PriorityDimension::Time + ]) + .is_ok() + ); + assert_eq!( + PriorityOrder::new(PriorityOrder::default().as_slice()).unwrap(), + PriorityOrder::default() + ); + } + + #[test] + fn test_storage_limit_bytes_available() { + let pct = StorageLimit::new(1).unwrap(); + assert_eq!(pct.bytes_available(100), 1); + assert_eq!(pct.bytes_available(1000), 10); + + let pct = StorageLimit::new(50).unwrap(); + assert_eq!(pct.bytes_available(100), 50); + assert_eq!(pct.bytes_available(1000), 500); + + // Test non-power of 10. + let pct = StorageLimit::new(25).unwrap(); + assert_eq!(pct.bytes_available(32768), 8192); + } +} diff --git a/sled-agent/types/versions/src/initial/disk.rs b/sled-agent/types/versions/src/initial/disk.rs index 5ba09e00f65..9d774590dbc 100644 --- a/sled-agent/types/versions/src/initial/disk.rs +++ b/sled-agent/types/versions/src/initial/disk.rs @@ -59,16 +59,3 @@ pub enum DiskStateRequested { Destroyed, Faulted, } - -impl DiskStateRequested { - /// Returns whether the requested state is attached to an Instance or not. - pub fn is_attached(&self) -> bool { - match self { - DiskStateRequested::Detached => false, - DiskStateRequested::Destroyed => false, - DiskStateRequested::Faulted => false, - - DiskStateRequested::Attached(_) => true, - } - } -} diff --git a/sled-agent/types/versions/src/initial/early_networking.rs b/sled-agent/types/versions/src/initial/early_networking.rs index 7c18ea21a46..64750e496ea 100644 --- a/sled-agent/types/versions/src/initial/early_networking.rs +++ b/sled-agent/types/versions/src/initial/early_networking.rs @@ -4,13 +4,10 @@ //! Types for network setup required to bring up the control plane. 
-use std::str::FromStr; - use bootstore::schemes::v0 as bootstore; use omicron_common::api::internal::shared::RackNetworkConfig; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use slog::{Logger, warn}; /// Network configuration required to bring up the control plane /// @@ -33,126 +30,6 @@ pub struct EarlyNetworkConfig { pub body: EarlyNetworkConfigBody, } -impl FromStr for EarlyNetworkConfig { - type Err = String; - - fn from_str(value: &str) -> Result { - #[derive(Deserialize)] - struct ShadowConfig { - generation: u64, - schema_version: u32, - body: EarlyNetworkConfigBody, - } - - let v2_err = match serde_json::from_str::(&value) { - Ok(cfg) => { - return Ok(EarlyNetworkConfig { - generation: cfg.generation, - schema_version: cfg.schema_version, - body: cfg.body, - }); - } - Err(e) => format!("unable to parse EarlyNetworkConfig: {e:?}"), - }; - // If we fail to parse the config as any known version, we return the - // error corresponding to the parse failure of the newest schema. - serde_json::from_str::(&value) - .map(|v1| EarlyNetworkConfig { - generation: v1.generation, - schema_version: Self::schema_version(), - body: v1.body.into(), - }) - .map_err(|_| v2_err) - } -} - -impl EarlyNetworkConfig { - pub fn schema_version() -> u32 { - 2 - } - - // Note: This currently only converts between v0 and v1 or deserializes v1 of - // `EarlyNetworkConfig`. - pub fn deserialize_bootstore_config( - log: &Logger, - config: &bootstore::NetworkConfig, - ) -> Result { - // Try to deserialize the latest version of the data structure (v2). If - // that succeeds we are done. - let v2_error = - match serde_json::from_slice::(&config.blob) { - Ok(val) => return Ok(val), - Err(error) => { - // Log this error and continue trying to deserialize older - // versions. - warn!( - log, - "Failed to deserialize EarlyNetworkConfig \ - as v2, trying next as v1: {}", - error, - ); - error - } - }; - - match serde_json::from_slice::( - &config.blob, - ) { - Ok(v1) => { - // Convert from v1 to v2 - return Ok(EarlyNetworkConfig { - generation: v1.generation, - schema_version: EarlyNetworkConfig::schema_version(), - body: v1.body.into(), - }); - } - Err(error) => { - // Log this error. - warn!( - log, - "Failed to deserialize EarlyNetworkConfig \ - as v1, trying next as v0: {}", - error - ); - } - }; - - match serde_json::from_slice::( - &config.blob, - ) { - Ok(val) => { - // Convert from v0 to v2 - return Ok(EarlyNetworkConfig { - generation: val.generation, - schema_version: 2, - body: EarlyNetworkConfigBody { - ntp_servers: val.ntp_servers, - rack_network_config: val.rack_network_config.map( - |v0_config| { - back_compat::RackNetworkConfigV0::to_v2( - val.rack_subnet, - v0_config, - ) - }, - ), - }, - }); - } - Err(error) => { - // Log this error. - warn!( - log, - "Failed to deserialize EarlyNetworkConfig as v0: {}", error, - ); - } - }; - - // If we fail to parse the config as any known version, we return the - // error corresponding to the parse failure of the newest schema. - Err(v2_error) - } -} - /// This is the actual configuration of EarlyNetworking. 
/// /// We nest it below the "header" of `generation` and `schema_version` so that diff --git a/sled-agent/types/versions/src/initial/instance.rs b/sled-agent/types/versions/src/initial/instance.rs index 3c05b49134f..f1a597076f8 100644 --- a/sled-agent/types/versions/src/initial/instance.rs +++ b/sled-agent/types/versions/src/initial/instance.rs @@ -14,13 +14,7 @@ use omicron_common::api::internal::shared::DhcpConfig; use omicron_common::api::internal::shared::external_ip::v1::SourceNatConfig; use omicron_common::api::internal::shared::network_interface::v1::NetworkInterface; use omicron_uuid_kinds::{InstanceUuid, PropolisUuid}; -use propolis_api_types::instance_spec::{ - SpecKey, - components::backends::{ - CrucibleStorageBackend, FileStorageBackend, VirtioNetworkBackend, - }, - v0::{ComponentV0, InstanceSpecV0}, -}; +use propolis_api_types::instance_spec::v0::InstanceSpecV0; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -113,56 +107,6 @@ pub struct InstanceMetadata { #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] pub struct VmmSpec(pub InstanceSpecV0); -/// Extension trait for VmmSpec to provide helper methods. -pub trait VmmSpecExt { - fn crucible_backends( - &self, - ) -> impl Iterator; - - fn viona_backends( - &self, - ) -> impl Iterator; - - fn file_backends( - &self, - ) -> impl Iterator; -} - -impl VmmSpecExt for VmmSpec { - fn crucible_backends( - &self, - ) -> impl Iterator { - self.0.components.iter().filter_map( - |(key, component)| match component { - ComponentV0::CrucibleStorageBackend(be) => Some((key, be)), - _ => None, - }, - ) - } - - fn viona_backends( - &self, - ) -> impl Iterator { - self.0.components.iter().filter_map( - |(key, component)| match component { - ComponentV0::VirtioNetworkBackend(be) => Some((key, be)), - _ => None, - }, - ) - } - - fn file_backends( - &self, - ) -> impl Iterator { - self.0.components.iter().filter_map( - |(key, component)| match component { - ComponentV0::FileStorageBackend(be) => Some((key, be)), - _ => None, - }, - ) - } -} - /// VPC firewall rule after object name resolution has been performed by Nexus #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)] pub struct ResolvedVpcFirewallRule { @@ -213,33 +157,6 @@ pub enum VmmStateRequested { Reboot, } -impl std::fmt::Display for VmmStateRequested { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.label()) - } -} - -impl VmmStateRequested { - fn label(&self) -> &str { - match self { - VmmStateRequested::MigrationTarget(_) => "migrating in", - VmmStateRequested::Running => "running", - VmmStateRequested::Stopped => "stopped", - VmmStateRequested::Reboot => "reboot", - } - } - - /// Returns true if the state represents a stopped Instance. - pub fn is_stopped(&self) -> bool { - match self { - VmmStateRequested::MigrationTarget(_) => false, - VmmStateRequested::Running => false, - VmmStateRequested::Stopped => true, - VmmStateRequested::Reboot => false, - } - } -} - /// The response sent from a request to unregister an instance. #[derive(Serialize, Deserialize, JsonSchema)] pub struct VmmUnregisterResponse { diff --git a/sled-agent/types/versions/src/initial/inventory.rs b/sled-agent/types/versions/src/initial/inventory.rs index e80ee407c89..fadf4b848c9 100644 --- a/sled-agent/types/versions/src/initial/inventory.rs +++ b/sled-agent/types/versions/src/initial/inventory.rs @@ -5,7 +5,6 @@ //! Inventory types for Sled Agent API versions 1-3. 
use std::collections::BTreeMap; -use std::fmt::{self, Write}; use std::net::{IpAddr, Ipv6Addr, SocketAddr, SocketAddrV6}; use std::time::Duration; @@ -15,7 +14,6 @@ use daft::Diffable; use iddqd::IdOrdItem; use iddqd::IdOrdMap; use iddqd::id_upcast; -use indent_write::fmt::IndentWriter; use omicron_common::api::external::{ByteCount, Generation}; use omicron_common::api::internal::shared::external_ip::v1::SourceNatConfig; use omicron_common::api::internal::shared::network_interface::v1::NetworkInterface; @@ -24,16 +22,15 @@ use omicron_common::disk::{ }; use omicron_common::snake_case_result; use omicron_common::snake_case_result::SnakeCaseResult; -use omicron_common::update::{ArtifactId, OmicronInstallManifestSource}; +use omicron_common::update::OmicronInstallManifestSource; use omicron_common::zpool_name::ZpoolName; use omicron_uuid_kinds::{ - DatasetUuid, InternalZpoolUuid, MupdateOverrideUuid, MupdateUuid, - OmicronZoneUuid, PhysicalDiskUuid, SledUuid, ZpoolUuid, + DatasetUuid, InternalZpoolUuid, MupdateOverrideUuid, OmicronZoneUuid, + PhysicalDiskUuid, SledUuid, ZpoolUuid, }; use schemars::schema::{Schema, SchemaObject}; use schemars::{JsonSchema, r#gen::SchemaGenerator}; use serde::{Deserialize, Serialize}; -use tufaceous_artifact::KnownArtifactKind; // Export these types for convenience -- this way, dependents don't have to // depend on sled-hardware-types. pub use sled_hardware_types::{Baseboard, SledCpuFamily}; @@ -143,16 +140,6 @@ pub enum HostPhase2DesiredContents { Artifact { hash: ArtifactHash }, } -impl HostPhase2DesiredContents { - /// The artifact hash described by `self`, if it has one. - pub fn artifact_hash(&self) -> Option { - match self { - Self::CurrentContents => None, - Self::Artifact { hash } => Some(*hash), - } - } -} - /// Describes the desired contents for both host phase 2 slots. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] #[serde(rename_all = "snake_case")] @@ -161,18 +148,6 @@ pub struct HostPhase2DesiredSlots { pub slot_b: HostPhase2DesiredContents, } -impl HostPhase2DesiredSlots { - /// Return a `HostPhase2DesiredSlots` with both slots set to - /// [`HostPhase2DesiredContents::CurrentContents`]; i.e., "make no changes - /// to the current contents of either slot". 
- pub const fn current_contents() -> Self { - Self { - slot_a: HostPhase2DesiredContents::CurrentContents, - slot_b: HostPhase2DesiredContents::CurrentContents, - } - } -} - /// Describes a persistent ZFS dataset associated with an Omicron zone #[derive( Clone, @@ -208,48 +183,6 @@ pub struct BootPartitionContents { pub slot_b: Result, } -impl BootPartitionContents { - pub fn slot_details( - &self, - slot: M2Slot, - ) -> &Result { - match slot { - M2Slot::A => &self.slot_a, - M2Slot::B => &self.slot_b, - } - } - - pub fn debug_assume_success() -> Self { - Self { - boot_disk: Ok(M2Slot::A), - slot_a: Ok(BootPartitionDetails { - header: BootImageHeader { - flags: 0, - data_size: 1000, - image_size: 1000, - target_size: 1000, - sha256: [0; 32], - image_name: "fake from debug_assume_success()".to_string(), - }, - artifact_hash: ArtifactHash([0x0a; 32]), - artifact_size: 1000, - }), - slot_b: Ok(BootPartitionDetails { - header: BootImageHeader { - flags: 0, - data_size: 1000, - image_size: 1000, - target_size: 1000, - sha256: [1; 32], - image_name: "fake from debug_assume_success()".to_string(), - }, - artifact_hash: ArtifactHash([0x0b; 32]), - artifact_size: 1000, - }), - } - } -} - #[derive(Clone, Debug, PartialEq, Eq, Deserialize, JsonSchema, Serialize)] pub struct BootPartitionDetails { pub header: BootImageHeader, @@ -347,48 +280,6 @@ pub struct ZoneImageResolverInventory { pub mupdate_override: MupdateOverrideInventory, } -impl ZoneImageResolverInventory { - /// Returns a new, fake inventory for tests. - pub fn new_fake() -> Self { - Self { - zone_manifest: ManifestInventory::new_fake(), - mupdate_override: MupdateOverrideInventory::new_fake(), - } - } - - /// Returns a displayer for this inventory. - pub fn display(&self) -> ZoneImageResolverInventoryDisplay<'_> { - ZoneImageResolverInventoryDisplay { inner: self } - } -} - -/// Displayer for a [`ZoneImageResolverInventory`] -pub struct ZoneImageResolverInventoryDisplay<'a> { - inner: &'a ZoneImageResolverInventory, -} - -impl fmt::Display for ZoneImageResolverInventoryDisplay<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let ZoneImageResolverInventory { zone_manifest, mupdate_override } = - self.inner; - - writeln!(f, "zone manifest:")?; - let mut indented = IndentWriter::new(" ", f); - // Use write! rather than writeln! because zone_manifest.display() - // always produces a newline at the end. - write!(indented, "{}", zone_manifest.display())?; - let f = indented.into_inner(); - - writeln!(f, "mupdate override:")?; - let mut indented = IndentWriter::new(" ", f); - // Use write! rather than writeln! because mupdate_override.display() - // always produces a newline at the end. - write!(indented, "{}", mupdate_override.display())?; - - Ok(()) - } -} - /// Inventory representation of a manifest. /// /// Part of [`ZoneImageResolverInventory`]. @@ -415,71 +306,6 @@ pub struct ManifestInventory { pub non_boot_status: IdOrdMap, } -impl ManifestInventory { - /// Returns a new, empty inventory for tests. - pub fn new_fake() -> Self { - Self { - boot_disk_path: Utf8PathBuf::from("/fake/path/install/zones.json"), - boot_inventory: Ok(ManifestBootInventory::new_fake()), - non_boot_status: IdOrdMap::new(), - } - } - - /// Returns a displayer for this inventory. 
- pub fn display(&self) -> ManifestInventoryDisplay<'_> { - ManifestInventoryDisplay { inner: self } - } -} - -/// Displayer for a [`ManifestInventory`] -#[derive(Clone, Debug)] -pub struct ManifestInventoryDisplay<'a> { - inner: &'a ManifestInventory, -} - -impl fmt::Display for ManifestInventoryDisplay<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut f = f; - - let ManifestInventory { - boot_disk_path, - boot_inventory, - non_boot_status, - } = self.inner; - writeln!(f, "path on boot disk: {}", boot_disk_path)?; - - match boot_inventory { - Ok(boot_inventory) => { - writeln!(f, "boot disk inventory:")?; - let mut indented = IndentWriter::new(" ", f); - // Use write! rather than writeln! because - // boot_inventory.display() always ends with a newline. - write!(indented, "{}", boot_inventory.display())?; - f = indented.into_inner(); - } - Err(error) => { - writeln!( - f, - "error obtaining zone manifest on boot disk: {error}" - )?; - } - } - - if non_boot_status.is_empty() { - writeln!(f, "no non-boot disks")?; - } else { - writeln!(f, "non-boot disk status:")?; - for non_boot in non_boot_status { - let mut indented = IndentWriter::new_skip_initial(" ", f); - writeln!(indented, " - {}", non_boot.display())?; - f = indented.into_inner(); - } - } - - Ok(()) - } -} - /// Inventory representation of zone artifacts on the boot disk. /// /// Part of [`ManifestInventory`]. @@ -497,58 +323,6 @@ pub struct ManifestBootInventory { pub artifacts: IdOrdMap, } -impl ManifestBootInventory { - /// Returns a new, empty inventory for tests. - /// - /// For a more representative selection of real zones, see `representative` - /// in `nexus-inventory`. - pub fn new_fake() -> Self { - Self { - source: OmicronInstallManifestSource::Installinator { - mupdate_id: MupdateUuid::nil(), - }, - artifacts: IdOrdMap::new(), - } - } - - /// Returns a displayer for this inventory. - pub fn display(&self) -> ManifestBootInventoryDisplay<'_> { - ManifestBootInventoryDisplay { inner: self } - } -} - -/// Displayer for a [`ManifestBootInventory`]. -#[derive(Clone, Debug)] -pub struct ManifestBootInventoryDisplay<'a> { - inner: &'a ManifestBootInventory, -} - -impl fmt::Display for ManifestBootInventoryDisplay<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut f = f; - - let ManifestBootInventory { source, artifacts } = self.inner; - writeln!(f, "manifest generated by {}", source)?; - if artifacts.is_empty() { - writeln!( - f, - "no artifacts in install dataset \ - (this should only be seen in simulated systems)" - )?; - } else { - writeln!(f, "artifacts in install dataset:")?; - - for artifact in artifacts { - let mut indented = IndentWriter::new_skip_initial(" ", f); - writeln!(indented, " - {}", artifact.display())?; - f = indented.into_inner(); - } - } - - Ok(()) - } -} - /// Inventory representation of a single zone artifact on a boot disk. /// /// Part of [`ManifestBootInventory`]. @@ -577,13 +351,6 @@ pub struct ZoneArtifactInventory { pub status: Result<(), String>, } -impl ZoneArtifactInventory { - /// Returns a displayer for this inventory. - pub fn display(&self) -> ZoneArtifactInventoryDisplay<'_> { - ZoneArtifactInventoryDisplay { inner: self } - } -} - impl IdOrdItem for ZoneArtifactInventory { type Key<'a> = &'a str; fn key(&self) -> Self::Key<'_> { @@ -593,36 +360,6 @@ impl IdOrdItem for ZoneArtifactInventory { id_upcast!(); } -/// Displayer for [`ZoneArtifactInventory`]. 
-#[derive(Clone, Debug)] -pub struct ZoneArtifactInventoryDisplay<'a> { - inner: &'a ZoneArtifactInventory, -} - -impl fmt::Display for ZoneArtifactInventoryDisplay<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let ZoneArtifactInventory { - file_name, - // We don't show the path here because surrounding code typically - // displays the path. We could make this controllable in the future - // via a method on `ZoneArtifactInventoryDisplay`. - path: _, - expected_size, - expected_hash, - status, - } = self.inner; - write!( - f, - "{file_name} (expected {expected_size} bytes \ - with hash {expected_hash}): ", - )?; - match status { - Ok(()) => write!(f, "ok"), - Err(message) => write!(f, "error: {message}"), - } - } -} - /// Inventory representation of a zone manifest on a non-boot disk. /// /// Unlike [`ManifestBootInventory`] which is structured since @@ -652,13 +389,6 @@ pub struct ManifestNonBootInventory { pub message: String, } -impl ManifestNonBootInventory { - /// Returns a displayer for this inventory. - pub fn display(&self) -> ManifestNonBootInventoryDisplay<'_> { - ManifestNonBootInventoryDisplay { inner: self } - } -} - impl IdOrdItem for ManifestNonBootInventory { type Key<'a> = InternalZpoolUuid; fn key(&self) -> Self::Key<'_> { @@ -667,29 +397,6 @@ impl IdOrdItem for ManifestNonBootInventory { id_upcast!(); } -/// Displayer for a [`ManifestNonBootInventory`]. -#[derive(Clone, Debug)] -pub struct ManifestNonBootInventoryDisplay<'a> { - inner: &'a ManifestNonBootInventory, -} - -impl fmt::Display for ManifestNonBootInventoryDisplay<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let ManifestNonBootInventory { - // The zpool ID is part of the path, so displaying it is redundant. - zpool_id: _, - path, - is_valid, - message, - } = self.inner; - write!( - f, - "{path} ({}): {message}", - if *is_valid { "valid" } else { "invalid" }, - ) - } -} - /// Inventory representation of MUPdate override status. /// /// Part of [`ZoneImageResolverInventory`]. @@ -715,72 +422,6 @@ pub struct MupdateOverrideInventory { pub non_boot_status: IdOrdMap, } -impl MupdateOverrideInventory { - /// Returns a new, empty inventory for tests. - pub fn new_fake() -> Self { - Self { - boot_disk_path: Utf8PathBuf::from( - "/fake/path/install/mupdate_override.json", - ), - boot_override: Ok(None), - non_boot_status: IdOrdMap::new(), - } - } - - /// Returns a displayer for this inventory. - pub fn display(&self) -> MupdateOverrideInventoryDisplay<'_> { - MupdateOverrideInventoryDisplay { inner: self } - } -} - -/// A displayer for [`MupdateOverrideInventory`]. 
-#[derive(Clone, Debug)] -pub struct MupdateOverrideInventoryDisplay<'a> { - inner: &'a MupdateOverrideInventory, -} - -impl fmt::Display for MupdateOverrideInventoryDisplay<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut f = f; - - let MupdateOverrideInventory { - boot_disk_path, - boot_override, - non_boot_status, - } = self.inner; - - writeln!(f, "path on boot disk: {boot_disk_path}")?; - match boot_override { - Ok(Some(boot_override)) => { - writeln!( - f, - "override on boot disk: {}", - boot_override.display() - )?; - } - Ok(None) => { - writeln!(f, "no override on boot disk")?; - } - Err(error) => { - writeln!(f, "error obtaining override on boot disk: {error}")?; - } - } - - if non_boot_status.is_empty() { - writeln!(f, "no non-boot disks")?; - } else { - writeln!(f, "non-boot disk status:")?; - for non_boot in non_boot_status { - let mut indented = IndentWriter::new_skip_initial(" ", f); - writeln!(indented, " - {}", non_boot.display())?; - f = indented.into_inner(); - } - } - - Ok(()) - } -} - /// Inventory representation of the MUPdate override on the boot disk. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, JsonSchema, Serialize)] pub struct MupdateOverrideBootInventory { @@ -792,25 +433,6 @@ pub struct MupdateOverrideBootInventory { pub mupdate_override_id: MupdateOverrideUuid, } -impl MupdateOverrideBootInventory { - /// Returns a displayer for this inventory. - pub fn display(&self) -> MupdateOverrideBootInventoryDisplay<'_> { - MupdateOverrideBootInventoryDisplay { inner: self } - } -} - -#[derive(Clone, Debug)] -pub struct MupdateOverrideBootInventoryDisplay<'a> { - inner: &'a MupdateOverrideBootInventory, -} - -impl fmt::Display for MupdateOverrideBootInventoryDisplay<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let MupdateOverrideBootInventory { mupdate_override_id } = self.inner; - write!(f, "{}", mupdate_override_id) - } -} - /// Inventory representation of the MUPdate override on a non-boot disk. /// /// Unlike [`MupdateOverrideBootInventory`] which is structured since @@ -841,13 +463,6 @@ pub struct MupdateOverrideNonBootInventory { pub message: String, } -impl MupdateOverrideNonBootInventory { - /// Returns a displayer for this inventory. - pub fn display(&self) -> MupdateOverrideNonBootInventoryDisplay<'_> { - MupdateOverrideNonBootInventoryDisplay { inner: self } - } -} - impl IdOrdItem for MupdateOverrideNonBootInventory { type Key<'a> = InternalZpoolUuid; fn key(&self) -> Self::Key<'_> { @@ -856,29 +471,6 @@ impl IdOrdItem for MupdateOverrideNonBootInventory { id_upcast!(); } -/// Displayer for a [`MupdateOverrideNonBootInventory`]. -#[derive(Clone, Debug)] -pub struct MupdateOverrideNonBootInventoryDisplay<'a> { - inner: &'a MupdateOverrideNonBootInventory, -} - -impl fmt::Display for MupdateOverrideNonBootInventoryDisplay<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let MupdateOverrideNonBootInventory { - // The zpool ID is part of the path, so displaying it is redundant. - zpool_id: _, - path, - is_valid, - message, - } = self.inner; - write!( - f, - "{path} ({}): {message}", - if *is_valid { "valid" } else { "invalid" }, - ) - } -} - /// Where Sled Agent should get the image for a zone. #[derive( Clone, @@ -915,16 +507,6 @@ pub enum OmicronZoneImageSource { } impl OmicronZoneImageSource { - /// Return the artifact hash used for the zone image, if the zone's image - /// source is from the artifact store. 
- pub fn artifact_hash(&self) -> Option { - if let OmicronZoneImageSource::Artifact { hash } = self { - Some(*hash) - } else { - None - } - } - // See `OmicronZoneConfig`. This is a separate function instead of being // `impl Default` because we don't want to accidentally use this default // outside of `serde(default)`. @@ -962,6 +544,7 @@ impl OmicronZoneImageSource { /// the six representations if at all possible. If you must add a new one, /// please add it here rather than doing something ad-hoc in the calling code /// so it's more legible. + #[derive( Debug, Clone, @@ -994,187 +577,6 @@ pub enum ZoneKind { Oximeter, } -impl ZoneKind { - /// The NTP prefix used for both BoundaryNtp and InternalNtp zones and - /// services. - pub const NTP_PREFIX: &'static str = "ntp"; - - /// Return a string that is used to construct **zone names**. This string - /// is guaranteed to be stable over time. - pub fn zone_prefix(self) -> &'static str { - match self { - // BoundaryNtp and InternalNtp both use "ntp". - ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => Self::NTP_PREFIX, - ZoneKind::Clickhouse => "clickhouse", - ZoneKind::ClickhouseKeeper => "clickhouse_keeper", - ZoneKind::ClickhouseServer => "clickhouse_server", - // Note "cockroachdb" for historical reasons. - ZoneKind::CockroachDb => "cockroachdb", - ZoneKind::Crucible => "crucible", - ZoneKind::CruciblePantry => "crucible_pantry", - ZoneKind::ExternalDns => "external_dns", - ZoneKind::InternalDns => "internal_dns", - ZoneKind::Nexus => "nexus", - ZoneKind::Oximeter => "oximeter", - } - } - - /// Return a string that identifies **zone image filenames** in the install - /// dataset. - /// - /// This method is exactly equivalent to `format!("{}.tar.gz", - /// self.zone_prefix())`, but returns `&'static str`s. A unit test ensures - /// they stay consistent. - pub fn artifact_in_install_dataset(self) -> &'static str { - match self { - // BoundaryNtp and InternalNtp both use "ntp". - ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => "ntp.tar.gz", - ZoneKind::Clickhouse => "clickhouse.tar.gz", - ZoneKind::ClickhouseKeeper => "clickhouse_keeper.tar.gz", - ZoneKind::ClickhouseServer => "clickhouse_server.tar.gz", - // Note "cockroachdb" for historical reasons. - ZoneKind::CockroachDb => "cockroachdb.tar.gz", - ZoneKind::Crucible => "crucible.tar.gz", - ZoneKind::CruciblePantry => "crucible_pantry.tar.gz", - ZoneKind::ExternalDns => "external_dns.tar.gz", - ZoneKind::InternalDns => "internal_dns.tar.gz", - ZoneKind::Nexus => "nexus.tar.gz", - ZoneKind::Oximeter => "oximeter.tar.gz", - } - } - - /// Return a string that is used to construct **SMF service names**. This - /// string is guaranteed to be stable over time. - pub fn service_prefix(self) -> &'static str { - match self { - // BoundaryNtp and InternalNtp both use "ntp". - ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => Self::NTP_PREFIX, - ZoneKind::Clickhouse => "clickhouse", - ZoneKind::ClickhouseKeeper => "clickhouse_keeper", - ZoneKind::ClickhouseServer => "clickhouse_server", - // Note "cockroachdb" for historical reasons. - ZoneKind::CockroachDb => "cockroachdb", - ZoneKind::Crucible => "crucible", - // Note "crucible/pantry" for historical reasons. - ZoneKind::CruciblePantry => "crucible/pantry", - ZoneKind::ExternalDns => "external_dns", - ZoneKind::InternalDns => "internal_dns", - ZoneKind::Nexus => "nexus", - ZoneKind::Oximeter => "oximeter", - } - } - - /// Return a string suitable for use **in `Name` instances**. This string - /// is guaranteed to be stable over time. 
- /// - /// This string uses dashes rather than underscores, as required by `Name`. - pub fn name_prefix(self) -> &'static str { - match self { - // BoundaryNtp and InternalNtp both use "ntp" here. - ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => Self::NTP_PREFIX, - ZoneKind::Clickhouse => "clickhouse", - ZoneKind::ClickhouseKeeper => "clickhouse-keeper", - ZoneKind::ClickhouseServer => "clickhouse-server", - // Note "cockroach" for historical reasons. - ZoneKind::CockroachDb => "cockroach", - ZoneKind::Crucible => "crucible", - ZoneKind::CruciblePantry => "crucible-pantry", - ZoneKind::ExternalDns => "external-dns", - ZoneKind::InternalDns => "internal-dns", - ZoneKind::Nexus => "nexus", - ZoneKind::Oximeter => "oximeter", - } - } - - /// Return a string that is used for reporting and error messages. This is - /// **not guaranteed** to be stable. - /// - /// If you're displaying a user-friendly message, prefer this method. - pub fn report_str(self) -> &'static str { - match self { - ZoneKind::BoundaryNtp => "boundary_ntp", - ZoneKind::Clickhouse => "clickhouse", - ZoneKind::ClickhouseKeeper => "clickhouse_keeper", - ZoneKind::ClickhouseServer => "clickhouse_server", - ZoneKind::CockroachDb => "cockroach_db", - ZoneKind::Crucible => "crucible", - ZoneKind::CruciblePantry => "crucible_pantry", - ZoneKind::ExternalDns => "external_dns", - ZoneKind::InternalDns => "internal_dns", - ZoneKind::InternalNtp => "internal_ntp", - ZoneKind::Nexus => "nexus", - ZoneKind::Oximeter => "oximeter", - } - } - - /// Return a string used as an artifact name for control-plane zones. - /// This is **not guaranteed** to be stable. - /// - /// These strings match the `ArtifactId::name`s Nexus constructs when - /// unpacking the composite control-plane artifact in a TUF repo. Currently, - /// these are chosen by reading the `pkg` value of the `oxide.json` object - /// inside each zone image tarball. - pub fn artifact_id_name(self) -> &'static str { - match self { - ZoneKind::BoundaryNtp => "ntp", - ZoneKind::Clickhouse => "clickhouse", - ZoneKind::ClickhouseKeeper => "clickhouse_keeper", - ZoneKind::ClickhouseServer => "clickhouse_server", - ZoneKind::CockroachDb => "cockroachdb", - ZoneKind::Crucible => "crucible-zone", - ZoneKind::CruciblePantry => "crucible-pantry-zone", - ZoneKind::ExternalDns => "external-dns", - ZoneKind::InternalDns => "internal-dns", - ZoneKind::InternalNtp => "ntp", - ZoneKind::Nexus => "nexus", - ZoneKind::Oximeter => "oximeter", - } - } - - /// Map an artifact ID name to the corresponding file name in the install - /// dataset. - /// - /// We don't allow mapping artifact ID names to `ZoneKind` because the map - /// isn't bijective -- both internal and boundary NTP zones use the same - /// `ntp` artifact. But the artifact ID name and the name in the install - /// dataset do form a bijective map. - pub fn artifact_id_name_to_install_dataset_file( - artifact_id_name: &str, - ) -> Option<&'static str> { - let zone_kind = match artifact_id_name { - // We arbitrarily select BoundaryNtp to perform the mapping with. 
- "ntp" => ZoneKind::BoundaryNtp, - "clickhouse" => ZoneKind::Clickhouse, - "clickhouse_keeper" => ZoneKind::ClickhouseKeeper, - "clickhouse_server" => ZoneKind::ClickhouseServer, - "cockroachdb" => ZoneKind::CockroachDb, - "crucible-zone" => ZoneKind::Crucible, - "crucible-pantry-zone" => ZoneKind::CruciblePantry, - "external-dns" => ZoneKind::ExternalDns, - "internal-dns" => ZoneKind::InternalDns, - "nexus" => ZoneKind::Nexus, - "oximeter" => ZoneKind::Oximeter, - _ => return None, - }; - - Some(zone_kind.artifact_in_install_dataset()) - } - - /// Return true if an artifact represents a control plane zone image - /// of this kind. - pub fn is_control_plane_zone_artifact( - self, - artifact_id: &ArtifactId, - ) -> bool { - artifact_id - .kind - .to_known() - .map(|kind| matches!(kind, KnownArtifactKind::Zone)) - .unwrap_or(false) - && artifact_id.name == self.artifact_id_name() - } -} - // Used for schemars to be able to be used with camino: // See https://github.com/camino-rs/camino/issues/91#issuecomment-2027908513 fn path_schema(generator: &mut SchemaGenerator) -> Schema { diff --git a/sled-agent/types/versions/src/initial/sled.rs b/sled-agent/types/versions/src/initial/sled.rs index a678b90816b..8d2fda2e707 100644 --- a/sled-agent/types/versions/src/initial/sled.rs +++ b/sled-agent/types/versions/src/initial/sled.rs @@ -4,16 +4,13 @@ //! Sled-related types for the Sled Agent API. -use std::net::{Ipv6Addr, SocketAddrV6}; - use async_trait::async_trait; use daft::Diffable; -use omicron_common::address::{self, Ipv6Subnet, SLED_PREFIX}; +use omicron_common::address::{Ipv6Subnet, SLED_PREFIX}; use omicron_common::ledger::Ledgerable; use omicron_uuid_kinds::SledUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use sha3::{Digest, Sha3_256}; use uuid::Uuid; /// A representation of a Baseboard ID as used in the inventory subsystem @@ -71,28 +68,6 @@ pub struct StartSledAgentRequest { pub body: StartSledAgentRequestBody, } -impl StartSledAgentRequest { - pub fn sled_address(&self) -> SocketAddrV6 { - address::get_sled_address(self.body.subnet) - } - - pub fn switch_zone_ip(&self) -> Ipv6Addr { - address::get_switch_zone_address(self.body.subnet) - } - - /// Compute the sha3_256 digest of `self.rack_id` to use as a `salt` - /// for disk encryption. We don't want to include other values that are - /// consistent across sleds as it would prevent us from moving drives - /// between sleds. - pub fn hash_rack_id(&self) -> [u8; 32] { - // We know the unwrap succeeds as a Sha3_256 digest is 32 bytes - Sha3_256::digest(self.body.rack_id.as_bytes()) - .as_slice() - .try_into() - .unwrap() - } -} - /// This is the actual app level data of `StartSledAgentRequest` /// /// We nest it below the "header" of `generation` and `schema_version` so that diff --git a/sled-agent/types/versions/src/initial/zone_bundle.rs b/sled-agent/types/versions/src/initial/zone_bundle.rs index dd48453502b..6709636b5d0 100644 --- a/sled-agent/types/versions/src/initial/zone_bundle.rs +++ b/sled-agent/types/versions/src/initial/zone_bundle.rs @@ -4,8 +4,6 @@ //! Zone bundle types for Sled Agent API version 1. -use std::cmp::Ordering; -use std::collections::HashSet; use std::time::Duration; use chrono::{DateTime, Utc}; @@ -117,23 +115,6 @@ pub struct ZoneBundleMetadata { pub cause: ZoneBundleCause, } -impl ZoneBundleMetadata { - pub const VERSION: u8 = 0; - - /// Create a new set of metadata for the provided zone. 
- pub fn new(zone_name: &str, cause: ZoneBundleCause) -> Self { - Self { - id: ZoneBundleId { - zone_name: zone_name.to_string(), - bundle_id: Uuid::new_v4(), - }, - time_created: Utc::now(), - version: Self::VERSION, - cause, - } - } -} - /// A dimension along with bundles can be sorted, to determine priority. #[derive( Clone, @@ -163,21 +144,11 @@ pub enum PriorityDimension { /// are pruned first, to maintain the dataset quota. Note that bundles are /// sorted by each dimension in the order in which they appear, with each /// dimension having higher priority than the next. +/// +/// TODO: The serde deserializer does not currently verify uniqueness of +/// dimensions. #[derive(Clone, Copy, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] -pub struct PriorityOrder([PriorityDimension; PriorityOrder::EXPECTED_SIZE]); - -impl std::ops::Deref for PriorityOrder { - type Target = [PriorityDimension; PriorityOrder::EXPECTED_SIZE]; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl Default for PriorityOrder { - fn default() -> Self { - Self::DEFAULT - } -} +pub struct PriorityOrder(pub(crate) [PriorityDimension; 2]); /// Error type for creating a priority order. #[derive(Clone, Debug, PartialEq, Eq)] @@ -186,155 +157,16 @@ pub enum PriorityOrderCreateError { DuplicateFound(PriorityDimension), } -impl std::fmt::Display for PriorityOrderCreateError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - PriorityOrderCreateError::WrongDimensionCount(n) => { - write!( - f, - "expected exactly {} dimensions, found {}", - PriorityOrder::EXPECTED_SIZE, - n - ) - } - PriorityOrderCreateError::DuplicateFound(dim) => { - write!( - f, - "duplicate element found in priority ordering: {:?}", - dim - ) - } - } - } -} - -impl std::error::Error for PriorityOrderCreateError {} - -impl PriorityOrder { - // NOTE: Must match the number of variants in `PriorityDimension`. - pub(crate) const EXPECTED_SIZE: usize = 2; - const DEFAULT: Self = - Self([PriorityDimension::Cause, PriorityDimension::Time]); - - /// Construct a new priority order. - /// - /// This requires that each dimension appear exactly once. - pub fn new( - dims: &[PriorityDimension], - ) -> Result { - if dims.len() != Self::EXPECTED_SIZE { - return Err(PriorityOrderCreateError::WrongDimensionCount( - dims.len(), - )); - } - let mut seen = HashSet::new(); - for dim in dims.iter() { - if !seen.insert(dim) { - return Err(PriorityOrderCreateError::DuplicateFound(*dim)); - } - } - Ok(Self(dims.try_into().unwrap())) - } - - /// Get the priority order as a slice. - pub fn as_slice(&self) -> &[PriorityDimension] { - &self.0 - } - - /// Order zone bundle metadata according to the contained priority. - /// - /// We sort the metadata by each dimension, in the order in which it - /// appears. That means earlier dimensions have higher priority than later - /// ones. - pub fn compare_metadata( - &self, - lhs: &ZoneBundleMetadata, - rhs: &ZoneBundleMetadata, - ) -> Ordering { - for dim in self.0.iter() { - let ord = match dim { - PriorityDimension::Cause => lhs.cause.cmp(&rhs.cause), - PriorityDimension::Time => { - lhs.time_created.cmp(&rhs.time_created) - } - }; - if matches!(ord, Ordering::Equal) { - continue; - } - return ord; - } - Ordering::Equal - } -} - /// A period on which bundles are automatically cleaned up. 
#[derive( Clone, Copy, Deserialize, JsonSchema, PartialEq, PartialOrd, Serialize, )] -pub struct CleanupPeriod(Duration); - -impl Default for CleanupPeriod { - fn default() -> Self { - Self(Duration::from_secs(600)) - } -} +pub struct CleanupPeriod(pub(crate) Duration); /// Error type for creating a cleanup period. #[derive(Clone, Debug, PartialEq, Eq)] pub struct CleanupPeriodCreateError(pub Duration); -impl std::fmt::Display for CleanupPeriodCreateError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "invalid cleanup period ({:?}): must be between {:?} and {:?}, inclusive", - self.0, - CleanupPeriod::MIN.as_duration(), - CleanupPeriod::MAX.as_duration(), - ) - } -} - -impl std::error::Error for CleanupPeriodCreateError {} - -impl CleanupPeriod { - /// The minimum supported cleanup period. - pub const MIN: Self = Self(Duration::from_secs(60)); - - /// The maximum supported cleanup period. - pub const MAX: Self = Self(Duration::from_secs(60 * 60 * 24)); - - /// Construct a new cleanup period, checking that it's valid. - pub fn new(duration: Duration) -> Result { - if duration >= Self::MIN.as_duration() - && duration <= Self::MAX.as_duration() - { - Ok(Self(duration)) - } else { - Err(CleanupPeriodCreateError(duration)) - } - } - - /// Return the period as a duration. - pub const fn as_duration(&self) -> Duration { - self.0 - } -} - -impl TryFrom for CleanupPeriod { - type Error = CleanupPeriodCreateError; - - fn try_from(duration: Duration) -> Result { - Self::new(duration) - } -} - -impl std::fmt::Debug for CleanupPeriod { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - self.0.fmt(f) - } -} - /// The limit on space allowed for zone bundles, as a percentage of the overall /// dataset's quota. #[derive( @@ -347,68 +179,12 @@ impl std::fmt::Debug for CleanupPeriod { PartialOrd, Serialize, )] -pub struct StorageLimit(u8); - -impl std::fmt::Display for StorageLimit { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}%", self.as_u8()) - } -} - -impl Default for StorageLimit { - fn default() -> Self { - StorageLimit(25) - } -} +pub struct StorageLimit(pub(crate) u8); /// Error type for creating a storage limit. #[derive(Clone, Debug, PartialEq, Eq)] pub struct StorageLimitCreateError(pub u8); -impl std::fmt::Display for StorageLimitCreateError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "invalid storage limit ({}): must be expressed as a percentage in ({}, {}]", - self.0, - StorageLimit::MIN.0, - StorageLimit::MAX.0, - ) - } -} - -impl std::error::Error for StorageLimitCreateError {} - -impl StorageLimit { - /// Minimum percentage of dataset quota supported. - pub const MIN: Self = Self(0); - - /// Maximum percentage of dataset quota supported. - pub const MAX: Self = Self(50); - - /// Construct a new limit allowed for zone bundles. - /// - /// This should be expressed as a percentage, in the range (Self::MIN, - /// Self::MAX]. - pub const fn new(percentage: u8) -> Result { - if percentage > Self::MIN.0 && percentage <= Self::MAX.0 { - Ok(Self(percentage)) - } else { - Err(StorageLimitCreateError(percentage)) - } - } - - /// Return the contained quota percentage. - pub const fn as_u8(&self) -> u8 { - self.0 - } - - // Compute the number of bytes available from a dataset quota, in bytes. 
- pub const fn bytes_available(&self, dataset_quota: u64) -> u64 { - (dataset_quota * self.as_u8() as u64) / 100 - } -} - /// The portion of a debug dataset used for zone bundles. #[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)] pub struct BundleUtilization { @@ -443,65 +219,3 @@ pub struct CleanupCount { /// The number of bytes removed. pub bytes: u64, } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_sort_zone_bundle_cause() { - use ZoneBundleCause::*; - let mut original = [Other, TerminatedInstance, UnexpectedZone]; - let expected = [Other, UnexpectedZone, TerminatedInstance]; - original.sort(); - assert_eq!(original, expected); - } - - #[test] - fn test_priority_dimension() { - assert!(PriorityOrder::new(&[]).is_err()); - assert!(PriorityOrder::new(&[PriorityDimension::Cause]).is_err()); - assert!( - PriorityOrder::new(&[ - PriorityDimension::Cause, - PriorityDimension::Cause - ]) - .is_err() - ); - assert!( - PriorityOrder::new(&[ - PriorityDimension::Cause, - PriorityDimension::Cause, - PriorityDimension::Time - ]) - .is_err() - ); - - assert!( - PriorityOrder::new(&[ - PriorityDimension::Cause, - PriorityDimension::Time - ]) - .is_ok() - ); - assert_eq!( - PriorityOrder::new(PriorityOrder::default().as_slice()).unwrap(), - PriorityOrder::default() - ); - } - - #[test] - fn test_storage_limit_bytes_available() { - let pct = StorageLimit(1); - assert_eq!(pct.bytes_available(100), 1); - assert_eq!(pct.bytes_available(1000), 10); - - let pct = StorageLimit(50); - assert_eq!(pct.bytes_available(100), 50); - assert_eq!(pct.bytes_available(1000), 500); - - // Test non-power of 10. - let pct = StorageLimit(25); - assert_eq!(pct.bytes_available(32768), 8192); - } -} diff --git a/sled-agent/types/versions/src/latest.rs b/sled-agent/types/versions/src/latest.rs index 25c684bb743..e2cb3ebc880 100644 --- a/sled-agent/types/versions/src/latest.rs +++ b/sled-agent/types/versions/src/latest.rs @@ -64,7 +64,6 @@ pub mod instance { pub use crate::v1::instance::VmmPutStateBody; pub use crate::v1::instance::VmmPutStateResponse; pub use crate::v1::instance::VmmSpec; - pub use crate::v1::instance::VmmSpecExt; pub use crate::v1::instance::VmmStateRequested; pub use crate::v1::instance::VmmUnregisterResponse; pub use crate::v1::instance::VpcPathParam; @@ -113,6 +112,15 @@ pub mod inventory { pub use crate::v11::inventory::OmicronZoneConfig; pub use crate::v11::inventory::OmicronZoneType; pub use crate::v11::inventory::OmicronZonesConfig; + + pub use crate::impls::inventory::ManifestBootInventoryDisplay; + pub use crate::impls::inventory::ManifestInventoryDisplay; + pub use crate::impls::inventory::ManifestNonBootInventoryDisplay; + pub use crate::impls::inventory::MupdateOverrideBootInventoryDisplay; + pub use crate::impls::inventory::MupdateOverrideInventoryDisplay; + pub use crate::impls::inventory::MupdateOverrideNonBootInventoryDisplay; + pub use crate::impls::inventory::ZoneArtifactInventoryDisplay; + pub use crate::impls::inventory::ZoneImageResolverInventoryDisplay; } pub mod probes { diff --git a/sled-agent/types/versions/src/lib.rs b/sled-agent/types/versions/src/lib.rs index 7b89e956590..da02546faf7 100644 --- a/sled-agent/types/versions/src/lib.rs +++ b/sled-agent/types/versions/src/lib.rs @@ -31,6 +31,7 @@ #[path = "bootstrap_initial/mod.rs"] pub mod bootstrap_v1; +mod impls; pub mod latest; #[path = "initial/mod.rs"] pub mod v1;
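The inherent and trait impls removed from the versioned modules in this change are re-added, unchanged, under the new impls/ module (impls/zone_bundle.rs above, plus the inventory display helpers re-exported through latest::inventory), so the same methods remain available on the re-exported types. A minimal usage sketch follows; the external crate name sled_agent_types_versions and the main() wrapper are illustrative assumptions not shown in this diff, while the methods used (PriorityOrder::new, StorageLimit::new, bytes_available) are the ones moved into impls/zone_bundle.rs.

    // Sketch only. The crate import name `sled_agent_types_versions` is an
    // assumption; the types and methods are those moved into
    // impls/zone_bundle.rs by this change.
    use sled_agent_types_versions::latest::zone_bundle::{
        PriorityDimension, PriorityOrder, StorageLimit,
    };

    fn main() {
        // PriorityOrder::new still requires each dimension to appear exactly
        // once, as it did before the move.
        let order = PriorityOrder::new(&[
            PriorityDimension::Cause,
            PriorityDimension::Time,
        ])
        .expect("each dimension given exactly once");
        assert_eq!(order, PriorityOrder::default());

        // StorageLimit::new still enforces the (0, 50] percentage range, and
        // bytes_available still computes quota * percentage / 100.
        let limit = StorageLimit::new(25).expect("25% is within (0, 50]");
        assert_eq!(limit.bytes_available(32_768), 8_192);
    }

Keeping these impls in a single, non-versioned impls module lets every schema version share one implementation rather than repeating it per version module, which appears to be the motivation for dropping the per-version copies above.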