diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 93ee1828..006c7808 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -25,7 +25,7 @@ jobs: components: clippy, rustfmt - name: Run Clippy - run: cargo clippy -- -D warnings --allow unused_variables + run: cargo clippy -- -D warnings -D clippy::expect_used -D clippy::unwrap_used --allow unused_variables - name: Cargo fmt check run: cargo fmt --check --all diff --git a/Cargo.lock b/Cargo.lock index 301fa18c..348a9493 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1330,6 +1330,7 @@ dependencies = [ "clap", "documented", "fs-err", + "or-panic", "rustls", "serde", "tokio", @@ -2192,6 +2193,7 @@ dependencies = [ "jemallocator", "load_config", "nix", + "or-panic", "parcelona", "pin-project", "ra-rpc", @@ -2250,6 +2252,7 @@ dependencies = [ "host-api", "k256", "load_config", + "or-panic", "ra-rpc", "ra-tls", "rand 0.8.5", @@ -2347,15 +2350,20 @@ version = "0.5.5" dependencies = [ "anyhow", "bon", + "dstack-types", + "flate2", "fs-err", "hex", "hex-literal 1.0.0", "log", "object", + "parity-scale-codec", + "reqwest", "serde", "serde-human-bytes", "serde_json", "sha2 0.10.9", + "tar", "thiserror 2.0.15", ] @@ -2514,6 +2522,7 @@ dependencies = [ "key-provider-client", "load_config", "lspci", + "or-panic", "path-absolutize", "ra-rpc", "rocket", @@ -2776,6 +2785,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "filetime" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.60.2", +] + [[package]] name = "fixed-hash" version = "0.8.0" @@ -4102,6 +4123,7 @@ checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" dependencies = [ "bitflags 2.9.2", "libc", + "redox_syscall 0.5.17", ] [[package]] @@ -4710,6 +4732,12 @@ version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "or-panic" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "596a79faf55e869e7bc0c2162cf2f18a54d4d1112876bceae587ad954fcbd574" + [[package]] name = "os_pipe" version = "1.2.2" @@ -5450,6 +5478,7 @@ version = "0.5.5" dependencies = [ "anyhow", "bon", + "or-panic", "prost-types 0.13.5", "prpc", "ra-tls", @@ -5474,6 +5503,7 @@ dependencies = [ "fs-err", "hex", "hkdf", + "or-panic", "p256", "parity-scale-codec", "rcgen", @@ -6900,6 +6930,7 @@ name = "sodiumbox" version = "0.1.0" dependencies = [ "blake2", + "or-panic", "rand_core 0.6.4", "salsa20", "x25519-dalek", @@ -7008,6 +7039,7 @@ dependencies = [ "load_config", "nix", "notify", + "or-panic", "rocket", "serde", "serde_json", @@ -7149,6 +7181,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tar" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +dependencies = [ + "filetime", + "libc", + "xattr", +] + [[package]] name = "tdx-attest" version = "0.5.5" @@ -8399,6 +8442,16 @@ dependencies = [ "time", ] +[[package]] +name = "xattr" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" +dependencies = [ + "libc", + "rustix 1.0.8", +] + [[package]] name = "xsalsa20poly1305" version = "0.9.1" diff --git a/Cargo.toml b/Cargo.toml index 084ded65..98bc4551 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,6 +86,7 @@ size-parser = { path = "size-parser" } # Core dependencies anyhow = { version = "1.0.97", default-features = false } +or-panic = { version = "1.0", 
default-features = false } chrono = "0.4.40" clap = { version = "4.5.32", features = ["derive", "string"] } dashmap = "6.1.0" diff --git a/certbot/cli/Cargo.toml b/certbot/cli/Cargo.toml index a3f21a10..cd415c08 100644 --- a/certbot/cli/Cargo.toml +++ b/certbot/cli/Cargo.toml @@ -24,3 +24,4 @@ tokio = { workspace = true, features = ["full"] } toml_edit.workspace = true tracing-subscriber.workspace = true rustls.workspace = true +or-panic.workspace = true diff --git a/certbot/cli/src/main.rs b/certbot/cli/src/main.rs index 19fed2e1..083ab641 100644 --- a/certbot/cli/src/main.rs +++ b/certbot/cli/src/main.rs @@ -10,6 +10,7 @@ use certbot::{CertBotConfig, WorkDir}; use clap::Parser; use documented::DocumentedFields; use fs_err as fs; +use or_panic::ResultOrPanic; use serde::{Deserialize, Serialize}; use toml_edit::ser::to_document; @@ -166,7 +167,7 @@ async fn main() -> Result<()> { } rustls::crypto::ring::default_provider() .install_default() - .expect("Failed to install default crypto provider"); + .or_panic("Failed to install default crypto provider"); let args = Args::parse(); match args.command { diff --git a/ct_monitor/src/main.rs b/ct_monitor/src/main.rs index a624835b..6a168cce 100644 --- a/ct_monitor/src/main.rs +++ b/ct_monitor/src/main.rs @@ -140,7 +140,8 @@ impl Monitor { fn validate_domain(domain: &str) -> Result<()> { let domain_regex = - Regex::new(r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$").unwrap(); + Regex::new(r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$") + .context("invalid regex")?; if !domain_regex.is_match(domain) { bail!("invalid domain name"); } diff --git a/dstack-mr/Cargo.toml b/dstack-mr/Cargo.toml index f695d6b6..32a96f96 100644 --- a/dstack-mr/Cargo.toml +++ b/dstack-mr/Cargo.toml @@ -24,3 +24,10 @@ hex-literal.workspace = true fs-err.workspace = true bon.workspace = true log.workspace = true +scale.workspace = true + +[dev-dependencies] +dstack-types.workspace = true +reqwest = { 
version = "0.12", default-features = false, features = ["blocking", "rustls-tls"] } +flate2 = "1.0" +tar = "0.4" diff --git a/dstack-mr/cli/src/main.rs b/dstack-mr/cli/src/main.rs index ff0274b2..f0fc2596 100644 --- a/dstack-mr/cli/src/main.rs +++ b/dstack-mr/cli/src/main.rs @@ -118,7 +118,7 @@ fn main() -> Result<()> { .context("Failed to measure machine configuration")?; if config.json { - println!("{}", serde_json::to_string_pretty(&measurements).unwrap()); + println!("{}", serde_json::to_string_pretty(&measurements)?); } else { println!("Machine measurements:"); println!("MRTD: {}", hex::encode(measurements.mrtd)); diff --git a/dstack-mr/src/acpi.rs b/dstack-mr/src/acpi.rs index a93f30e1..5976c10f 100644 --- a/dstack-mr/src/acpi.rs +++ b/dstack-mr/src/acpi.rs @@ -7,6 +7,7 @@ use anyhow::{bail, Context, Result}; use log::debug; +use scale::Decode; use crate::Machine; @@ -392,6 +393,13 @@ fn qemu_loader_append(data: &mut Vec, cmd: LoaderCmd) { } } +/// ACPI table header (first 8 bytes of every ACPI table) +#[derive(Debug, Decode)] +struct AcpiTableHeader { + signature: [u8; 4], + length: u32, +} + /// Searches for an ACPI table with the given signature and returns its offset, /// checksum offset, and length. 
fn find_acpi_table(tables: &[u8], signature: &str) -> Result<(u32, u32, u32)> { @@ -407,22 +415,21 @@ fn find_acpi_table(tables: &[u8], signature: &str) -> Result<(u32, u32, u32)> { bail!("Table not found: {signature}"); } - let tbl_sig = &tables[offset..offset + 4]; - let tbl_len_bytes: [u8; 4] = tables[offset + 4..offset + 8].try_into().unwrap(); - let tbl_len = u32::from_le_bytes(tbl_len_bytes) as usize; + let header = AcpiTableHeader::decode(&mut &tables[offset..]) + .context("failed to decode ACPI table header")?; - if tbl_sig == sig_bytes { + if header.signature == sig_bytes { // Found the table - return Ok((offset as u32, (offset + 9) as u32, tbl_len as u32)); + return Ok((offset as u32, (offset + 9) as u32, header.length)); } - if tbl_len == 0 { + if header.length == 0 { // Invalid table length, stop searching - bail!("Found table with zero length at offset {offset}"); + bail!("found table with zero length at offset {offset}"); } // Move to the next table - offset += tbl_len; + offset += header.length as usize; } - bail!("Table not found: {signature}"); + bail!("table not found: {signature}"); } diff --git a/dstack-mr/src/kernel.rs b/dstack-mr/src/kernel.rs index 51c7bb46..878a2b01 100644 --- a/dstack-mr/src/kernel.rs +++ b/dstack-mr/src/kernel.rs @@ -129,7 +129,7 @@ fn patch_kernel( let mut kd = kernel_data.to_vec(); - let protocol = u16::from_le_bytes(kd[0x206..0x208].try_into().unwrap()); + let protocol = u16::from_le_bytes(kd[0x206..0x208].try_into().context("impossible failure")?); let (real_addr, cmdline_addr) = if protocol < 0x200 || (kd[0x211] & 0x01) == 0 { (0x90000_u32, 0x9a000_u32) @@ -158,14 +158,16 @@ fn patch_kernel( bail!("the kernel image is too old for ramdisk"); } let mut initrd_max = if protocol >= 0x20c { - let xlf = u16::from_le_bytes(kd[0x236..0x238].try_into().unwrap()); + let xlf = + u16::from_le_bytes(kd[0x236..0x238].try_into().context("impossible failure")?); if (xlf & 0x40) != 0 { u32::MAX } else { 0x37ffffff } } else if protocol 
>= 0x203 { - let max = u32::from_le_bytes(kd[0x22c..0x230].try_into().unwrap()); + let max = + u32::from_le_bytes(kd[0x22c..0x230].try_into().context("impossible failure")?); if max == 0 { 0x37ffffff } else { diff --git a/dstack-mr/src/tdvf.rs b/dstack-mr/src/tdvf.rs index 246cced6..b41dee76 100644 --- a/dstack-mr/src/tdvf.rs +++ b/dstack-mr/src/tdvf.rs @@ -4,6 +4,7 @@ use anyhow::{anyhow, bail, Context, Result}; use hex_literal::hex; +use scale::Decode; use sha2::{Digest, Sha384}; use crate::acpi::Tables; @@ -24,7 +25,13 @@ pub enum PageAddOrder { SinglePass, } -#[derive(Debug)] +/// Helper to decode little-endian integers from byte slice using scale codec +fn decode_le<T: Decode>(data: &[u8], context: &str) -> Result<T> { + T::decode(&mut &data[..]) + .with_context(|| format!("failed to decode {} as little-endian", context)) +} + +#[derive(Debug, Decode)] struct TdvfSection { data_offset: u32, raw_data_size: u32, @@ -34,6 +41,14 @@ struct TdvfSection { attributes: u32, } +#[derive(Debug, Decode)] +struct TdvfDescriptor { + signature: [u8; 4], // "TDVF" + _length: u32, + version: u32, + num_sections: u32, +} + #[derive(Debug)] pub(crate) struct Tdvf<'a> { fw: &'a [u8], @@ -77,6 +92,11 @@ fn measure_tdx_efi_variable(vendor_guid: &str, var_name: &str) -> Result } impl<'a> Tdvf<'a> { + /// Parse TDVF firmware metadata + /// + /// This function uses scale codec for clean, panic-free parsing. + /// Correctness is verified by integration test in tests/tdvf_parse.rs + /// which ensures identical measurements to the original implementation. 
pub fn parse(fw: &'a [u8]) -> Result<Tdvf<'a>> { const TDX_METADATA_OFFSET_GUID: &str = "e47a6535-984a-4798-865e-4685a7bf8ec2"; const TABLE_FOOTER_GUID: &str = "96b582de-1fb2-45f7-baea-a366c55a082d"; @@ -99,8 +119,7 @@ impl<'a> Tdvf<'a> { if offset < 18 { bail!("TDVF firmware offset too small for tables length"); } - let tables_len = - u16::from_le_bytes(fw[offset - 18..offset - 16].try_into().unwrap()) as usize; + let tables_len = decode_le::<u16>(&fw[offset - 18..offset - 16], "tables length")? as usize; if tables_len == 0 || tables_len > offset.saturating_sub(18) { bail!("Failed to parse TDVF metadata: Invalid tables length"); } @@ -133,37 +152,34 @@ impl<'a> Tdvf<'a> { bail!("TDVF metadata data too small"); } let tdvf_meta_offset_raw = - u32::from_le_bytes(data[data.len() - 4..].try_into().unwrap()) as usize; + decode_le::<u32>(&data[data.len() - 4..], "TDVF metadata offset")? as usize; if tdvf_meta_offset_raw > fw.len() { bail!("TDVF metadata offset exceeds firmware size"); } let tdvf_meta_offset = fw.len() - tdvf_meta_offset_raw; - let tdvf_meta_desc = &fw[tdvf_meta_offset..tdvf_meta_offset + 16]; - if &tdvf_meta_desc[..4] != b"TDVF" { + // Decode TDVF descriptor using scale codec + let descriptor = TdvfDescriptor::decode(&mut &fw[tdvf_meta_offset..]) + .context("failed to decode TDVF descriptor")?; + + if &descriptor.signature != b"TDVF" { bail!("Failed to parse TDVF metadata: Invalid TDVF descriptor"); } - let tdvf_version = u32::from_le_bytes(tdvf_meta_desc[8..12].try_into().unwrap()); - if tdvf_version != 1 { + if descriptor.version != 1 { bail!("Failed to parse TDVF metadata: Unsupported TDVF version"); } - let num_sections = u32::from_le_bytes(tdvf_meta_desc[12..16].try_into().unwrap()) as usize; + let num_sections = descriptor.num_sections as usize; let mut meta = Tdvf { fw, sections: Vec::new(), }; + + // Decode all sections using scale codec for i in 0..num_sections { let sec_offset = tdvf_meta_offset + 16 + 32 * i; - let sec_data = &fw[sec_offset..sec_offset + 32]; - 
let s = TdvfSection { - data_offset: u32::from_le_bytes(sec_data[0..4].try_into().unwrap()), - raw_data_size: u32::from_le_bytes(sec_data[4..8].try_into().unwrap()), - memory_address: u64::from_le_bytes(sec_data[8..16].try_into().unwrap()), - memory_data_size: u64::from_le_bytes(sec_data[16..24].try_into().unwrap()), - sec_type: u32::from_le_bytes(sec_data[24..28].try_into().unwrap()), - attributes: u32::from_le_bytes(sec_data[28..32].try_into().unwrap()), - }; + let s = TdvfSection::decode(&mut &fw[sec_offset..]) + .with_context(|| format!("failed to decode TDVF section {}", i))?; if s.memory_address % PAGE_SIZE != 0 { bail!("Failed to parse TDVF metadata: Section memory address not aligned"); @@ -325,7 +341,7 @@ impl<'a> Tdvf<'a> { td_hob.extend_from_slice(&length.to_le_bytes()); }; - let (_, last_start, last_end) = memory_acceptor.ranges.pop().expect("No ranges"); + let (_, last_start, last_end) = memory_acceptor.ranges.pop().context("No ranges")?; for (accepted, start, end) in memory_acceptor.ranges { if end < start { diff --git a/dstack-mr/tests/tdvf_parse.rs b/dstack-mr/tests/tdvf_parse.rs new file mode 100644 index 00000000..6c7e9382 --- /dev/null +++ b/dstack-mr/tests/tdvf_parse.rs @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// +// SPDX-License-Identifier: Apache-2.0 + +//! Integration test to verify TDVF firmware parsing correctness +//! +//! This test ensures that the scale codec-based parsing produces +//! identical measurements to the original implementation. +//! +//! The test downloads a real dstack release from GitHub and verifies +//! that the measurements remain consistent with the baseline. 
+ +use anyhow::{Context, Result}; +use dstack_mr::Machine; +use std::path::PathBuf; + +// dstack release to download for testing +const DSTACK_VERSION: &str = "v0.5.5"; +const DSTACK_RELEASE_URL: &str = + "https://github.com/Dstack-TEE/meta-dstack/releases/download/v0.5.5/dstack-0.5.5.tar.gz"; + +// Expected measurements from baseline (verified with original implementation) +// These are the measurements for dstack v0.5.5 with default configuration +// Generated with: dstack-mr measure /path/to/dstack-0.5.5/metadata.json --json +const EXPECTED_MRTD: &str = "f06dfda6dce1cf904d4e2bab1dc370634cf95cefa2ceb2de2eee127c9382698090d7a4a13e14c536ec6c9c3c8fa87077"; +const EXPECTED_RTMR0: &str = "68102e7b524af310f7b7d426ce75481e36c40f5d513a9009c046e9d37e31551f0134d954b496a3357fd61d03f07ffe96"; +const EXPECTED_RTMR1: &str = "daa9380dc33b14728a9adb222437cf14db2d40ffc4d7061d8f3c329f6c6b339f71486d33521287e8faeae22301f4d815"; +const EXPECTED_RTMR2: &str = "1c41080c9c74be158e55b92f2958129fc1265647324c4a0dc403292cfa41d4c529f39093900347a11c8c1b82ed8c5edf"; + +/// Download and extract dstack release tarball if not already cached +fn get_test_image_dir() -> Result { + let cache_dir = std::env::temp_dir().join("dstack-mr-test-cache"); + let version_dir = cache_dir.join(DSTACK_VERSION); + let image_dir = version_dir.join("dstack-0.5.5"); + let metadata_path = image_dir.join("metadata.json"); + + // Return cached version if it exists + if metadata_path.exists() { + return Ok(image_dir); + } + + eprintln!("Downloading dstack {DSTACK_VERSION} release for testing...",); + std::fs::create_dir_all(&version_dir)?; + + // Download tarball + let tarball_path = version_dir.join("dstack.tar.gz"); + let response = + reqwest::blocking::get(DSTACK_RELEASE_URL).context("failed to download dstack release")?; + + if !response.status().is_success() { + anyhow::bail!("failed to download: HTTP {}", response.status()); + } + + let bytes = response.bytes().context("failed to read response")?; + 
std::fs::write(&tarball_path, bytes).context("failed to write tarball")?; + + eprintln!("Extracting tarball..."); + + // Extract tarball + let tarball = std::fs::File::open(&tarball_path)?; + let decoder = flate2::read::GzDecoder::new(tarball); + let mut archive = tar::Archive::new(decoder); + archive + .unpack(&version_dir) + .context("failed to extract tarball")?; + + // Verify extraction + if !metadata_path.exists() { + anyhow::bail!("metadata.json not found after extraction"); + } + + eprintln!("Test image ready at: {}", image_dir.display()); + + Ok(image_dir) +} + +#[test] +#[ignore] // Run with: cargo test --release -- --ignored +fn test_tdvf_parse_produces_correct_measurements() -> Result<()> { + // Get or download test image + let image_dir = get_test_image_dir()?; + let metadata_path = image_dir.join("metadata.json"); + + let metadata = std::fs::read_to_string(&metadata_path) + .with_context(|| format!("failed to read {}", metadata_path.display()))?; + let image_info: dstack_types::ImageInfo = serde_json::from_str(&metadata)?; + + let firmware_path = image_dir.join(&image_info.bios).display().to_string(); + let kernel_path = image_dir.join(&image_info.kernel).display().to_string(); + let initrd_path = image_dir.join(&image_info.initrd).display().to_string(); + let cmdline = image_info.cmdline + " initrd=initrd"; + + eprintln!("Building machine configuration..."); + let machine = Machine::builder() + .cpu_count(1) + .memory_size(2 * 1024 * 1024 * 1024) // 2GB + .firmware(&firmware_path) + .kernel(&kernel_path) + .initrd(&initrd_path) + .kernel_cmdline(&cmdline) + .two_pass_add_pages(true) + .pic(true) + .smm(false) + .hugepages(false) + .num_gpus(0) + .num_nvswitches(0) + .hotplug_off(false) + .root_verity(true) + .build(); + + eprintln!("Computing measurements (this parses TDVF firmware)..."); + let measurements = machine.measure()?; + + eprintln!("Verifying measurements against baseline..."); + + // Verify measurements match expected values + assert_eq!( 
+ hex::encode(&measurements.mrtd), + EXPECTED_MRTD, + "MRTD mismatch - TDVF parsing may have regressed" + ); + assert_eq!( + hex::encode(&measurements.rtmr0), + EXPECTED_RTMR0, + "RTMR0 mismatch - TDVF parsing may have regressed" + ); + assert_eq!( + hex::encode(&measurements.rtmr1), + EXPECTED_RTMR1, + "RTMR1 mismatch - TDVF parsing may have regressed" + ); + assert_eq!( + hex::encode(&measurements.rtmr2), + EXPECTED_RTMR2, + "RTMR2 mismatch - TDVF parsing may have regressed" + ); + + eprintln!("✅ All measurements match baseline - TDVF parsing is correct!"); + + Ok(()) +} diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index d150126c..8a30c6da 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -50,6 +50,7 @@ reqwest = { workspace = true, features = ["json"] } hyper = { workspace = true, features = ["server", "http1"] } hyper-util = { version = "0.1", features = ["tokio"] } jemallocator.workspace = true +or-panic.workspace = true [target.'cfg(unix)'.dependencies] nix = { workspace = true, features = ["resource"] } diff --git a/gateway/rpc/build.rs b/gateway/rpc/build.rs index 77e6a9e8..fe19530a 100644 --- a/gateway/rpc/build.rs +++ b/gateway/rpc/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_package_emission() diff --git a/gateway/src/main.rs b/gateway/src/main.rs index 61d25632..d4544ffd 100644 --- a/gateway/src/main.rs +++ b/gateway/src/main.rs @@ -167,7 +167,8 @@ async fn main() -> Result<()> { info!("Starting background tasks"); state.start_bg_tasks().await?; state.lock().reconfigure()?; - proxy::start(proxy_config, state.clone()); + + proxy::start(proxy_config, state.clone()).context("failed to start the proxy")?; let admin_figment = Figment::new() diff --git a/gateway/src/main_service.rs 
b/gateway/src/main_service.rs index 9162ff40..c85d501c 100644 --- a/gateway/src/main_service.rs +++ b/gateway/src/main_service.rs @@ -23,6 +23,7 @@ use dstack_gateway_rpc::{ use dstack_guest_agent_rpc::{dstack_guest_client::DstackGuestClient, RawQuoteArgs}; use fs_err as fs; use http_client::prpc::PrpcClient; +use or_panic::ResultOrPanic; use ra_rpc::{CallContext, RpcCall, VerifiedAttestation}; use ra_tls::attestation::QuoteContentType; use rand::seq::IteratorRandom; @@ -100,7 +101,7 @@ impl Proxy { impl ProxyInner { pub(crate) fn lock(&self) -> MutexGuard { - self.state.lock().expect("Failed to lock AppState") + self.state.lock().or_panic("Failed to lock AppState") } pub async fn new(config: Config, my_app_id: Option>) -> Result { diff --git a/gateway/src/proxy.rs b/gateway/src/proxy.rs index 75cc286e..73b947cc 100644 --- a/gateway/src/proxy.rs +++ b/gateway/src/proxy.rs @@ -166,7 +166,7 @@ pub async fn proxy_main(config: &ProxyConfig, proxy: Proxy) -> Result<()> { .enable_all() .worker_threads(config.workers) .build() - .expect("Failed to build Tokio runtime"); + .context("Failed to build Tokio runtime")?; let dotted_base_domain = { let base_domain = config.base_domain.as_str(); @@ -232,16 +232,16 @@ fn next_connection_id() -> usize { COUNTER.fetch_add(1, Ordering::Relaxed) } -pub fn start(config: ProxyConfig, app_state: Proxy) { +pub fn start(config: ProxyConfig, app_state: Proxy) -> Result<()> { + // Create a new single-threaded runtime + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .context("Failed to build Tokio runtime")?; + std::thread::Builder::new() .name("proxy-main".to_string()) .spawn(move || { - // Create a new single-threaded runtime - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("Failed to build Tokio runtime"); - // Run the proxy_main function in this runtime if let Err(err) = rt.block_on(proxy_main(&config, app_state)) { error!( @@ -250,7 +250,8 @@ pub fn 
start(config: ProxyConfig, app_state: Proxy) { ); } }) - .expect("Failed to spawn proxy-main thread"); + .context("Failed to spawn proxy-main thread")?; + Ok(()) } #[cfg(test)] diff --git a/gateway/src/proxy/tls_terminate.rs b/gateway/src/proxy/tls_terminate.rs index 9c159492..ad19ebf4 100644 --- a/gateway/src/proxy/tls_terminate.rs +++ b/gateway/src/proxy/tls_terminate.rs @@ -24,6 +24,8 @@ use tokio::time::timeout; use tokio_rustls::{rustls, server::TlsStream, TlsAcceptor}; use tracing::{debug, info}; +use or_panic::ResultOrPanic; + use crate::config::{CryptoProvider, ProxyConfig, TlsVersion}; use crate::main_service::Proxy; @@ -278,12 +280,12 @@ impl Proxy { let acceptor = if h2 { self.h2_acceptor .read() - .expect("Failed to acquire read lock for TLS acceptor") + .or_panic("lock should never fail") .clone() } else { self.acceptor .read() - .expect("Failed to acquire read lock for TLS acceptor") + .or_panic("lock should never fail") .clone() }; let tls_stream = timeout( diff --git a/guest-agent/Cargo.toml b/guest-agent/Cargo.toml index 101c63d7..b8e0c881 100644 --- a/guest-agent/Cargo.toml +++ b/guest-agent/Cargo.toml @@ -50,3 +50,4 @@ ring.workspace = true ed25519-dalek.workspace = true tempfile.workspace = true rand.workspace = true +or-panic.workspace = true diff --git a/guest-agent/rpc/build.rs b/guest-agent/rpc/build.rs index 77e6a9e8..fe19530a 100644 --- a/guest-agent/rpc/build.rs +++ b/guest-agent/rpc/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_package_emission() diff --git a/guest-agent/src/rpc_service.rs b/guest-agent/src/rpc_service.rs index 3f78d702..16fe61a4 100644 --- a/guest-agent/src/rpc_service.rs +++ b/guest-agent/src/rpc_service.rs @@ -23,6 +23,7 @@ use ed25519_dalek::{ }; use fs_err 
as fs; use k256::ecdsa::SigningKey; +use or_panic::ResultOrPanic; use ra_rpc::{Attestation, CallContext, RpcCall}; use ra_tls::{ attestation::{QuoteContentType, DEFAULT_HASH_ALGORITHM}, @@ -78,13 +79,18 @@ impl AppStateInner { impl AppState { fn maybe_request_demo_cert(&self) { let state = self.inner.clone(); - if !state.demo_cert.read().unwrap().is_empty() { + if !state + .demo_cert + .read() + .or_panic("lock should never fail") + .is_empty() + { return; } tokio::spawn(async move { match state.request_demo_cert().await { Ok(demo_cert) => { - *state.demo_cert.write().unwrap() = demo_cert; + *state.demo_cert.write().or_panic("lock should never fail") = demo_cert; } Err(e) => { error!("Failed to request demo cert: {e}"); @@ -179,7 +185,12 @@ pub async fn get_info(state: &AppState, external: bool) -> Result { os_image_hash: app_info.os_image_hash.clone(), key_provider_info: String::from_utf8(app_info.key_provider_info).unwrap_or_default(), compose_hash: app_info.compose_hash.clone(), - app_cert: state.inner.demo_cert.read().unwrap().clone(), + app_cert: state + .inner + .demo_cert + .read() + .or_panic("lock should not fail") + .clone(), tcb_info, vm_config, }) @@ -308,7 +319,11 @@ impl DstackGuestRpc for InternalRpcHandler { .await?; let (signature, public_key) = match request.algorithm.as_str() { "ed25519" => { - let key_bytes: [u8; 32] = key_response.key.try_into().expect("Key is incorrect"); + let key_bytes: [u8; 32] = key_response + .key + .try_into() + .ok() + .context("Key is incorrect")?; let signing_key = Ed25519SigningKey::from_bytes(&key_bytes); let signature = signing_key.sign(&request.data); let public_key = signing_key.verifying_key().to_bytes().to_vec(); @@ -351,7 +366,12 @@ impl DstackGuestRpc for InternalRpcHandler { let valid = match request.algorithm.as_str() { "ed25519" => { let verifying_key = ed25519_dalek::VerifyingKey::from_bytes( - &request.public_key.as_slice().try_into().unwrap(), + &request + .public_key + .as_slice() + .try_into() + .ok() + 
.context("invalid public key")?, )?; let signature = ed25519_dalek::Signature::from_slice(&request.signature)?; verifying_key.verify(&request.data, &signature).is_ok() @@ -562,7 +582,11 @@ impl WorkerRpc for ExternalRpcHandler { match request.algorithm.as_str() { "ed25519" => { - let key_bytes: [u8; 32] = key_response.key.try_into().expect("Key is incorrect"); + let key_bytes: [u8; 32] = key_response + .key + .try_into() + .ok() + .context("Key is incorrect")?; let ed25519_key = Ed25519SigningKey::from_bytes(&key_bytes); let ed25519_pubkey = ed25519_key.verifying_key().to_bytes(); diff --git a/guest-api/build.rs b/guest-api/build.rs index 2292ec24..dc3e9d96 100644 --- a/guest-api/build.rs +++ b/guest-api/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_service_name_emission() diff --git a/host-api/build.rs b/host-api/build.rs index 2292ec24..dc3e9d96 100644 --- a/host-api/build.rs +++ b/host-api/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_service_name_emission() diff --git a/http-client/src/lib.rs b/http-client/src/lib.rs index cd121c96..ee5c7f9f 100644 --- a/http-client/src/lib.rs +++ b/http-client/src/lib.rs @@ -36,14 +36,14 @@ pub async fn http_request( body: &[u8], ) -> Result<(u16, Vec)> { debug!("Sending HTTP request to {base}, path={path}"); - let mut response = if base.starts_with("unix:") { + let mut response = if let Some(uds) = base.strip_prefix("unix:") { let path = if path.starts_with("/") { path.to_string() } else { format!("/{path}") 
}; let client: Client> = Client::unix(); - let unix_uri: hyper::Uri = Uri::new(base.strip_prefix("unix:").unwrap(), &path).into(); + let unix_uri: hyper::Uri = Uri::new(uds, &path).into(); let req = Request::builder() .method(method) .uri(unix_uri) diff --git a/kms/rpc/build.rs b/kms/rpc/build.rs index 77e6a9e8..fe19530a 100644 --- a/kms/rpc/build.rs +++ b/kms/rpc/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_package_emission() diff --git a/kms/src/main_service.rs b/kms/src/main_service.rs index 8e5b5f3b..66fbf87b 100644 --- a/kms/src/main_service.rs +++ b/kms/src/main_service.rs @@ -203,7 +203,9 @@ impl RpcHandler { fn cache_mrs(&self, key: &str, mrs: &Mrs) -> Result<()> { let path = self.mr_cache_dir().join(key); - fs::create_dir_all(path.parent().unwrap()).context("Failed to create cache directory")?; + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).context("Failed to create cache directory")?; + } safe_write::safe_write( &path, serde_json::to_string(mrs).context("Failed to serialize cached MRs")?, diff --git a/ra-rpc/Cargo.toml b/ra-rpc/Cargo.toml index f762308a..2ec2b636 100644 --- a/ra-rpc/Cargo.toml +++ b/ra-rpc/Cargo.toml @@ -23,6 +23,7 @@ rocket-vsock-listener = { workspace = true, optional = true } serde.workspace = true x509-parser.workspace = true prost-types = { workspace = true, optional = true } +or-panic.workspace = true [features] default = ["rocket", "client"] diff --git a/ra-rpc/src/rocket_helper.rs b/ra-rpc/src/rocket_helper.rs index 4abfe3ab..dcdbfa15 100644 --- a/ra-rpc/src/rocket_helper.rs +++ b/ra-rpc/src/rocket_helper.rs @@ -308,7 +308,7 @@ pub async fn handle_prpc_impl>( (Some(quote_verifier), Some(attestation)) => { let pubkey = request .certificate - 
.expect("certificate is missing") + .context("certificate is missing")? .public_key() .raw .to_vec(); diff --git a/ra-tls/Cargo.toml b/ra-tls/Cargo.toml index 9d7be749..c6451fbb 100644 --- a/ra-tls/Cargo.toml +++ b/ra-tls/Cargo.toml @@ -33,3 +33,4 @@ scale.workspace = true cc-eventlog.workspace = true serde-human-bytes.workspace = true +or-panic.workspace = true diff --git a/ra-tls/src/attestation.rs b/ra-tls/src/attestation.rs index 65f591d0..103f7f62 100644 --- a/ra-tls/src/attestation.rs +++ b/ra-tls/src/attestation.rs @@ -18,6 +18,7 @@ use x509_parser::parse_x509_certificate; use crate::{oids, traits::CertExt}; use cc_eventlog::TdxEventLog as EventLog; +use or_panic::ResultOrPanic; use serde_human_bytes as hex_bytes; /// The content type of a quote. A CVM should only generate quotes for these types. @@ -50,7 +51,7 @@ impl QuoteContentType<'_> { /// Convert the content to the report data. pub fn to_report_data(&self, content: &[u8]) -> [u8; 64] { self.to_report_data_with_hash(content, "") - .expect("sha512 hash should not fail") + .or_panic("sha512 hash should not fail") } /// Convert the content to the report data with a specific hash algorithm. diff --git a/ra-tls/src/cert.rs b/ra-tls/src/cert.rs index 16b9399c..2cfa3e0c 100644 --- a/ra-tls/src/cert.rs +++ b/ra-tls/src/cert.rs @@ -149,7 +149,7 @@ impl CertSigningRequest { // Sign the encoded CSR let signature = key_pair .sign(&rng, &encoded) - .expect("Failed to sign CSR") + .context("Failed to sign CSR")? 
.as_ref() .to_vec(); Ok(signature) diff --git a/size-parser/src/lib.rs b/size-parser/src/lib.rs index fab531cc..b7cf3989 100644 --- a/size-parser/src/lib.rs +++ b/size-parser/src/lib.rs @@ -126,12 +126,10 @@ impl MemorySize { } // Handle numbers with suffixes - let len = s.len(); - if len == 0 { + let Some(last_char) = s.chars().last() else { return Err(MemorySizeError::Empty); - } + }; - let last_char = s.chars().last().unwrap(); let multiplier = match last_char.to_ascii_lowercase() { 'k' => 1024u64, 'm' => 1024u64.saturating_mul(1024), @@ -142,8 +140,7 @@ impl MemorySize { .saturating_mul(1024), _ => return Err(MemorySizeError::UnknownSuffix(last_char)), }; - - let num_part = &s[0..len - 1]; + let num_part = s.trim_end_matches(last_char); let num = num_part .parse::() .map_err(|_| MemorySizeError::InvalidNumber(num_part.to_string()))?; diff --git a/sodiumbox/Cargo.toml b/sodiumbox/Cargo.toml index ffd55962..9fcd6702 100644 --- a/sodiumbox/Cargo.toml +++ b/sodiumbox/Cargo.toml @@ -17,3 +17,4 @@ xsalsa20poly1305.workspace = true salsa20.workspace = true rand_core.workspace = true blake2.workspace = true +or-panic.workspace = true diff --git a/sodiumbox/src/lib.rs b/sodiumbox/src/lib.rs index f78ab346..8375776c 100644 --- a/sodiumbox/src/lib.rs +++ b/sodiumbox/src/lib.rs @@ -18,6 +18,7 @@ use blake2::{ digest::{Update, VariableOutput}, Blake2bVar, }; +use or_panic::ResultOrPanic; use rand_core::OsRng; use xsalsa20poly1305::{aead::Aead, consts::U10, KeyInit, XSalsa20Poly1305}; @@ -94,14 +95,16 @@ pub fn seal(message: &[u8], recipient_pk: &PublicKey) -> Vec { // Compute nonce: blake2b(ephemeral_pk || recipient_pk, outlen=24) let nonce = derive_nonce(ephemeral_pk.as_bytes(), recipient_pk.as_bytes()) - .expect("Failed to derive nonce"); + .or_panic("Failed to derive nonce"); // Create the XSalsa20Poly1305 cipher with the derived key let cipher = XSalsa20Poly1305::new_from_slice(&key_bytes) - .expect("Failed to create XSalsa20Poly1305 cipher"); + .or_panic("Failed to 
create XSalsa20Poly1305 cipher"); // Encrypt the message - let ciphertext = cipher.encrypt(&nonce, message).expect("Encryption failed"); + let ciphertext = cipher + .encrypt(&nonce, message) + .or_panic("Encryption failed"); // Combine the ephemeral public key and ciphertext to form the sealed box let mut sealed_box = Vec::with_capacity(PUBLICKEYBYTES + ciphertext.len()); diff --git a/supervisor/Cargo.toml b/supervisor/Cargo.toml index 647a0b1e..e9e91b55 100644 --- a/supervisor/Cargo.toml +++ b/supervisor/Cargo.toml @@ -21,6 +21,7 @@ libc.workspace = true load_config.workspace = true nix = { workspace = true, features = ["resource"] } notify.workspace = true +or-panic.workspace = true rocket = { workspace = true, features = ["json"] } serde = { workspace = true, features = ["derive"] } serde_json.workspace = true diff --git a/supervisor/src/process.rs b/supervisor/src/process.rs index 769d586e..887aac14 100644 --- a/supervisor/src/process.rs +++ b/supervisor/src/process.rs @@ -6,6 +6,7 @@ use anyhow::{bail, Result}; use bon::Builder; use fs_err as fs; use notify::{RecursiveMode, Watcher}; +use or_panic::ResultOrPanic; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::io::Write; @@ -97,6 +98,7 @@ impl ProcessStateRT { } mod systime { + use or_panic::ResultOrPanic; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -104,8 +106,12 @@ mod systime { time: &Option, serializer: S, ) -> Result { - time.map(|t| t.duration_since(UNIX_EPOCH).unwrap().as_secs()) - .serialize(serializer) + time.map(|t| { + t.duration_since(UNIX_EPOCH) + .or_panic("since zero should never fail") + .as_secs() + }) + .serialize(serializer) } pub(crate) fn deserialize<'de, D: Deserializer<'de>>( @@ -162,7 +168,7 @@ impl Process { } pub(crate) fn lock(&self) -> MutexGuard { - self.state.lock().unwrap() + self.state.lock().or_panic("lock should never fail") } pub fn start(&self) -> Result<()> { @@ 
-199,7 +205,7 @@ impl Process { // Update process state { - let mut state = self.state.lock().unwrap(); + let mut state = self.lock(); state.started_at = Some(SystemTime::now()); state.status = ProcessStatus::Running; state.pid = pid; @@ -263,7 +269,7 @@ impl Process { } }; if let Some(state) = state { - let mut state = state.lock().unwrap(); + let mut state = state.lock().or_panic("lock should never fail"); state.status = next_status; state.stopped_at = Some(SystemTime::now()); } @@ -276,7 +282,7 @@ impl Process { } pub fn stop(&self) -> Result<()> { - let mut state = self.state.lock().unwrap(); + let mut state = self.lock(); state.started = false; let is_running = state.status.is_running(); let Some(stop_tx) = state.kill_tx.take() else { @@ -295,7 +301,7 @@ impl Process { } pub fn info(&self) -> ProcessInfo { - let state = self.state.lock().unwrap(); + let state = self.lock(); ProcessInfo { config: (*self.config).clone(), state: state.display(), diff --git a/supervisor/src/web_api.rs b/supervisor/src/web_api.rs index f52ebe0b..2521ee20 100644 --- a/supervisor/src/web_api.rs +++ b/supervisor/src/web_api.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{anyhow, Result}; +use or_panic::ResultOrPanic; use rocket::figment::Figment; use rocket::serde::json::Json; use rocket::{delete, get, post, routes, Build, Rocket, State}; @@ -108,13 +109,13 @@ async fn handle_shutdown_signals(supervisor: Supervisor) { let ctrl_c = async { signal::ctrl_c() .await - .expect("failed to install Ctrl+C handler"); + .or_panic("failed to install Ctrl+C handler"); }; #[cfg(unix)] let terminate = async { signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") + .or_panic("failed to install signal handler") .recv() .await; }; @@ -133,5 +134,5 @@ async fn handle_shutdown_signals(supervisor: Supervisor) { perform_shutdown(&supervisor, true) .await - .expect("Force shutdown should never return"); + .or_panic("Force shutdown 
should never return"); } diff --git a/tdx-attest-sys/build.rs b/tdx-attest-sys/build.rs index b943878e..9e30e05f 100644 --- a/tdx-attest-sys/build.rs +++ b/tdx-attest-sys/build.rs @@ -2,13 +2,15 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + use std::env; use std::path::PathBuf; fn main() { println!("cargo:rerun-if-changed=csrc/tdx_attest.c"); println!("cargo:rerun-if-changed=csrc/qgs_msg_lib.cpp"); - let output_path = PathBuf::from(env::var("OUT_DIR").unwrap()); + let output_path = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR not set")); bindgen::Builder::default() .header("bindings.h") .default_enum_style(bindgen::EnumVariation::ModuleConsts) diff --git a/verifier/src/main.rs b/verifier/src/main.rs index d5ad4cc3..f145d71d 100644 --- a/verifier/src/main.rs +++ b/verifier/src/main.rs @@ -4,6 +4,7 @@ use std::sync::Arc; +use anyhow::{Context, Result}; use clap::Parser; use figment::{ providers::{Env, Format, Toml}, @@ -165,8 +166,8 @@ async fn run_oneshot(file_path: &str, config: &Config) -> anyhow::Result<()> { Ok(()) } -#[rocket::launch] -fn rocket() -> _ { +#[rocket::main] +async fn main() -> Result<()> { tracing_subscriber::fmt::try_init().ok(); let cli = Cli::parse(); @@ -178,12 +179,12 @@ fn rocket() -> _ { .merge(Toml::file(&cli.config)) .merge(Env::prefixed("DSTACK_VERIFIER_")); - let config: Config = figment.extract().expect("Failed to load configuration"); + let config: Config = figment.extract().context("Failed to load configuration")?; // Check for oneshot mode if let Some(file_path) = cli.verify { // Run oneshot verification and exit - let rt = tokio::runtime::Runtime::new().expect("Failed to create runtime"); + let rt = tokio::runtime::Runtime::new().context("Failed to create runtime")?; rt.block_on(async { if let Err(e) = run_oneshot(&file_path, &config).await { error!("Oneshot verification failed: {:#}", e); @@ -207,4 +208,8 @@ fn rocket() -> _ { info!("dstack-verifier started successfully"); }) })) + .launch() + 
.await + .map_err(|err| anyhow::anyhow!("launch rocket failed: {err:?}"))?; + Ok(()) } diff --git a/vmm/Cargo.toml b/vmm/Cargo.toml index caf7542a..84cb5ff6 100644 --- a/vmm/Cargo.toml +++ b/vmm/Cargo.toml @@ -50,6 +50,10 @@ lspci.workspace = true base64.workspace = true serde-human-bytes.workspace = true size-parser = { workspace = true, features = ["serde"] } +or-panic.workspace = true [dev-dependencies] insta.workspace = true + +[build-dependencies] +or-panic.workspace = true diff --git a/vmm/rpc/build.rs b/vmm/rpc/build.rs index 77e6a9e8..fe19530a 100644 --- a/vmm/rpc/build.rs +++ b/vmm/rpc/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_package_emission() diff --git a/vmm/src/app.rs b/vmm/src/app.rs index 913f0515..289ddf7a 100644 --- a/vmm/src/app.rs +++ b/vmm/src/app.rs @@ -16,6 +16,7 @@ use dstack_vmm_rpc::{ use fs_err as fs; use guest_api::client::DefaultClient as GuestClient; use id_pool::IdPool; +use or_panic::ResultOrPanic; use ra_rpc::client::RaClient; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -123,7 +124,7 @@ pub struct App { impl App { fn lock(&self) -> MutexGuard { - self.state.lock().unwrap() + self.state.lock().or_panic("mutex poisoned") } pub(crate) fn vm_dir(&self) -> PathBuf { @@ -350,7 +351,7 @@ impl App { .unwrap_or_default() .is_cvm() }) - .map(|(id, p)| (id.clone(), p.config.cid.unwrap())) + .flat_map(|(id, p)| p.config.cid.map(|cid| (id.clone(), cid))) .collect::>(); // Update CID pool with running VMs