1 // Copyright © 2019 Intel Corporation 2 // 3 // SPDX-License-Identifier: Apache-2.0 4 // 5 use crate::cpu::CpuManager; 6 use crate::device_manager::DeviceManager; 7 use crate::memory_manager::MemoryManager; 8 use crate::pci_segment::PciSegment; 9 use crate::{GuestMemoryMmap, GuestRegionMmap}; 10 use acpi_tables::sdt::GenericAddress; 11 use acpi_tables::{aml::Aml, rsdp::Rsdp, sdt::Sdt}; 12 #[cfg(target_arch = "aarch64")] 13 use arch::aarch64::DeviceInfoForFdt; 14 #[cfg(target_arch = "aarch64")] 15 use arch::DeviceType; 16 use arch::NumaNodes; 17 18 use bitflags::bitflags; 19 use pci::PciBdf; 20 use std::sync::{Arc, Mutex}; 21 use std::time::Instant; 22 use vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryRegion}; 23 24 /* Values for Type in APIC sub-headers */ 25 #[cfg(target_arch = "x86_64")] 26 pub const ACPI_APIC_PROCESSOR: u8 = 0; 27 #[cfg(target_arch = "x86_64")] 28 pub const ACPI_APIC_IO: u8 = 1; 29 #[cfg(target_arch = "x86_64")] 30 pub const ACPI_APIC_XRUPT_OVERRIDE: u8 = 2; 31 #[cfg(target_arch = "aarch64")] 32 pub const ACPI_APIC_GENERIC_CPU_INTERFACE: u8 = 11; 33 #[cfg(target_arch = "aarch64")] 34 pub const ACPI_APIC_GENERIC_DISTRIBUTOR: u8 = 12; 35 #[cfg(target_arch = "aarch64")] 36 pub const ACPI_APIC_GENERIC_REDISTRIBUTOR: u8 = 14; 37 #[cfg(target_arch = "aarch64")] 38 pub const ACPI_APIC_GENERIC_TRANSLATOR: u8 = 15; 39 40 #[allow(dead_code)] 41 #[repr(packed)] 42 #[derive(Default)] 43 struct PciRangeEntry { 44 pub base_address: u64, 45 pub segment: u16, 46 pub start: u8, 47 pub end: u8, 48 _reserved: u32, 49 } 50 51 #[allow(dead_code)] 52 #[repr(packed)] 53 #[derive(Default)] 54 struct MemoryAffinity { 55 pub type_: u8, 56 pub length: u8, 57 pub proximity_domain: u32, 58 _reserved1: u16, 59 pub base_addr_lo: u32, 60 pub base_addr_hi: u32, 61 pub length_lo: u32, 62 pub length_hi: u32, 63 _reserved2: u32, 64 pub flags: u32, 65 _reserved3: u64, 66 } 67 68 #[allow(dead_code)] 69 #[repr(packed)] 70 #[derive(Default)] 71 struct ProcessorLocalX2ApicAffinity { 72 pub type_: u8, 73 pub length: u8, 74 _reserved1: u16, 75 pub proximity_domain: u32, 76 pub x2apic_id: u32, 77 pub flags: u32, 78 pub clock_domain: u32, 79 _reserved2: u32, 80 } 81 82 #[allow(dead_code)] 83 #[repr(packed)] 84 #[derive(Default)] 85 struct ProcessorGiccAffinity { 86 pub type_: u8, 87 pub length: u8, 88 pub proximity_domain: u32, 89 pub acpi_processor_uid: u32, 90 pub flags: u32, 91 pub clock_domain: u32, 92 } 93 94 bitflags! { 95 pub struct MemAffinityFlags: u32 { 96 const NOFLAGS = 0; 97 const ENABLE = 0b1; 98 const HOTPLUGGABLE = 0b10; 99 const NON_VOLATILE = 0b100; 100 } 101 } 102 103 impl MemoryAffinity { 104 fn from_region( 105 region: &Arc<GuestRegionMmap>, 106 proximity_domain: u32, 107 flags: MemAffinityFlags, 108 ) -> Self { 109 Self::from_range( 110 region.start_addr().raw_value(), 111 region.len(), 112 proximity_domain, 113 flags, 114 ) 115 } 116 117 fn from_range( 118 base_addr: u64, 119 size: u64, 120 proximity_domain: u32, 121 flags: MemAffinityFlags, 122 ) -> Self { 123 let base_addr_lo = (base_addr & 0xffff_ffff) as u32; 124 let base_addr_hi = (base_addr >> 32) as u32; 125 let length_lo = (size & 0xffff_ffff) as u32; 126 let length_hi = (size >> 32) as u32; 127 128 MemoryAffinity { 129 type_: 1, 130 length: 40, 131 proximity_domain, 132 base_addr_lo, 133 base_addr_hi, 134 length_lo, 135 length_hi, 136 flags: flags.bits(), 137 ..Default::default() 138 } 139 } 140 } 141 142 #[allow(dead_code)] 143 #[repr(packed)] 144 #[derive(Default)] 145 struct ViotVirtioPciNode { 146 pub type_: u8, 147 _reserved: u8, 148 pub length: u16, 149 pub pci_segment: u16, 150 pub pci_bdf_number: u16, 151 _reserved2: [u8; 8], 152 } 153 154 #[allow(dead_code)] 155 #[repr(packed)] 156 #[derive(Default)] 157 struct ViotPciRangeNode { 158 pub type_: u8, 159 _reserved: u8, 160 pub length: u16, 161 pub endpoint_start: u32, 162 pub pci_segment_start: u16, 163 pub pci_segment_end: u16, 164 pub pci_bdf_start: u16, 165 pub pci_bdf_end: u16, 166 pub output_node: u16, 167 _reserved2: [u8; 6], 168 } 169 170 pub fn create_dsdt_table( 171 device_manager: &Arc<Mutex<DeviceManager>>, 172 cpu_manager: &Arc<Mutex<CpuManager>>, 173 memory_manager: &Arc<Mutex<MemoryManager>>, 174 ) -> Sdt { 175 // DSDT 176 let mut dsdt = Sdt::new(*b"DSDT", 36, 6, *b"CLOUDH", *b"CHDSDT ", 1); 177 178 let mut bytes = Vec::new(); 179 180 device_manager.lock().unwrap().append_aml_bytes(&mut bytes); 181 cpu_manager.lock().unwrap().append_aml_bytes(&mut bytes); 182 memory_manager.lock().unwrap().append_aml_bytes(&mut bytes); 183 dsdt.append_slice(&bytes); 184 185 dsdt 186 } 187 188 fn create_facp_table(dsdt_offset: GuestAddress) -> Sdt { 189 // Revision 6 of the ACPI FADT table is 276 bytes long 190 let mut facp = Sdt::new(*b"FACP", 276, 6, *b"CLOUDH", *b"CHFACP ", 1); 191 192 // x86_64 specific fields 193 #[cfg(target_arch = "x86_64")] 194 { 195 // PM_TMR_BLK I/O port 196 facp.write(76, 0xb008u32); 197 // RESET_REG 198 facp.write(116, GenericAddress::io_port_address::<u8>(0x3c0)); 199 // RESET_VALUE 200 facp.write(128, 1u8); 201 // X_PM_TMR_BLK 202 facp.write(208, GenericAddress::io_port_address::<u32>(0xb008)); 203 // SLEEP_CONTROL_REG 204 facp.write(244, GenericAddress::io_port_address::<u8>(0x3c0)); 205 // SLEEP_STATUS_REG 206 facp.write(256, GenericAddress::io_port_address::<u8>(0x3c0)); 207 } 208 209 // aarch64 specific fields 210 #[cfg(target_arch = "aarch64")] 211 // ARM_BOOT_ARCH: enable PSCI with HVC enable-method 212 facp.write(129, 3u16); 213 214 // Architecture common fields 215 // HW_REDUCED_ACPI, RESET_REG_SUP, TMR_VAL_EXT 216 let fadt_flags: u32 = 1 << 20 | 1 << 10 | 1 << 8; 217 facp.write(112, fadt_flags); 218 // FADT minor version 219 facp.write(131, 3u8); 220 // X_DSDT 221 facp.write(140, dsdt_offset.0); 222 // Hypervisor Vendor Identity 223 facp.write(268, b"CLOUDHYP"); 224 225 facp.update_checksum(); 226 227 facp 228 } 229 230 fn create_mcfg_table(pci_segments: &[PciSegment]) -> Sdt { 231 let mut mcfg = Sdt::new(*b"MCFG", 36, 1, *b"CLOUDH", *b"CHMCFG ", 1); 232 233 // MCFG reserved 8 bytes 234 mcfg.append(0u64); 235 236 for segment in pci_segments { 237 // 32-bit PCI enhanced configuration mechanism 238 mcfg.append(PciRangeEntry { 239 base_address: segment.mmio_config_address, 240 segment: segment.id, 241 start: 0, 242 end: 0, 243 ..Default::default() 244 }); 245 } 246 mcfg 247 } 248 249 fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt { 250 let mut srat = Sdt::new(*b"SRAT", 36, 3, *b"CLOUDH", *b"CHSRAT ", 1); 251 // SRAT reserved 12 bytes 252 srat.append_slice(&[0u8; 12]); 253 254 // Check the MemoryAffinity structure is the right size as expected by 255 // the ACPI specification. 256 assert_eq!(std::mem::size_of::<MemoryAffinity>(), 40); 257 258 for (node_id, node) in numa_nodes.iter() { 259 let proximity_domain = *node_id as u32; 260 261 for region in &node.memory_regions { 262 srat.append(MemoryAffinity::from_region( 263 region, 264 proximity_domain, 265 MemAffinityFlags::ENABLE, 266 )) 267 } 268 269 for region in &node.hotplug_regions { 270 srat.append(MemoryAffinity::from_region( 271 region, 272 proximity_domain, 273 MemAffinityFlags::ENABLE | MemAffinityFlags::HOTPLUGGABLE, 274 )) 275 } 276 277 #[cfg(target_arch = "x86_64")] 278 for section in &node.sgx_epc_sections { 279 srat.append(MemoryAffinity::from_range( 280 section.start().raw_value(), 281 section.size(), 282 proximity_domain, 283 MemAffinityFlags::ENABLE, 284 )) 285 } 286 287 for cpu in &node.cpus { 288 let x2apic_id = *cpu as u32; 289 290 // Flags 291 // - Enabled = 1 (bit 0) 292 // - Reserved bits 1-31 293 let flags = 1; 294 295 #[cfg(target_arch = "x86_64")] 296 srat.append(ProcessorLocalX2ApicAffinity { 297 type_: 2, 298 length: 24, 299 proximity_domain, 300 x2apic_id, 301 flags, 302 clock_domain: 0, 303 ..Default::default() 304 }); 305 #[cfg(target_arch = "aarch64")] 306 srat.append(ProcessorGiccAffinity { 307 type_: 3, 308 length: 18, 309 proximity_domain, 310 acpi_processor_uid: x2apic_id, 311 flags, 312 clock_domain: 0, 313 }); 314 } 315 } 316 srat 317 } 318 319 fn create_slit_table(numa_nodes: &NumaNodes) -> Sdt { 320 let mut slit = Sdt::new(*b"SLIT", 36, 1, *b"CLOUDH", *b"CHSLIT ", 1); 321 // Number of System Localities on 8 bytes. 322 slit.append(numa_nodes.len() as u64); 323 324 let existing_nodes: Vec<u32> = numa_nodes.keys().cloned().collect(); 325 for (node_id, node) in numa_nodes.iter() { 326 let distances = &node.distances; 327 for i in existing_nodes.iter() { 328 let dist: u8 = if *node_id == *i { 329 10 330 } else if let Some(distance) = distances.get(i) { 331 *distance as u8 332 } else { 333 20 334 }; 335 336 slit.append(dist); 337 } 338 } 339 slit 340 } 341 342 #[cfg(target_arch = "aarch64")] 343 fn create_gtdt_table() -> Sdt { 344 const ARCH_TIMER_NS_EL2_IRQ: u32 = 10; 345 const ARCH_TIMER_VIRT_IRQ: u32 = 11; 346 const ARCH_TIMER_S_EL1_IRQ: u32 = 13; 347 const ARCH_TIMER_NS_EL1_IRQ: u32 = 14; 348 const ACPI_GTDT_INTERRUPT_MODE_LEVEL: u32 = 0; 349 const ACPI_GTDT_CAP_ALWAYS_ON: u32 = 1 << 2; 350 351 let irqflags: u32 = ACPI_GTDT_INTERRUPT_MODE_LEVEL; 352 // GTDT 353 let mut gtdt = Sdt::new(*b"GTDT", 104, 2, *b"CLOUDH", *b"CHGTDT ", 1); 354 // Secure EL1 Timer GSIV 355 gtdt.write(48, (ARCH_TIMER_S_EL1_IRQ + 16) as u32); 356 // Secure EL1 Timer Flags 357 gtdt.write(52, irqflags); 358 // Non-Secure EL1 Timer GSIV 359 gtdt.write(56, (ARCH_TIMER_NS_EL1_IRQ + 16) as u32); 360 // Non-Secure EL1 Timer Flags 361 gtdt.write(60, (irqflags | ACPI_GTDT_CAP_ALWAYS_ON) as u32); 362 // Virtual EL1 Timer GSIV 363 gtdt.write(64, (ARCH_TIMER_VIRT_IRQ + 16) as u32); 364 // Virtual EL1 Timer Flags 365 gtdt.write(68, irqflags); 366 // EL2 Timer GSIV 367 gtdt.write(72, (ARCH_TIMER_NS_EL2_IRQ + 16) as u32); 368 // EL2 Timer Flags 369 gtdt.write(76, irqflags); 370 371 gtdt.update_checksum(); 372 373 gtdt 374 } 375 376 #[cfg(target_arch = "aarch64")] 377 fn create_spcr_table(base_address: u64, gsi: u32) -> Sdt { 378 // SPCR 379 let mut spcr = Sdt::new(*b"SPCR", 80, 2, *b"CLOUDH", *b"CHSPCR ", 1); 380 // Interface Type 381 spcr.write(36, 3u8); 382 // Base Address in format ACPI Generic Address Structure 383 spcr.write(40, GenericAddress::mmio_address::<u8>(base_address)); 384 // Interrupt Type: Bit[3] ARMH GIC interrupt 385 spcr.write(52, (1 << 3) as u8); 386 // Global System Interrupt used by the UART 387 spcr.write(54, (gsi as u32).to_le()); 388 // Baud Rate: 3 = 9600 389 spcr.write(58, 3u8); 390 // Stop Bits: 1 Stop bit 391 spcr.write(60, 1u8); 392 // Flow Control: Bit[1] = RTS/CTS hardware flow control 393 spcr.write(61, (1 << 1) as u8); 394 // PCI Device ID: Not a PCI device 395 spcr.write(64, 0xffff_u16); 396 // PCI Vendor ID: Not a PCI device 397 spcr.write(66, 0xffff_u16); 398 399 spcr.update_checksum(); 400 401 spcr 402 } 403 404 #[cfg(target_arch = "aarch64")] 405 fn create_dbg2_table(base_address: u64) -> Sdt { 406 let namespace = "_SB_.COM1"; 407 let debug_device_info_offset = 44usize; 408 let debug_device_info_len: u16 = 22 /* BaseAddressRegisterOffset */ + 409 12 /* BaseAddressRegister */ + 410 4 /* AddressSize */ + 411 namespace.len() as u16 + 1 /* zero-terminated */; 412 let tbl_len: u32 = debug_device_info_offset as u32 + debug_device_info_len as u32; 413 let mut dbg2 = Sdt::new(*b"DBG2", tbl_len, 0, *b"CLOUDH", *b"CHDBG2 ", 1); 414 415 /* OffsetDbgDeviceInfo */ 416 dbg2.write_u32(36, 44); 417 /* NumberDbgDeviceInfo */ 418 dbg2.write_u32(40, 1); 419 420 /* Debug Device Information structure */ 421 /* Offsets are calculated from the start of this structure. */ 422 let namespace_offset = 38u16; 423 let base_address_register_offset = 22u16; 424 let address_size_offset = 34u16; 425 /* Revision */ 426 dbg2.write_u8(debug_device_info_offset, 0); 427 /* Length */ 428 dbg2.write_u16(debug_device_info_offset + 1, debug_device_info_len); 429 /* NumberofGenericAddressRegisters */ 430 dbg2.write_u8(debug_device_info_offset + 3, 1); 431 /* NameSpaceStringLength */ 432 dbg2.write_u16(debug_device_info_offset + 4, namespace.len() as u16 + 1); 433 /* NameSpaceStringOffset */ 434 dbg2.write_u16(debug_device_info_offset + 6, namespace_offset); 435 /* OemDataLength */ 436 dbg2.write_u16(debug_device_info_offset + 8, 0); 437 /* OemDataOffset */ 438 dbg2.write_u16(debug_device_info_offset + 10, 0); 439 /* Port Type */ 440 dbg2.write_u16(debug_device_info_offset + 12, 0x8000); 441 /* Port Subtype */ 442 dbg2.write_u16(debug_device_info_offset + 14, 0x0003); 443 /* Reserved */ 444 dbg2.write_u16(debug_device_info_offset + 16, 0); 445 /* BaseAddressRegisterOffset */ 446 dbg2.write_u16(debug_device_info_offset + 18, base_address_register_offset); 447 /* AddressSizeOffset */ 448 dbg2.write_u16(debug_device_info_offset + 20, address_size_offset); 449 /* BaseAddressRegister */ 450 dbg2.write( 451 debug_device_info_offset + base_address_register_offset as usize, 452 GenericAddress::mmio_address::<u8>(base_address), 453 ); 454 /* AddressSize */ 455 dbg2.write_u32( 456 debug_device_info_offset + address_size_offset as usize, 457 0x1000, 458 ); 459 /* NamespaceString, zero-terminated ASCII */ 460 for (k, c) in namespace.chars().enumerate() { 461 dbg2.write_u8( 462 debug_device_info_offset + namespace_offset as usize + k, 463 c as u8, 464 ); 465 } 466 dbg2.write_u8( 467 debug_device_info_offset + namespace_offset as usize + namespace.len(), 468 0, 469 ); 470 471 dbg2.update_checksum(); 472 473 dbg2 474 } 475 476 #[cfg(target_arch = "aarch64")] 477 fn create_iort_table(pci_segments: &[PciSegment]) -> Sdt { 478 const ACPI_IORT_NODE_ITS_GROUP: u8 = 0x00; 479 const ACPI_IORT_NODE_PCI_ROOT_COMPLEX: u8 = 0x02; 480 const ACPI_IORT_NODE_ROOT_COMPLEX_OFFSET: usize = 72; 481 const ACPI_IORT_NODE_ROOT_COMPLEX_SIZE: usize = 60; 482 483 // The IORT table containes: 484 // - Header (size = 40) 485 // - 1 x ITS Group Node (size = 24) 486 // - N x Root Complex Node (N = number of pci segments, size = 60 x N) 487 let iort_table_size: u32 = (ACPI_IORT_NODE_ROOT_COMPLEX_OFFSET 488 + ACPI_IORT_NODE_ROOT_COMPLEX_SIZE * pci_segments.len()) 489 as u32; 490 let mut iort = Sdt::new(*b"IORT", iort_table_size, 2, *b"CLOUDH", *b"CHIORT ", 1); 491 iort.write(36, ((1 + pci_segments.len()) as u32).to_le()); 492 iort.write(40, (48u32).to_le()); 493 494 // ITS group node 495 iort.write(48, ACPI_IORT_NODE_ITS_GROUP as u8); 496 // Length of the ITS group node in bytes 497 iort.write(49, (24u16).to_le()); 498 // ITS counts 499 iort.write(64, (1u32).to_le()); 500 501 // Root Complex Nodes 502 for (i, segment) in pci_segments.iter().enumerate() { 503 let node_offset: usize = 504 ACPI_IORT_NODE_ROOT_COMPLEX_OFFSET + i * ACPI_IORT_NODE_ROOT_COMPLEX_SIZE; 505 iort.write(node_offset, ACPI_IORT_NODE_PCI_ROOT_COMPLEX as u8); 506 // Length of the root complex node in bytes 507 iort.write( 508 node_offset + 1, 509 (ACPI_IORT_NODE_ROOT_COMPLEX_SIZE as u16).to_le(), 510 ); 511 // Revision 512 iort.write(node_offset + 3, (3u8).to_le()); 513 // Node ID 514 iort.write(node_offset + 4, (segment.id as u32).to_le()); 515 // Mapping counts 516 iort.write(node_offset + 8, (1u32).to_le()); 517 // Offset from the start of the RC node to the start of its Array of ID mappings 518 iort.write(node_offset + 12, (36u32).to_le()); 519 // Fully coherent device 520 iort.write(node_offset + 16, (1u32).to_le()); 521 // CCA = CPM = DCAS = 1 522 iort.write(node_offset + 24, 3u8); 523 // PCI segment number 524 iort.write(node_offset + 28, (segment.id as u32).to_le()); 525 // Memory address size limit 526 iort.write(node_offset + 32, (64u8).to_le()); 527 528 // From offset 32 onward is the space for ID mappings Array. 529 // Now we have only one mapping. 530 let mapping_offset: usize = node_offset + 36; 531 // The lowest value in the input range 532 iort.write(mapping_offset, (0u32).to_le()); 533 // The number of IDs in the range minus one: 534 // This should cover all the devices of a segment: 535 // 1 (bus) x 32 (devices) x 8 (functions) = 256 536 // Note: Currently only 1 bus is supported in a segment. 537 iort.write(mapping_offset + 4, (255_u32).to_le()); 538 // The lowest value in the output range 539 iort.write(mapping_offset + 8, ((256 * segment.id) as u32).to_le()); 540 // id_mapping_array_output_reference should be 541 // the ITS group node (the first node) if no SMMU 542 iort.write(mapping_offset + 12, (48u32).to_le()); 543 // Flags 544 iort.write(mapping_offset + 16, (0u32).to_le()); 545 } 546 547 iort.update_checksum(); 548 549 iort 550 } 551 552 fn create_viot_table(iommu_bdf: &PciBdf, devices_bdf: &[PciBdf]) -> Sdt { 553 // VIOT 554 let mut viot = Sdt::new(*b"VIOT", 36, 0, *b"CLOUDH", *b"CHVIOT ", 0); 555 // Node count 556 viot.append((devices_bdf.len() + 1) as u16); 557 // Node offset 558 viot.append(48u16); 559 // VIOT reserved 8 bytes 560 viot.append_slice(&[0u8; 8]); 561 562 // Virtio-iommu based on virtio-pci node 563 viot.append(ViotVirtioPciNode { 564 type_: 3, 565 length: 16, 566 pci_segment: iommu_bdf.segment(), 567 pci_bdf_number: iommu_bdf.into(), 568 ..Default::default() 569 }); 570 571 for device_bdf in devices_bdf { 572 viot.append(ViotPciRangeNode { 573 type_: 1, 574 length: 24, 575 endpoint_start: device_bdf.into(), 576 pci_segment_start: device_bdf.segment(), 577 pci_segment_end: device_bdf.segment(), 578 pci_bdf_start: device_bdf.into(), 579 pci_bdf_end: device_bdf.into(), 580 output_node: 48, 581 ..Default::default() 582 }); 583 } 584 585 viot 586 } 587 588 pub fn create_acpi_tables( 589 guest_mem: &GuestMemoryMmap, 590 device_manager: &Arc<Mutex<DeviceManager>>, 591 cpu_manager: &Arc<Mutex<CpuManager>>, 592 memory_manager: &Arc<Mutex<MemoryManager>>, 593 numa_nodes: &NumaNodes, 594 ) -> GuestAddress { 595 let start_time = Instant::now(); 596 let rsdp_offset = arch::layout::RSDP_POINTER; 597 let mut tables: Vec<u64> = Vec::new(); 598 599 // DSDT 600 let dsdt = create_dsdt_table(device_manager, cpu_manager, memory_manager); 601 let dsdt_offset = rsdp_offset.checked_add(Rsdp::len() as u64).unwrap(); 602 guest_mem 603 .write_slice(dsdt.as_slice(), dsdt_offset) 604 .expect("Error writing DSDT table"); 605 606 // FACP aka FADT 607 let facp = create_facp_table(dsdt_offset); 608 let facp_offset = dsdt_offset.checked_add(dsdt.len() as u64).unwrap(); 609 guest_mem 610 .write_slice(facp.as_slice(), facp_offset) 611 .expect("Error writing FACP table"); 612 tables.push(facp_offset.0); 613 614 // MADT 615 let madt = cpu_manager.lock().unwrap().create_madt(); 616 let madt_offset = facp_offset.checked_add(facp.len() as u64).unwrap(); 617 guest_mem 618 .write_slice(madt.as_slice(), madt_offset) 619 .expect("Error writing MADT table"); 620 tables.push(madt_offset.0); 621 let mut prev_tbl_len = madt.len() as u64; 622 let mut prev_tbl_off = madt_offset; 623 624 // PPTT 625 #[cfg(target_arch = "aarch64")] 626 { 627 let pptt = cpu_manager.lock().unwrap().create_pptt(); 628 let pptt_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 629 guest_mem 630 .write_slice(pptt.as_slice(), pptt_offset) 631 .expect("Error writing PPTT table"); 632 tables.push(pptt_offset.0); 633 prev_tbl_len = pptt.len() as u64; 634 prev_tbl_off = pptt_offset; 635 } 636 637 // GTDT 638 #[cfg(target_arch = "aarch64")] 639 { 640 let gtdt = create_gtdt_table(); 641 let gtdt_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 642 guest_mem 643 .write_slice(gtdt.as_slice(), gtdt_offset) 644 .expect("Error writing GTDT table"); 645 tables.push(gtdt_offset.0); 646 prev_tbl_len = gtdt.len() as u64; 647 prev_tbl_off = gtdt_offset; 648 } 649 650 // MCFG 651 let mcfg = create_mcfg_table(device_manager.lock().unwrap().pci_segments()); 652 let mcfg_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 653 guest_mem 654 .write_slice(mcfg.as_slice(), mcfg_offset) 655 .expect("Error writing MCFG table"); 656 tables.push(mcfg_offset.0); 657 prev_tbl_len = mcfg.len() as u64; 658 prev_tbl_off = mcfg_offset; 659 660 // SPCR and DBG2 661 #[cfg(target_arch = "aarch64")] 662 { 663 let is_serial_on = device_manager 664 .lock() 665 .unwrap() 666 .get_device_info() 667 .clone() 668 .get(&(DeviceType::Serial, DeviceType::Serial.to_string())) 669 .is_some(); 670 let serial_device_addr = arch::layout::LEGACY_SERIAL_MAPPED_IO_START.raw_value(); 671 let serial_device_irq = if is_serial_on { 672 device_manager 673 .lock() 674 .unwrap() 675 .get_device_info() 676 .clone() 677 .get(&(DeviceType::Serial, DeviceType::Serial.to_string())) 678 .unwrap() 679 .irq() 680 } else { 681 // If serial is turned off, add a fake device with invalid irq. 682 31 683 }; 684 685 // SPCR 686 let spcr = create_spcr_table(serial_device_addr, serial_device_irq); 687 let spcr_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 688 guest_mem 689 .write_slice(spcr.as_slice(), spcr_offset) 690 .expect("Error writing SPCR table"); 691 tables.push(spcr_offset.0); 692 prev_tbl_len = spcr.len() as u64; 693 prev_tbl_off = spcr_offset; 694 695 // DBG2 696 let dbg2 = create_dbg2_table(serial_device_addr); 697 let dbg2_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 698 guest_mem 699 .write_slice(dbg2.as_slice(), dbg2_offset) 700 .expect("Error writing DBG2 table"); 701 tables.push(dbg2_offset.0); 702 prev_tbl_len = dbg2.len() as u64; 703 prev_tbl_off = dbg2_offset; 704 } 705 706 // SRAT and SLIT 707 // Only created if the NUMA nodes list is not empty. 708 if !numa_nodes.is_empty() { 709 // SRAT 710 let srat = create_srat_table(numa_nodes); 711 let srat_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 712 guest_mem 713 .write_slice(srat.as_slice(), srat_offset) 714 .expect("Error writing SRAT table"); 715 tables.push(srat_offset.0); 716 717 // SLIT 718 let slit = create_slit_table(numa_nodes); 719 let slit_offset = srat_offset.checked_add(srat.len() as u64).unwrap(); 720 guest_mem 721 .write_slice(slit.as_slice(), slit_offset) 722 .expect("Error writing SRAT table"); 723 tables.push(slit_offset.0); 724 725 prev_tbl_len = slit.len() as u64; 726 prev_tbl_off = slit_offset; 727 }; 728 729 #[cfg(target_arch = "aarch64")] 730 { 731 let iort = create_iort_table(device_manager.lock().unwrap().pci_segments()); 732 let iort_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 733 guest_mem 734 .write_slice(iort.as_slice(), iort_offset) 735 .expect("Error writing IORT table"); 736 tables.push(iort_offset.0); 737 prev_tbl_len = iort.len() as u64; 738 prev_tbl_off = iort_offset; 739 } 740 741 // VIOT 742 if let Some((iommu_bdf, devices_bdf)) = device_manager.lock().unwrap().iommu_attached_devices() 743 { 744 let viot = create_viot_table(iommu_bdf, devices_bdf); 745 746 let viot_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 747 guest_mem 748 .write_slice(viot.as_slice(), viot_offset) 749 .expect("Error writing VIOT table"); 750 tables.push(viot_offset.0); 751 prev_tbl_len = viot.len() as u64; 752 prev_tbl_off = viot_offset; 753 } 754 755 // XSDT 756 let mut xsdt = Sdt::new(*b"XSDT", 36, 1, *b"CLOUDH", *b"CHXSDT ", 1); 757 for table in tables { 758 xsdt.append(table); 759 } 760 xsdt.update_checksum(); 761 let xsdt_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap(); 762 guest_mem 763 .write_slice(xsdt.as_slice(), xsdt_offset) 764 .expect("Error writing XSDT table"); 765 766 // RSDP 767 let rsdp = Rsdp::new(*b"CLOUDH", xsdt_offset.0); 768 guest_mem 769 .write_slice(rsdp.as_slice(), rsdp_offset) 770 .expect("Error writing RSDP"); 771 772 info!( 773 "Generated ACPI tables: took {}µs size = {}", 774 Instant::now().duration_since(start_time).as_micros(), 775 xsdt_offset.0 + xsdt.len() as u64 - rsdp_offset.0 776 ); 777 rsdp_offset 778 } 779 780 #[cfg(feature = "tdx")] 781 pub fn create_acpi_tables_tdx( 782 device_manager: &Arc<Mutex<DeviceManager>>, 783 cpu_manager: &Arc<Mutex<CpuManager>>, 784 memory_manager: &Arc<Mutex<MemoryManager>>, 785 numa_nodes: &NumaNodes, 786 ) -> Vec<Sdt> { 787 // DSDT 788 let mut tables = vec![create_dsdt_table( 789 device_manager, 790 cpu_manager, 791 memory_manager, 792 )]; 793 794 // FACP aka FADT 795 tables.push(create_facp_table(GuestAddress(0))); 796 797 // MADT 798 tables.push(cpu_manager.lock().unwrap().create_madt()); 799 800 // MCFG 801 tables.push(create_mcfg_table( 802 device_manager.lock().unwrap().pci_segments(), 803 )); 804 805 // SRAT and SLIT 806 // Only created if the NUMA nodes list is not empty. 807 if !numa_nodes.is_empty() { 808 // SRAT 809 tables.push(create_srat_table(numa_nodes)); 810 811 // SLIT 812 tables.push(create_slit_table(numa_nodes)); 813 }; 814 815 // VIOT 816 if let Some((iommu_bdf, devices_bdf)) = device_manager.lock().unwrap().iommu_attached_devices() 817 { 818 tables.push(create_viot_table(iommu_bdf, devices_bdf)); 819 } 820 821 tables 822 } 823