// xref: /cloud-hypervisor/vmm/src/acpi.rs (revision f67b3f79ea19c9a66e04074cbbf5d292f6529e43)
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::cpu::CpuManager;
use crate::device_manager::DeviceManager;
use crate::memory_manager::MemoryManager;
use crate::{GuestMemoryMmap, GuestRegionMmap};
use acpi_tables::sdt::GenericAddress;
use acpi_tables::{aml::Aml, rsdp::Rsdp, sdt::Sdt};
#[cfg(target_arch = "aarch64")]
use arch::aarch64::DeviceInfoForFdt;
#[cfg(target_arch = "aarch64")]
use arch::DeviceType;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::NumaNodes;

use bitflags::bitflags;
use std::sync::{Arc, Mutex};
use vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryRegion};

/* Values for Type in APIC sub-headers */
#[cfg(target_arch = "x86_64")]
pub const ACPI_APIC_PROCESSOR: u8 = 0;
#[cfg(target_arch = "x86_64")]
pub const ACPI_APIC_IO: u8 = 1;
#[cfg(target_arch = "x86_64")]
pub const ACPI_APIC_XRUPT_OVERRIDE: u8 = 2;
#[cfg(target_arch = "aarch64")]
pub const ACPI_APIC_GENERIC_CPU_INTERFACE: u8 = 11;
#[cfg(target_arch = "aarch64")]
pub const ACPI_APIC_GENERIC_DISTRIBUTOR: u8 = 12;
#[cfg(target_arch = "aarch64")]
pub const ACPI_APIC_GENERIC_REDISTRIBUTOR: u8 = 14;
#[cfg(target_arch = "aarch64")]
pub const ACPI_APIC_GENERIC_TRANSLATOR: u8 = 15;

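// MCFG "Configuration Space Base Address Allocation" entry: the ECAM base
// address, the PCI segment group it belongs to, and the bus range it covers.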
#[allow(dead_code)]
#[repr(packed)]
#[derive(Default)]
struct PciRangeEntry {
    pub base_address: u64,
    pub segment: u16,
    pub start: u8,
    pub end: u8,
    _reserved: u32,
}

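// SRAT Memory Affinity structure (type 1). The 64-bit base address and
// length are carried as two 32-bit halves, per the ACPI table layout.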
#[allow(dead_code)]
#[repr(packed)]
#[derive(Default)]
struct MemoryAffinity {
    pub type_: u8,
    pub length: u8,
    pub proximity_domain: u32,
    _reserved1: u16,
    pub base_addr_lo: u32,
    pub base_addr_hi: u32,
    pub length_lo: u32,
    pub length_hi: u32,
    _reserved2: u32,
    pub flags: u32,
    _reserved3: u64,
}

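// SRAT Processor Local x2APIC Affinity structure (type 2), used on x86_64
// to tie each vCPU to its NUMA proximity domain.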
#[allow(dead_code)]
#[repr(packed)]
#[derive(Default)]
struct ProcessorLocalX2ApicAffinity {
    pub type_: u8,
    pub length: u8,
    _reserved1: u16,
    pub proximity_domain: u32,
    pub x2apic_id: u32,
    pub flags: u32,
    pub clock_domain: u32,
    _reserved2: u32,
}

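// SRAT GICC Affinity structure (type 3), the aarch64 counterpart keyed by
// the ACPI processor UID rather than an APIC ID.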
#[allow(dead_code)]
#[repr(packed)]
#[derive(Default)]
struct ProcessorGiccAffinity {
    pub type_: u8,
    pub length: u8,
    pub proximity_domain: u32,
    pub acpi_processor_uid: u32,
    pub flags: u32,
    pub clock_domain: u32,
}

bitflags! {
    pub struct MemAffinityFlags: u32 {
        const NOFLAGS = 0;
        const ENABLE = 0b1;
        const HOTPLUGGABLE = 0b10;
        const NON_VOLATILE = 0b100;
    }
}

impl MemoryAffinity {
    fn from_region(
        region: &Arc<GuestRegionMmap>,
        proximity_domain: u32,
        flags: MemAffinityFlags,
    ) -> Self {
        Self::from_range(
            region.start_addr().raw_value(),
            region.len(),
            proximity_domain,
            flags,
        )
    }

    fn from_range(
        base_addr: u64,
        size: u64,
        proximity_domain: u32,
        flags: MemAffinityFlags,
    ) -> Self {
        let base_addr_lo = (base_addr & 0xffff_ffff) as u32;
        let base_addr_hi = (base_addr >> 32) as u32;
        let length_lo = (size & 0xffff_ffff) as u32;
        let length_hi = (size >> 32) as u32;

        MemoryAffinity {
            type_: 1,
            length: 40,
            proximity_domain,
            base_addr_lo,
            base_addr_hi,
            length_lo,
            length_hi,
            flags: flags.bits(),
            ..Default::default()
        }
    }
}

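// VIOT virtio-pci node (type 3): identifies the virtio-iommu device by its
// PCI segment and BDF number.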
#[allow(dead_code)]
#[repr(packed)]
#[derive(Default)]
struct ViotVirtioPciNode {
    pub type_: u8,
    _reserved: u8,
    pub length: u16,
    pub pci_segment: u16,
    pub pci_bdf_number: u16,
    _reserved2: [u8; 8],
}

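// VIOT PCI range node (type 1): maps a contiguous range of PCI endpoints to
// the translation element referenced by output_node.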
#[allow(dead_code)]
#[repr(packed)]
#[derive(Default)]
struct ViotPciRangeNode {
    pub type_: u8,
    _reserved: u8,
    pub length: u16,
    pub endpoint_start: u32,
    pub pci_segment_start: u16,
    pub pci_segment_end: u16,
    pub pci_bdf_start: u16,
    pub pci_bdf_end: u16,
    pub output_node: u16,
    _reserved2: [u8; 6],
}

pub fn create_dsdt_table(
    device_manager: &Arc<Mutex<DeviceManager>>,
    cpu_manager: &Arc<Mutex<CpuManager>>,
    memory_manager: &Arc<Mutex<MemoryManager>>,
) -> Sdt {
    // DSDT
    let mut dsdt = Sdt::new(*b"DSDT", 36, 6, *b"CLOUDH", *b"CHDSDT  ", 1);

    dsdt.append_slice(device_manager.lock().unwrap().to_aml_bytes().as_slice());
    dsdt.append_slice(cpu_manager.lock().unwrap().to_aml_bytes().as_slice());
    dsdt.append_slice(memory_manager.lock().unwrap().to_aml_bytes().as_slice());

    dsdt
}

fn create_facp_table(dsdt_offset: GuestAddress) -> Sdt {
    // Revision 6 of the ACPI FADT table is 276 bytes long
    let mut facp = Sdt::new(*b"FACP", 276, 6, *b"CLOUDH", *b"CHFACP  ", 1);

    // x86_64 specific fields
    #[cfg(target_arch = "x86_64")]
    {
        // PM_TMR_BLK I/O port
        facp.write(76, 0xb008u32);
        // RESET_REG
        facp.write(116, GenericAddress::io_port_address::<u8>(0x3c0));
        // RESET_VALUE
        facp.write(128, 1u8);
        // X_PM_TMR_BLK
        facp.write(208, GenericAddress::io_port_address::<u32>(0xb008));
        // SLEEP_CONTROL_REG
        facp.write(244, GenericAddress::io_port_address::<u8>(0x3c0));
        // SLEEP_STATUS_REG
        facp.write(256, GenericAddress::io_port_address::<u8>(0x3c0));
    }

    // aarch64 specific fields
    #[cfg(target_arch = "aarch64")]
    // ARM_BOOT_ARCH: enable PSCI with HVC enable-method
    facp.write(129, 3u16);

    // Architecture common fields
    // HW_REDUCED_ACPI, RESET_REG_SUP, TMR_VAL_EXT
    let fadt_flags: u32 = 1 << 20 | 1 << 10 | 1 << 8;
    facp.write(112, fadt_flags);
    // FADT minor version
    facp.write(131, 3u8);
    // X_DSDT
    facp.write(140, dsdt_offset.0);
    // Hypervisor Vendor Identity
    facp.write(268, b"CLOUDHYP");

    facp.update_checksum();

    facp
}

fn create_mcfg_table() -> Sdt {
    let mut mcfg = Sdt::new(*b"MCFG", 36, 1, *b"CLOUDH", *b"CHMCFG  ", 1);

    // MCFG reserved 8 bytes
    mcfg.append(0u64);

    // 32-bit PCI enhanced configuration mechanism
    mcfg.append(PciRangeEntry {
        base_address: arch::layout::PCI_MMCONFIG_START.0,
        segment: 0,
        start: 0,
        // Each bus consumes 1MiB (32 devices x 8 functions x 4KiB) of ECAM
        // space, so the end bus number is derived from the region size.
        end: ((arch::layout::PCI_MMCONFIG_SIZE - 1) >> 20) as u8,
        ..Default::default()
    });
    mcfg
}

fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
    let mut srat = Sdt::new(*b"SRAT", 36, 3, *b"CLOUDH", *b"CHSRAT  ", 1);
    // SRAT reserved 12 bytes
    srat.append_slice(&[0u8; 12]);

    // Check the MemoryAffinity structure is the right size as expected by
    // the ACPI specification.
    assert_eq!(std::mem::size_of::<MemoryAffinity>(), 40);

    for (node_id, node) in numa_nodes.iter() {
        let proximity_domain = *node_id as u32;

        for region in &node.memory_regions {
            srat.append(MemoryAffinity::from_region(
                region,
                proximity_domain,
                MemAffinityFlags::ENABLE,
            ))
        }

        for region in &node.hotplug_regions {
            srat.append(MemoryAffinity::from_region(
                region,
                proximity_domain,
                MemAffinityFlags::ENABLE | MemAffinityFlags::HOTPLUGGABLE,
            ))
        }

        #[cfg(target_arch = "x86_64")]
        for section in &node.sgx_epc_sections {
            srat.append(MemoryAffinity::from_range(
                section.start().raw_value(),
                section.size(),
                proximity_domain,
                MemAffinityFlags::ENABLE,
            ))
        }

        for cpu in &node.cpus {
            let x2apic_id = *cpu as u32;

            // Flags
            // - Enabled = 1 (bit 0)
            // - Reserved bits 1-31
            let flags = 1;

            #[cfg(target_arch = "x86_64")]
            srat.append(ProcessorLocalX2ApicAffinity {
                type_: 2,
                length: 24,
                proximity_domain,
                x2apic_id,
                flags,
                clock_domain: 0,
                ..Default::default()
            });
            #[cfg(target_arch = "aarch64")]
            srat.append(ProcessorGiccAffinity {
                type_: 3,
                length: 18,
                proximity_domain,
                acpi_processor_uid: x2apic_id,
                flags,
                clock_domain: 0,
            });
        }
    }
    srat
}

fn create_slit_table(numa_nodes: &NumaNodes) -> Sdt {
    let mut slit = Sdt::new(*b"SLIT", 36, 1, *b"CLOUDH", *b"CHSLIT  ", 1);
    // Number of System Localities on 8 bytes.
    slit.append(numa_nodes.len() as u64);

    let existing_nodes: Vec<u32> = numa_nodes.keys().cloned().collect();
    for (node_id, node) in numa_nodes.iter() {
        let distances = &node.distances;
        for i in existing_nodes.iter() {
            let dist: u8 = if *node_id == *i {
                // A node's distance to itself is always 10 per the ACPI
                // specification.
                10
            } else if let Some(distance) = distances.get(i) {
                *distance as u8
            } else {
                // Fall back to twice the local distance when no distance
                // was provided for this pair of nodes.
                20
            };

            slit.append(dist);
        }
    }
    slit
}

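// The architected timer interrupts are PPIs; on the GIC, PPI n is exposed to
// the OS as GSIV n + 16, hence the "+ 16" applied to each GSIV field below.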
#[cfg(target_arch = "aarch64")]
fn create_gtdt_table() -> Sdt {
    const ARCH_TIMER_NS_EL2_IRQ: u32 = 10;
    const ARCH_TIMER_VIRT_IRQ: u32 = 11;
    const ARCH_TIMER_S_EL1_IRQ: u32 = 13;
    const ARCH_TIMER_NS_EL1_IRQ: u32 = 14;
    const ACPI_GTDT_INTERRUPT_MODE_LEVEL: u32 = 0;
    const ACPI_GTDT_CAP_ALWAYS_ON: u32 = 1 << 2;

    let irqflags: u32 = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    // GTDT
    let mut gtdt = Sdt::new(*b"GTDT", 104, 2, *b"CLOUDH", *b"CHGTDT  ", 1);
    // Secure EL1 Timer GSIV
    gtdt.write(48, ARCH_TIMER_S_EL1_IRQ + 16);
    // Secure EL1 Timer Flags
    gtdt.write(52, irqflags);
    // Non-Secure EL1 Timer GSIV
    gtdt.write(56, ARCH_TIMER_NS_EL1_IRQ + 16);
    // Non-Secure EL1 Timer Flags
    gtdt.write(60, irqflags | ACPI_GTDT_CAP_ALWAYS_ON);
    // Virtual EL1 Timer GSIV
    gtdt.write(64, ARCH_TIMER_VIRT_IRQ + 16);
    // Virtual EL1 Timer Flags
    gtdt.write(68, irqflags);
    // EL2 Timer GSIV
    gtdt.write(72, ARCH_TIMER_NS_EL2_IRQ + 16);
    // EL2 Timer Flags
    gtdt.write(76, irqflags);

    gtdt.update_checksum();

    gtdt
}

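// SPCR (Serial Port Console Redirection) table describing the PL011 console:
// in the SPCR specification, interface type 3 means "ARM PL011 UART".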
#[cfg(target_arch = "aarch64")]
fn create_spcr_table(base_address: u64, gsi: u32) -> Sdt {
    // SPCR
    let mut spcr = Sdt::new(*b"SPCR", 80, 2, *b"CLOUDH", *b"CHSPCR  ", 1);
    // Interface Type
    spcr.write(36, 3u8);
    // Base Address in format ACPI Generic Address Structure
    spcr.write(40, GenericAddress::mmio_address::<u8>(base_address));
    // Interrupt Type: Bit[3] ARMH GIC interrupt
    spcr.write(52, (1 << 3) as u8);
    // Global System Interrupt used by the UART
    spcr.write(54, gsi.to_le());
    // Baud Rate: 3 = 9600
    spcr.write(58, 3u8);
    // Stop Bits: 1 Stop bit
    spcr.write(60, 1u8);
    // Flow Control: Bit[1] = RTS/CTS hardware flow control
    spcr.write(61, (1 << 1) as u8);
    // PCI Device ID: Not a PCI device
    spcr.write(64, 0xffff_u16);
    // PCI Vendor ID: Not a PCI device
    spcr.write(66, 0xffff_u16);

    spcr.update_checksum();

    spcr
}

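// Minimal IORT: a single ITS group node plus a root complex node whose one
// identity ID mapping routes every RID straight to that ITS group.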
#[cfg(target_arch = "aarch64")]
fn create_iort_table() -> Sdt {
    const ACPI_IORT_NODE_ITS_GROUP: u8 = 0x00;
    const ACPI_IORT_NODE_PCI_ROOT_COMPLEX: u8 = 0x02;

    // IORT
    let mut iort = Sdt::new(*b"IORT", 124, 2, *b"CLOUDH", *b"CHIORT  ", 1);
    // Nodes: PCI Root Complex, ITS
    // Note: We currently do not support SMMU
    iort.write(36, (2u32).to_le());
    iort.write(40, (48u32).to_le());

    // ITS group node
    iort.write(48, ACPI_IORT_NODE_ITS_GROUP);
    // Length of the ITS group node in bytes
    iort.write(49, (24u16).to_le());
    // ITS counts
    iort.write(64, (1u32).to_le());

    // Root Complex Node
    iort.write(72, ACPI_IORT_NODE_PCI_ROOT_COMPLEX);
    // Length of the root complex node in bytes
    iort.write(73, (52u16).to_le());
    // Mapping counts
    iort.write(80, (1u32).to_le());
    // Offset from the start of the RC node to the start of its Array of ID mappings
    iort.write(84, (32u32).to_le());
    // Fully coherent device
    iort.write(88, (1u32).to_le());
    // CCA = CPM = DACS = 1
    iort.write(95, 3u8);
    // Identity RID mapping covering the whole input RID range
    iort.write(108, (0xffff_u32).to_le());
    // id_mapping_array_output_reference should be
    // the ITS group node (the first node) if no SMMU
    iort.write(116, (48u32).to_le());

    iort.update_checksum();

    iort
}

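// The first node starts at offset 48: the 36-byte SDT header plus the 12
// bytes of node count, node offset and reserved fields appended below.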
fn create_viot_table(iommu_bdf: u32, devices_bdf: &[u32]) -> Sdt {
    // VIOT
    let mut viot = Sdt::new(*b"VIOT", 36, 0, *b"CLOUDH", *b"CHVIOT  ", 0);
    // Node count
    viot.append((devices_bdf.len() + 1) as u16);
    // Node offset
    viot.append(48u16);
    // VIOT reserved 8 bytes
    viot.append_slice(&[0u8; 8]);

    // Virtio-iommu based on virtio-pci node
    viot.append(ViotVirtioPciNode {
        type_: 3,
        length: 16,
        pci_segment: 0,
        pci_bdf_number: iommu_bdf as u16,
        ..Default::default()
    });

    for device_bdf in devices_bdf {
        viot.append(ViotPciRangeNode {
            type_: 1,
            length: 24,
            endpoint_start: *device_bdf,
            pci_segment_start: 0,
            pci_segment_end: 0,
            pci_bdf_start: *device_bdf as u16,
            pci_bdf_end: *device_bdf as u16,
            output_node: 48,
            ..Default::default()
        });
    }

    viot
}

pub fn create_acpi_tables(
    guest_mem: &GuestMemoryMmap,
    device_manager: &Arc<Mutex<DeviceManager>>,
    cpu_manager: &Arc<Mutex<CpuManager>>,
    memory_manager: &Arc<Mutex<MemoryManager>>,
    numa_nodes: &NumaNodes,
) -> GuestAddress {
    let mut prev_tbl_len: u64;
    let mut prev_tbl_off: GuestAddress;
    let rsdp_offset = arch::layout::RSDP_POINTER;
    let mut tables: Vec<u64> = Vec::new();

    // DSDT
    let dsdt = create_dsdt_table(device_manager, cpu_manager, memory_manager);
    let dsdt_offset = rsdp_offset.checked_add(Rsdp::len() as u64).unwrap();
    guest_mem
        .write_slice(dsdt.as_slice(), dsdt_offset)
        .expect("Error writing DSDT table");

    // FACP aka FADT
    let facp = create_facp_table(dsdt_offset);
    let facp_offset = dsdt_offset.checked_add(dsdt.len() as u64).unwrap();
    guest_mem
        .write_slice(facp.as_slice(), facp_offset)
        .expect("Error writing FACP table");
    tables.push(facp_offset.0);

    // MADT
    let madt = cpu_manager.lock().unwrap().create_madt();
    let madt_offset = facp_offset.checked_add(facp.len() as u64).unwrap();
    guest_mem
        .write_slice(madt.as_slice(), madt_offset)
        .expect("Error writing MADT table");
    tables.push(madt_offset.0);
    prev_tbl_len = madt.len() as u64;
    prev_tbl_off = madt_offset;

    // PPTT
    #[cfg(target_arch = "aarch64")]
    {
        let pptt = cpu_manager.lock().unwrap().create_pptt();
        let pptt_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap();
        guest_mem
            .write_slice(pptt.as_slice(), pptt_offset)
            .expect("Error writing PPTT table");
        tables.push(pptt_offset.0);
        prev_tbl_len = pptt.len() as u64;
        prev_tbl_off = pptt_offset;
    }

    // GTDT
    #[cfg(target_arch = "aarch64")]
    {
        let gtdt = create_gtdt_table();
        let gtdt_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap();
        guest_mem
            .write_slice(gtdt.as_slice(), gtdt_offset)
            .expect("Error writing GTDT table");
        tables.push(gtdt_offset.0);
        prev_tbl_len = gtdt.len() as u64;
        prev_tbl_off = gtdt_offset;
    }

    // MCFG
    let mcfg = create_mcfg_table();
    let mcfg_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap();
    guest_mem
        .write_slice(mcfg.as_slice(), mcfg_offset)
        .expect("Error writing MCFG table");
    tables.push(mcfg_offset.0);
    prev_tbl_len = mcfg.len() as u64;
    prev_tbl_off = mcfg_offset;

    // SPCR
    #[cfg(target_arch = "aarch64")]
    {
        let is_serial_on = device_manager
            .lock()
            .unwrap()
            .get_device_info()
            .clone()
            .get(&(DeviceType::Serial, DeviceType::Serial.to_string()))
            .is_some();
        let serial_device_addr = arch::layout::LEGACY_SERIAL_MAPPED_IO_START;
        let serial_device_irq = if is_serial_on {
            device_manager
                .lock()
                .unwrap()
                .get_device_info()
                .clone()
                .get(&(DeviceType::Serial, DeviceType::Serial.to_string()))
                .unwrap()
                .irq()
        } else {
570             31
571         };
572         let spcr = create_spcr_table(serial_device_addr, serial_device_irq);
573         let spcr_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap();
574         guest_mem
575             .write_slice(spcr.as_slice(), spcr_offset)
576             .expect("Error writing SPCR table");
577         tables.push(spcr_offset.0);
578         prev_tbl_len = spcr.len() as u64;
579         prev_tbl_off = spcr_offset;
580     }

    // SRAT and SLIT
    // Only created if the NUMA nodes list is not empty.
    if !numa_nodes.is_empty() {
        // SRAT
        let srat = create_srat_table(numa_nodes);
        let srat_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap();
        guest_mem
            .write_slice(srat.as_slice(), srat_offset)
            .expect("Error writing SRAT table");
        tables.push(srat_offset.0);

        // SLIT
        let slit = create_slit_table(numa_nodes);
        let slit_offset = srat_offset.checked_add(srat.len() as u64).unwrap();
        guest_mem
            .write_slice(slit.as_slice(), slit_offset)
            .expect("Error writing SLIT table");
        tables.push(slit_offset.0);

        prev_tbl_len = slit.len() as u64;
        prev_tbl_off = slit_offset;
    }

    // IORT
    #[cfg(target_arch = "aarch64")]
    {
        let iort = create_iort_table();
        let iort_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap();
        guest_mem
            .write_slice(iort.as_slice(), iort_offset)
            .expect("Error writing IORT table");
        tables.push(iort_offset.0);
        prev_tbl_len = iort.len() as u64;
        prev_tbl_off = iort_offset;
    }

    // VIOT
    if let Some((iommu_bdf, devices_bdf)) = device_manager.lock().unwrap().iommu_attached_devices()
    {
        let viot = create_viot_table(*iommu_bdf, devices_bdf);

        let viot_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap();
        guest_mem
            .write_slice(viot.as_slice(), viot_offset)
            .expect("Error writing VIOT table");
        tables.push(viot_offset.0);
        prev_tbl_len = viot.len() as u64;
        prev_tbl_off = viot_offset;
    }

    // XSDT
    let mut xsdt = Sdt::new(*b"XSDT", 36, 1, *b"CLOUDH", *b"CHXSDT  ", 1);
    for table in tables {
        xsdt.append(table);
    }
    xsdt.update_checksum();
    let xsdt_offset = prev_tbl_off.checked_add(prev_tbl_len).unwrap();
    guest_mem
        .write_slice(xsdt.as_slice(), xsdt_offset)
        .expect("Error writing XSDT table");

    // RSDP
    let rsdp = Rsdp::new(*b"CLOUDH", xsdt_offset.0);
    guest_mem
        .write_slice(rsdp.as_slice(), rsdp_offset)
        .expect("Error writing RSDP");

    rsdp_offset
}
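
// Illustrative sketch, not part of the original source: a small unit test
// showing how MemoryAffinity::from_range splits 64-bit values into the
// 32-bit halves that the SRAT layout expects.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn memory_affinity_from_range_splits_64bit_values() {
        // A 6 GiB range based at 4 GiB.
        let aff = MemoryAffinity::from_range(
            0x1_0000_0000,
            0x1_8000_0000,
            0,
            MemAffinityFlags::ENABLE,
        );

        // Copy the fields out of the packed struct before asserting, to
        // avoid taking references to potentially unaligned fields.
        let (base_lo, base_hi) = (aff.base_addr_lo, aff.base_addr_hi);
        let (len_lo, len_hi) = (aff.length_lo, aff.length_hi);

        assert_eq!(base_lo, 0);
        assert_eq!(base_hi, 1);
        assert_eq!(len_lo, 0x8000_0000);
        assert_eq!(len_hi, 1);
    }
}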