xref: /cloud-hypervisor/arch/src/x86_64/mod.rs (revision 6f8bd27cf7629733582d930519e98d19e90afb16)
1 // Copyright © 2020, Oracle and/or its affiliates.
2 //
3 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 // SPDX-License-Identifier: Apache-2.0
5 //
6 // Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE-BSD-3-Clause file.
9 use std::sync::Arc;
10 pub mod interrupts;
11 pub mod layout;
12 mod mpspec;
13 mod mptable;
14 pub mod regs;
15 use crate::GuestMemoryMmap;
16 use crate::InitramfsConfig;
17 use crate::RegionType;
18 use hypervisor::arch::x86::{CpuIdEntry, CPUID_FLAG_VALID_INDEX};
19 use hypervisor::HypervisorError;
20 use linux_loader::loader::bootparam::boot_params;
21 use linux_loader::loader::elf::start_info::{
22     hvm_memmap_table_entry, hvm_modlist_entry, hvm_start_info,
23 };
24 use std::collections::BTreeMap;
25 use std::mem;
26 use vm_memory::{
27     Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
28     GuestMemoryRegion, GuestUsize,
29 };
30 mod smbios;
31 use std::arch::x86_64;
32 #[cfg(feature = "tdx")]
33 pub mod tdx;
34 
// CPUID feature bits
const TSC_DEADLINE_TIMER_ECX_BIT: u8 = 24; // tsc deadline timer ecx bit.
const HYPERVISOR_ECX_BIT: u8 = 31; // Hypervisor ecx bit.
const MTRR_EDX_BIT: u8 = 12; // MTRR edx bit.

// KVM feature bits (leaf 0x4000_0001 EAX; masked in generate_common_cpuid()).
const KVM_FEATURE_ASYNC_PF_INT_BIT: u8 = 14;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE_BIT: u8 = 0;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE2_BIT: u8 = 3;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE_STABLE_BIT: u8 = 24;
#[cfg(feature = "tdx")]
const KVM_FEATURE_ASYNC_PF_BIT: u8 = 4;
#[cfg(feature = "tdx")]
const KVM_FEATURE_ASYNC_PF_VMEXIT_BIT: u8 = 10;
#[cfg(feature = "tdx")]
const KVM_FEATURE_STEAL_TIME_BIT: u8 = 5;
54 
#[derive(Debug, Copy, Clone)]
/// Specifies the entry point address where the guest must start
/// executing code, as well as which of the supported boot protocols
/// is to be used to configure the guest initial state.
pub struct EntryPoint {
    /// Address in guest memory where the guest must start execution.
    /// When `None`, register/FPU/sregs setup is skipped (see `configure_vcpu`).
    pub entry_addr: Option<GuestAddress>,
}
63 
// E820 memory map entry types.
const E820_RAM: u32 = 1;
const E820_RESERVED: u32 = 2;
66 
#[derive(Clone)]
/// A single SGX Enclave Page Cache (EPC) section in guest memory.
pub struct SgxEpcSection {
    // Guest-physical start address of the section.
    start: GuestAddress,
    // Section length in bytes.
    size: GuestUsize,
}
72 
73 impl SgxEpcSection {
74     pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
75         SgxEpcSection { start, size }
76     }
77     pub fn start(&self) -> GuestAddress {
78         self.start
79     }
80     pub fn size(&self) -> GuestUsize {
81         self.size
82     }
83 }
84 
#[derive(Clone)]
/// The guest memory region holding all SGX EPC sections, keyed by section id.
pub struct SgxEpcRegion {
    // Guest-physical start address of the whole EPC region.
    start: GuestAddress,
    // Total region length in bytes.
    size: GuestUsize,
    // Individual sections within the region, indexed by identifier.
    epc_sections: BTreeMap<String, SgxEpcSection>,
}
91 
92 impl SgxEpcRegion {
93     pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
94         SgxEpcRegion {
95             start,
96             size,
97             epc_sections: BTreeMap::new(),
98         }
99     }
100     pub fn start(&self) -> GuestAddress {
101         self.start
102     }
103     pub fn size(&self) -> GuestUsize {
104         self.size
105     }
106     pub fn epc_sections(&self) -> &BTreeMap<String, SgxEpcSection> {
107         &self.epc_sections
108     }
109     pub fn insert(&mut self, id: String, epc_section: SgxEpcSection) {
110         self.epc_sections.insert(id, epc_section);
111     }
112 }
113 
// This is a workaround to the Rust enforcement specifying that any implementation of a foreign
// trait (in this case `ByteValued`) where:
// *    the type that is implementing the trait is foreign or
// *    all of the parameters being passed to the trait (if there are any) are also foreign
// is prohibited.
#[derive(Copy, Clone, Default)]
struct StartInfoWrapper(hvm_start_info);

#[derive(Copy, Clone, Default)]
struct MemmapTableEntryWrapper(hvm_memmap_table_entry);

#[derive(Copy, Clone, Default)]
struct ModlistEntryWrapper(hvm_modlist_entry);

// SAFETY: data structure only contains a series of integers
unsafe impl ByteValued for StartInfoWrapper {}
// SAFETY: data structure only contains a series of integers
unsafe impl ByteValued for MemmapTableEntryWrapper {}
// SAFETY: data structure only contains a series of integers
unsafe impl ByteValued for ModlistEntryWrapper {}
134 
// This is a workaround to the Rust enforcement specifying that any implementation of a foreign
// trait (in this case `ByteValued`) where:
// *    the type that is implementing the trait is foreign or
// *    all of the parameters being passed to the trait (if there are any) are also foreign
// is prohibited.
#[derive(Copy, Clone, Default)]
struct BootParamsWrapper(boot_params);

// SAFETY: BootParamsWrapper is a wrapper over `boot_params` (a series of ints).
unsafe impl ByteValued for BootParamsWrapper {}
145 
/// Errors specific to x86_64 platform configuration.
#[derive(Debug)]
pub enum Error {
    /// Error writing MP table to memory.
    MpTableSetup(mptable::Error),

    /// Error configuring the general purpose registers
    RegsConfiguration(regs::Error),

    /// Error configuring the special registers
    SregsConfiguration(regs::Error),

    /// Error configuring the floating point related registers
    FpuConfiguration(regs::Error),

    /// Error configuring the MSR registers
    MsrsConfiguration(regs::Error),

    /// Failed to set supported CPUs.
    SetSupportedCpusFailed(anyhow::Error),

    /// Cannot set the local interruption due to bad configuration.
    LocalIntConfiguration(anyhow::Error),

    /// Error setting up SMBIOS table
    SmbiosSetup(smbios::Error),

    /// Could not find any SGX EPC section
    NoSgxEpcSection,

    /// Missing SGX CPU feature
    MissingSgxFeature,

    /// Missing SGX_LC CPU feature
    MissingSgxLaunchControlFeature,

    /// Error getting supported CPUID through the hypervisor (kvm/mshv) API
    CpuidGetSupported(HypervisorError),

    /// Error populating CPUID with KVM HyperV emulation details
    CpuidKvmHyperV(vmm_sys_util::fam::Error),

    /// Error populating CPUID with CPU identification
    CpuidIdentification(vmm_sys_util::fam::Error),

    /// Error checking CPUID compatibility
    CpuidCheckCompatibility,

    /// Error writing EBDA address
    EbdaSetup(vm_memory::GuestMemoryError),

    /// Error retrieving TDX capabilities through the hypervisor (kvm/mshv) API
    #[cfg(feature = "tdx")]
    TdxCapabilities(HypervisorError),
}
200 
201 impl From<Error> for super::Error {
202     fn from(e: Error) -> super::Error {
203         super::Error::PlatformSpecific(e)
204     }
205 }
206 
#[allow(clippy::upper_case_acronyms)]
#[derive(Copy, Clone, Debug)]
/// Identifies one of the four general-purpose registers returned by CPUID.
pub enum CpuidReg {
    EAX,
    EBX,
    ECX,
    EDX,
}
215 
/// Describes feature bits to turn on in the CPUID entry identified by
/// `function`/`index` (applied by `CpuidPatch::patch_cpuid`).
pub struct CpuidPatch {
    /// CPUID leaf (EAX input) the patch applies to.
    pub function: u32,
    /// CPUID subleaf (ECX input) the patch applies to.
    pub index: u32,
    /// Bit to set in the entry's flags field, if any.
    pub flags_bit: Option<u8>,
    /// Bit to set in EAX, if any.
    pub eax_bit: Option<u8>,
    /// Bit to set in EBX, if any.
    pub ebx_bit: Option<u8>,
    /// Bit to set in ECX, if any.
    pub ecx_bit: Option<u8>,
    /// Bit to set in EDX, if any.
    pub edx_bit: Option<u8>,
}
225 
226 impl CpuidPatch {
227     pub fn set_cpuid_reg(
228         cpuid: &mut Vec<CpuIdEntry>,
229         function: u32,
230         index: Option<u32>,
231         reg: CpuidReg,
232         value: u32,
233     ) {
234         let mut entry_found = false;
235         for entry in cpuid.iter_mut() {
236             if entry.function == function && (index.is_none() || index.unwrap() == entry.index) {
237                 entry_found = true;
238                 match reg {
239                     CpuidReg::EAX => {
240                         entry.eax = value;
241                     }
242                     CpuidReg::EBX => {
243                         entry.ebx = value;
244                     }
245                     CpuidReg::ECX => {
246                         entry.ecx = value;
247                     }
248                     CpuidReg::EDX => {
249                         entry.edx = value;
250                     }
251                 }
252             }
253         }
254 
255         if entry_found {
256             return;
257         }
258 
259         // Entry not found, so let's add it.
260         if let Some(index) = index {
261             let mut entry = CpuIdEntry {
262                 function,
263                 index,
264                 flags: CPUID_FLAG_VALID_INDEX,
265                 ..Default::default()
266             };
267             match reg {
268                 CpuidReg::EAX => {
269                     entry.eax = value;
270                 }
271                 CpuidReg::EBX => {
272                     entry.ebx = value;
273                 }
274                 CpuidReg::ECX => {
275                     entry.ecx = value;
276                 }
277                 CpuidReg::EDX => {
278                     entry.edx = value;
279                 }
280             }
281 
282             cpuid.push(entry);
283         }
284     }
285 
286     pub fn patch_cpuid(cpuid: &mut [CpuIdEntry], patches: Vec<CpuidPatch>) {
287         for entry in cpuid {
288             for patch in patches.iter() {
289                 if entry.function == patch.function && entry.index == patch.index {
290                     if let Some(flags_bit) = patch.flags_bit {
291                         entry.flags |= 1 << flags_bit;
292                     }
293                     if let Some(eax_bit) = patch.eax_bit {
294                         entry.eax |= 1 << eax_bit;
295                     }
296                     if let Some(ebx_bit) = patch.ebx_bit {
297                         entry.ebx |= 1 << ebx_bit;
298                     }
299                     if let Some(ecx_bit) = patch.ecx_bit {
300                         entry.ecx |= 1 << ecx_bit;
301                     }
302                     if let Some(edx_bit) = patch.edx_bit {
303                         entry.edx |= 1 << edx_bit;
304                     }
305                 }
306             }
307         }
308     }
309 
310     pub fn is_feature_enabled(
311         cpuid: &[CpuIdEntry],
312         function: u32,
313         index: u32,
314         reg: CpuidReg,
315         feature_bit: usize,
316     ) -> bool {
317         let mask = 1 << feature_bit;
318 
319         for entry in cpuid {
320             if entry.function == function && entry.index == index {
321                 let reg_val = match reg {
322                     CpuidReg::EAX => entry.eax,
323                     CpuidReg::EBX => entry.ebx,
324                     CpuidReg::ECX => entry.ecx,
325                     CpuidReg::EDX => entry.edx,
326                 };
327 
328                 return (reg_val & mask) == mask;
329             }
330         }
331 
332         false
333     }
334 }
335 
/// How a source-VM CPUID feature value must relate to the destination-VM
/// value to be considered compatible (see `check_cpuid_compatibility`).
#[derive(Debug)]
enum CpuidCompatibleCheck {
    BitwiseSubset, // bitwise subset
    Equal,         // equal in value
    NumNotGreater, // smaller or equal as a number
}
342 
/// One CPUID leaf/subleaf register whose value is compared between two VMs
/// during the CPUID compatibility check.
pub struct CpuidFeatureEntry {
    // CPUID leaf (EAX input).
    function: u32,
    // CPUID subleaf (ECX input).
    index: u32,
    // Which output register holds the feature bits being compared.
    feature_reg: CpuidReg,
    // Comparison rule used for this register's value.
    compatible_check: CpuidCompatibleCheck,
}
349 
350 impl CpuidFeatureEntry {
351     fn checked_feature_entry_list() -> Vec<CpuidFeatureEntry> {
352         vec![
353             // The following list includes all hardware features bits from
354             // the CPUID Wiki Page: https://en.wikipedia.org/wiki/CPUID
355             // Leaf 0x1, ECX/EDX, feature bits
356             CpuidFeatureEntry {
357                 function: 1,
358                 index: 0,
359                 feature_reg: CpuidReg::ECX,
360                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
361             },
362             CpuidFeatureEntry {
363                 function: 1,
364                 index: 0,
365                 feature_reg: CpuidReg::EDX,
366                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
367             },
368             // Leaf 0x7, EAX/EBX/ECX/EDX, extended features
369             CpuidFeatureEntry {
370                 function: 7,
371                 index: 0,
372                 feature_reg: CpuidReg::EAX,
373                 compatible_check: CpuidCompatibleCheck::NumNotGreater,
374             },
375             CpuidFeatureEntry {
376                 function: 7,
377                 index: 0,
378                 feature_reg: CpuidReg::EBX,
379                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
380             },
381             CpuidFeatureEntry {
382                 function: 7,
383                 index: 0,
384                 feature_reg: CpuidReg::ECX,
385                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
386             },
387             CpuidFeatureEntry {
388                 function: 7,
389                 index: 0,
390                 feature_reg: CpuidReg::EDX,
391                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
392             },
393             // Leaf 0x7 subleaf 0x1, EAX, extended features
394             CpuidFeatureEntry {
395                 function: 7,
396                 index: 1,
397                 feature_reg: CpuidReg::EAX,
398                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
399             },
400             // Leaf 0x8000_0001, ECX/EDX, CPUID features bits
401             CpuidFeatureEntry {
402                 function: 0x8000_0001,
403                 index: 0,
404                 feature_reg: CpuidReg::ECX,
405                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
406             },
407             CpuidFeatureEntry {
408                 function: 0x8000_0001,
409                 index: 0,
410                 feature_reg: CpuidReg::EDX,
411                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
412             },
413             // KVM CPUID bits: https://www.kernel.org/doc/html/latest/virt/kvm/cpuid.html
414             // Leaf 0x4000_0000, EAX/EBX/ECX/EDX, KVM CPUID SIGNATURE
415             CpuidFeatureEntry {
416                 function: 0x4000_0000,
417                 index: 0,
418                 feature_reg: CpuidReg::EAX,
419                 compatible_check: CpuidCompatibleCheck::NumNotGreater,
420             },
421             CpuidFeatureEntry {
422                 function: 0x4000_0000,
423                 index: 0,
424                 feature_reg: CpuidReg::EBX,
425                 compatible_check: CpuidCompatibleCheck::Equal,
426             },
427             CpuidFeatureEntry {
428                 function: 0x4000_0000,
429                 index: 0,
430                 feature_reg: CpuidReg::ECX,
431                 compatible_check: CpuidCompatibleCheck::Equal,
432             },
433             CpuidFeatureEntry {
434                 function: 0x4000_0000,
435                 index: 0,
436                 feature_reg: CpuidReg::EDX,
437                 compatible_check: CpuidCompatibleCheck::Equal,
438             },
439             // Leaf 0x4000_0001, EAX/EBX/ECX/EDX, KVM CPUID features
440             CpuidFeatureEntry {
441                 function: 0x4000_0001,
442                 index: 0,
443                 feature_reg: CpuidReg::EAX,
444                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
445             },
446             CpuidFeatureEntry {
447                 function: 0x4000_0001,
448                 index: 0,
449                 feature_reg: CpuidReg::EBX,
450                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
451             },
452             CpuidFeatureEntry {
453                 function: 0x4000_0001,
454                 index: 0,
455                 feature_reg: CpuidReg::ECX,
456                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
457             },
458             CpuidFeatureEntry {
459                 function: 0x4000_0001,
460                 index: 0,
461                 feature_reg: CpuidReg::EDX,
462                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
463             },
464         ]
465     }
466 
467     fn get_features_from_cpuid(
468         cpuid: &[CpuIdEntry],
469         feature_entry_list: &[CpuidFeatureEntry],
470     ) -> Vec<u32> {
471         let mut features = vec![0; feature_entry_list.len()];
472         for (i, feature_entry) in feature_entry_list.iter().enumerate() {
473             for cpuid_entry in cpuid {
474                 if cpuid_entry.function == feature_entry.function
475                     && cpuid_entry.index == feature_entry.index
476                 {
477                     match feature_entry.feature_reg {
478                         CpuidReg::EAX => {
479                             features[i] = cpuid_entry.eax;
480                         }
481                         CpuidReg::EBX => {
482                             features[i] = cpuid_entry.ebx;
483                         }
484                         CpuidReg::ECX => {
485                             features[i] = cpuid_entry.ecx;
486                         }
487                         CpuidReg::EDX => {
488                             features[i] = cpuid_entry.edx;
489                         }
490                     }
491 
492                     break;
493                 }
494             }
495         }
496 
497         features
498     }
499 
500     // The function returns `Error` (a.k.a. "incompatible"), when the CPUID features from `src_vm_cpuid`
501     // is not a subset of those of the `dest_vm_cpuid`.
502     pub fn check_cpuid_compatibility(
503         src_vm_cpuid: &[CpuIdEntry],
504         dest_vm_cpuid: &[CpuIdEntry],
505     ) -> Result<(), Error> {
506         let feature_entry_list = &Self::checked_feature_entry_list();
507         let src_vm_features = Self::get_features_from_cpuid(src_vm_cpuid, feature_entry_list);
508         let dest_vm_features = Self::get_features_from_cpuid(dest_vm_cpuid, feature_entry_list);
509 
510         // Loop on feature bit and check if the 'source vm' feature is a subset
511         // of those of the 'destination vm' feature
512         let mut compatible = true;
513         for (i, (src_vm_feature, dest_vm_feature)) in src_vm_features
514             .iter()
515             .zip(dest_vm_features.iter())
516             .enumerate()
517         {
518             let entry = &feature_entry_list[i];
519             let entry_compatible = match entry.compatible_check {
520                 CpuidCompatibleCheck::BitwiseSubset => {
521                     let different_feature_bits = src_vm_feature ^ dest_vm_feature;
522                     let src_vm_feature_bits_only = different_feature_bits & src_vm_feature;
523                     src_vm_feature_bits_only == 0
524                 }
525                 CpuidCompatibleCheck::Equal => src_vm_feature == dest_vm_feature,
526                 CpuidCompatibleCheck::NumNotGreater => src_vm_feature <= dest_vm_feature,
527             };
528             if !entry_compatible {
529                 error!(
530                     "Detected incompatible CPUID entry: leaf={:#02x} (subleaf={:#02x}), register='{:?}', \
531                     compatilbe_check='{:?}', source VM feature='{:#04x}', destination VM feature'{:#04x}'.",
532                     entry.function, entry.index, entry.feature_reg,
533                     entry.compatible_check, src_vm_feature, dest_vm_feature
534                     );
535 
536                 compatible = false;
537             }
538         }
539 
540         if compatible {
541             info!("No CPU incompatibility detected.");
542             Ok(())
543         } else {
544             Err(Error::CpuidCheckCompatibility)
545         }
546     }
547 }
548 
/// Generates the CPUID entries shared by every vCPU of the VM.
///
/// Starts from the hypervisor-reported supported CPUID, then applies static
/// patches (TSC deadline timer, hypervisor bit, MTRR), optional topology and
/// SGX updates, TDX-related masking, physical address bits, host CPU
/// identification strings and, when `kvm_hyperv` is set, the KVM HyperV
/// emulation leaves.
pub fn generate_common_cpuid(
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
    topology: Option<(u8, u8, u8)>,
    sgx_epc_sections: Option<Vec<SgxEpcSection>>,
    phys_bits: u8,
    kvm_hyperv: bool,
    #[cfg(feature = "tdx")] tdx_enabled: bool,
) -> super::Result<Vec<CpuIdEntry>> {
    let cpuid_patches = vec![
        // Patch tsc deadline timer bit
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: Some(TSC_DEADLINE_TIMER_ECX_BIT),
            edx_bit: None,
        },
        // Patch hypervisor bit
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: Some(HYPERVISOR_ECX_BIT),
            edx_bit: None,
        },
        // Enable MTRR feature
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: None,
            edx_bit: Some(MTRR_EDX_BIT),
        },
    ];

    // Supported CPUID
    let mut cpuid = hypervisor.get_cpuid().map_err(Error::CpuidGetSupported)?;

    CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches);

    if let Some(t) = topology {
        update_cpuid_topology(&mut cpuid, t.0, t.1, t.2);
    }

    if let Some(sgx_epc_sections) = sgx_epc_sections {
        update_cpuid_sgx(&mut cpuid, sgx_epc_sections)?;
    }

    // When TDX is enabled, fetch its capabilities up front so the 0xd leaf
    // (XSAVE state) can be masked against the TDX-fixed XFAM bits below.
    #[cfg(feature = "tdx")]
    let tdx_capabilities = if tdx_enabled {
        let caps = hypervisor
            .tdx_capabilities()
            .map_err(Error::TdxCapabilities)?;
        info!("TDX capabilities {:#?}", caps);
        Some(caps)
    } else {
        None
    };

    // Update some existing CPUID
    for entry in cpuid.as_mut_slice().iter_mut() {
        match entry.function {
            0xd =>
            {
                #[cfg(feature = "tdx")]
                if let Some(caps) = &tdx_capabilities {
                    // Split XFAM bits between XCR0 (subleaf 0) and XSS
                    // (subleaf 1) state components.
                    let xcr0_mask: u64 = 0x82ff;
                    let xss_mask: u64 = !xcr0_mask;
                    if entry.index == 0 {
                        entry.eax &= (caps.xfam_fixed0 as u32) & (xcr0_mask as u32);
                        entry.eax |= (caps.xfam_fixed1 as u32) & (xcr0_mask as u32);
                        entry.edx &= ((caps.xfam_fixed0 & xcr0_mask) >> 32) as u32;
                        entry.edx |= ((caps.xfam_fixed1 & xcr0_mask) >> 32) as u32;
                    } else if entry.index == 1 {
                        entry.ecx &= (caps.xfam_fixed0 as u32) & (xss_mask as u32);
                        entry.ecx |= (caps.xfam_fixed1 as u32) & (xss_mask as u32);
                        entry.edx &= ((caps.xfam_fixed0 & xss_mask) >> 32) as u32;
                        entry.edx |= ((caps.xfam_fixed1 & xss_mask) >> 32) as u32;
                    }
                }
            }
            // Copy host L2 cache details if not populated by KVM
            0x8000_0006 => {
                if entry.eax == 0 && entry.ebx == 0 && entry.ecx == 0 && entry.edx == 0 {
                    // SAFETY: cpuid called with valid leaves
                    if unsafe { std::arch::x86_64::__cpuid(0x8000_0000).eax } >= 0x8000_0006 {
                        // SAFETY: cpuid called with valid leaves
                        let leaf = unsafe { std::arch::x86_64::__cpuid(0x8000_0006) };
                        entry.eax = leaf.eax;
                        entry.ebx = leaf.ebx;
                        entry.ecx = leaf.ecx;
                        entry.edx = leaf.edx;
                    }
                }
            }
            // Set CPU physical bits
            0x8000_0008 => {
                entry.eax = (entry.eax & 0xffff_ff00) | (phys_bits as u32 & 0xff);
            }
            // Disable KVM_FEATURE_ASYNC_PF_INT
            // This is required until we find out why the asynchronous page
            // fault is generating unexpected behavior when using interrupt
            // mechanism.
            // TODO: Re-enable KVM_FEATURE_ASYNC_PF_INT (#2277)
            0x4000_0001 => {
                entry.eax &= !(1 << KVM_FEATURE_ASYNC_PF_INT_BIT);

                // These features are not supported by TDX
                #[cfg(feature = "tdx")]
                if tdx_enabled {
                    entry.eax &= !(1 << KVM_FEATURE_CLOCKSOURCE_BIT
                        | 1 << KVM_FEATURE_CLOCKSOURCE2_BIT
                        | 1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT
                        | 1 << KVM_FEATURE_ASYNC_PF_BIT
                        | 1 << KVM_FEATURE_ASYNC_PF_VMEXIT_BIT
                        | 1 << KVM_FEATURE_STEAL_TIME_BIT)
                }
            }
            _ => {}
        }
    }

    // Copy CPU identification string
    for i in 0x8000_0002..=0x8000_0004 {
        cpuid.retain(|c| c.function != i);
        // SAFETY: call cpuid with valid leaves
        let leaf = unsafe { std::arch::x86_64::__cpuid(i) };
        cpuid.push(CpuIdEntry {
            function: i,
            eax: leaf.eax,
            ebx: leaf.ebx,
            ecx: leaf.ecx,
            edx: leaf.edx,
            ..Default::default()
        });
    }

    if kvm_hyperv {
        // Remove conflicting entries
        cpuid.retain(|c| c.function != 0x4000_0000);
        cpuid.retain(|c| c.function != 0x4000_0001);
        // See "Hypervisor Top Level Functional Specification" for details
        // Compliance with "Hv#1" requires leaves up to 0x4000_000a
        cpuid.push(CpuIdEntry {
            function: 0x40000000,
            eax: 0x4000000a, // Maximum cpuid leaf
            ebx: 0x756e694c, // "Linu"
            ecx: 0x564b2078, // "x KV"
            edx: 0x7648204d, // "M Hv"
            ..Default::default()
        });
        cpuid.push(CpuIdEntry {
            function: 0x40000001,
            eax: 0x31237648, // "Hv#1"
            ..Default::default()
        });
        cpuid.push(CpuIdEntry {
            function: 0x40000002,
            eax: 0x3839,  // "Build number"
            ebx: 0xa0000, // "Version"
            ..Default::default()
        });
        cpuid.push(CpuIdEntry {
            function: 0x4000_0003,
            eax: 1 << 1 // AccessPartitionReferenceCounter
                   | 1 << 2 // AccessSynicRegs
                   | 1 << 3 // AccessSyntheticTimerRegs
                   | 1 << 9, // AccessPartitionReferenceTsc
            edx: 1 << 3, // CPU dynamic partitioning
            ..Default::default()
        });
        cpuid.push(CpuIdEntry {
            function: 0x4000_0004,
            eax: 1 << 5, // Recommend relaxed timing
            ..Default::default()
        });
        for i in 0x4000_0005..=0x4000_000a {
            cpuid.push(CpuIdEntry {
                function: i,
                ..Default::default()
            });
        }
    }

    Ok(cpuid)
}
741 
742 pub fn configure_vcpu(
743     vcpu: &Arc<dyn hypervisor::Vcpu>,
744     id: u8,
745     kernel_entry_point: Option<EntryPoint>,
746     vm_memory: &GuestMemoryAtomic<GuestMemoryMmap>,
747     cpuid: Vec<CpuIdEntry>,
748     kvm_hyperv: bool,
749 ) -> super::Result<()> {
750     // Per vCPU CPUID changes; common are handled via generate_common_cpuid()
751     let mut cpuid = cpuid;
752     CpuidPatch::set_cpuid_reg(&mut cpuid, 0xb, None, CpuidReg::EDX, u32::from(id));
753     CpuidPatch::set_cpuid_reg(&mut cpuid, 0x1f, None, CpuidReg::EDX, u32::from(id));
754 
755     vcpu.set_cpuid2(&cpuid)
756         .map_err(|e| Error::SetSupportedCpusFailed(e.into()))?;
757 
758     if kvm_hyperv {
759         vcpu.enable_hyperv_synic().unwrap();
760     }
761 
762     regs::setup_msrs(vcpu).map_err(Error::MsrsConfiguration)?;
763     if let Some(kernel_entry_point) = kernel_entry_point {
764         if let Some(entry_addr) = kernel_entry_point.entry_addr {
765             // Safe to unwrap because this method is called after the VM is configured
766             regs::setup_regs(vcpu, entry_addr.raw_value()).map_err(Error::RegsConfiguration)?;
767             regs::setup_fpu(vcpu).map_err(Error::FpuConfiguration)?;
768             regs::setup_sregs(&vm_memory.memory(), vcpu).map_err(Error::SregsConfiguration)?;
769         }
770     }
771     interrupts::set_lint(vcpu).map_err(|e| Error::LocalIntConfiguration(e.into()))?;
772     Ok(())
773 }
774 
775 /// Returns a Vec of the valid memory addresses.
776 /// These should be used to configure the GuestMemory structure for the platform.
777 /// For x86_64 all addresses are valid from the start of the kernel except a
778 /// carve out at the end of 32bit address space.
779 pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, RegionType)> {
780     let reserved_memory_gap_start = layout::MEM_32BIT_RESERVED_START
781         .checked_add(layout::MEM_32BIT_DEVICES_SIZE)
782         .expect("32-bit reserved region is too large");
783 
784     let requested_memory_size = GuestAddress(size);
785     let mut regions = Vec::new();
786 
787     // case1: guest memory fits before the gap
788     if size <= layout::MEM_32BIT_RESERVED_START.raw_value() {
789         regions.push((GuestAddress(0), size as usize, RegionType::Ram));
790     // case2: guest memory extends beyond the gap
791     } else {
792         // push memory before the gap
793         regions.push((
794             GuestAddress(0),
795             layout::MEM_32BIT_RESERVED_START.raw_value() as usize,
796             RegionType::Ram,
797         ));
798         regions.push((
799             layout::RAM_64BIT_START,
800             requested_memory_size.unchecked_offset_from(layout::MEM_32BIT_RESERVED_START) as usize,
801             RegionType::Ram,
802         ));
803     }
804 
805     // Add the 32-bit device memory hole as a sub region.
806     regions.push((
807         layout::MEM_32BIT_RESERVED_START,
808         layout::MEM_32BIT_DEVICES_SIZE as usize,
809         RegionType::SubRegion,
810     ));
811 
812     // Add the 32-bit reserved memory hole as a sub region.
813     regions.push((
814         reserved_memory_gap_start,
815         (layout::MEM_32BIT_RESERVED_SIZE - layout::MEM_32BIT_DEVICES_SIZE) as usize,
816         RegionType::Reserved,
817     ));
818 
819     regions
820 }
821 
822 /// Configures the system and should be called once per vm before starting vcpu threads.
823 ///
824 /// # Arguments
825 ///
826 /// * `guest_mem` - The memory to be used by the guest.
827 /// * `cmdline_addr` - Address in `guest_mem` where the kernel command line was loaded.
828 /// * `cmdline_size` - Size of the kernel command line in bytes including the null terminator.
829 /// * `num_cpus` - Number of virtual CPUs the guest will have.
830 #[allow(clippy::too_many_arguments)]
831 pub fn configure_system(
832     guest_mem: &GuestMemoryMmap,
833     cmdline_addr: GuestAddress,
834     initramfs: &Option<InitramfsConfig>,
835     _num_cpus: u8,
836     rsdp_addr: Option<GuestAddress>,
837     sgx_epc_region: Option<SgxEpcRegion>,
838     serial_number: Option<&str>,
839     uuid: Option<&str>,
840     oem_strings: Option<&[&str]>,
841 ) -> super::Result<()> {
842     // Write EBDA address to location where ACPICA expects to find it
843     guest_mem
844         .write_obj((layout::EBDA_START.0 >> 4) as u16, layout::EBDA_POINTER)
845         .map_err(Error::EbdaSetup)?;
846 
847     let size = smbios::setup_smbios(guest_mem, serial_number, uuid, oem_strings)
848         .map_err(Error::SmbiosSetup)?;
849 
850     // Place the MP table after the SMIOS table aligned to 16 bytes
851     let offset = GuestAddress(layout::SMBIOS_START).unchecked_add(size);
852     let offset = GuestAddress((offset.0 + 16) & !0xf);
853     mptable::setup_mptable(offset, guest_mem, _num_cpus).map_err(Error::MpTableSetup)?;
854 
855     // Check that the RAM is not smaller than the RSDP start address
856     if let Some(rsdp_addr) = rsdp_addr {
857         if rsdp_addr.0 > guest_mem.last_addr().0 {
858             return Err(super::Error::RsdpPastRamEnd);
859         }
860     }
861 
862     configure_pvh(
863         guest_mem,
864         cmdline_addr,
865         initramfs,
866         rsdp_addr,
867         sgx_epc_region,
868     )
869 }
870 
/// Writes the PVH boot protocol data into guest memory: the `hvm_start_info`
/// structure, an optional initramfs module-list entry, and the e820-style
/// memory map table consumed by a PVH-capable kernel.
fn configure_pvh(
    guest_mem: &GuestMemoryMmap,
    cmdline_addr: GuestAddress,
    initramfs: &Option<InitramfsConfig>,
    rsdp_addr: Option<GuestAddress>,
    sgx_epc_region: Option<SgxEpcRegion>,
) -> super::Result<()> {
    // Magic value the guest checks to recognize a valid PVH start_info struct.
    const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336ec578;

    let mut start_info: StartInfoWrapper = StartInfoWrapper(hvm_start_info::default());

    start_info.0.magic = XEN_HVM_START_MAGIC_VALUE;
    start_info.0.version = 1; // pvh has version 1
    start_info.0.nr_modules = 0;
    start_info.0.cmdline_paddr = cmdline_addr.raw_value();
    start_info.0.memmap_paddr = layout::MEMMAP_START.raw_value();

    if let Some(rsdp_addr) = rsdp_addr {
        start_info.0.rsdp_paddr = rsdp_addr.0;
    }

    if let Some(initramfs_config) = initramfs {
        // The initramfs has been written to guest memory already, here we just need to
        // create the module structure that describes it.
        let ramdisk_mod: ModlistEntryWrapper = ModlistEntryWrapper(hvm_modlist_entry {
            paddr: initramfs_config.address.raw_value(),
            size: initramfs_config.size as u64,
            ..Default::default()
        });

        start_info.0.nr_modules += 1;
        start_info.0.modlist_paddr = layout::MODLIST_START.raw_value();

        // Write the modlist struct to guest memory.
        guest_mem
            .write_obj(ramdisk_mod, layout::MODLIST_START)
            .map_err(super::Error::ModlistSetup)?;
    }

    // Vector to hold the memory maps which needs to be written to guest memory
    // at MEMMAP_START after all of the mappings are recorded.
    let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();

    // Create the memory map entries.
    // RAM below the EBDA is always reported as usable.
    add_memmap_entry(&mut memmap, 0, layout::EBDA_START.raw_value(), E820_RAM);

    let mem_end = guest_mem.last_addr();

    if mem_end < layout::MEM_32BIT_RESERVED_START {
        // All RAM fits below the 32-bit reserved hole. last_addr() is the
        // last valid (inclusive) address, hence the +1 to get a size.
        add_memmap_entry(
            &mut memmap,
            layout::HIGH_RAM_START.raw_value(),
            mem_end.unchecked_offset_from(layout::HIGH_RAM_START) + 1,
            E820_RAM,
        );
    } else {
        // RAM straddles the hole: one entry below it, and a second one above
        // 4GiB when any RAM actually lives there.
        add_memmap_entry(
            &mut memmap,
            layout::HIGH_RAM_START.raw_value(),
            layout::MEM_32BIT_RESERVED_START.unchecked_offset_from(layout::HIGH_RAM_START),
            E820_RAM,
        );
        if mem_end > layout::RAM_64BIT_START {
            add_memmap_entry(
                &mut memmap,
                layout::RAM_64BIT_START.raw_value(),
                mem_end.unchecked_offset_from(layout::RAM_64BIT_START) + 1,
                E820_RAM,
            );
        }
    }

    // Mark the PCI MMCONFIG window reserved so the guest won't treat it as RAM.
    add_memmap_entry(
        &mut memmap,
        layout::PCI_MMCONFIG_START.0,
        layout::PCI_MMCONFIG_SIZE,
        E820_RESERVED,
    );

    if let Some(sgx_epc_region) = sgx_epc_region {
        add_memmap_entry(
            &mut memmap,
            sgx_epc_region.start().raw_value(),
            sgx_epc_region.size(),
            E820_RESERVED,
        );
    }

    start_info.0.memmap_entries = memmap.len() as u32;

    // Copy the vector with the memmap table to the MEMMAP_START address
    // which is already saved in the memmap_paddr field of hvm_start_info struct.
    let mut memmap_start_addr = layout::MEMMAP_START;

    // Verify the whole table fits in guest memory before writing any entry.
    guest_mem
        .checked_offset(
            memmap_start_addr,
            mem::size_of::<hvm_memmap_table_entry>() * start_info.0.memmap_entries as usize,
        )
        .ok_or(super::Error::MemmapTablePastRamEnd)?;

    // For every entry in the memmap vector, create a MemmapTableEntryWrapper
    // and write it to guest memory.
    for memmap_entry in memmap {
        let map_entry_wrapper: MemmapTableEntryWrapper = MemmapTableEntryWrapper(memmap_entry);

        guest_mem
            .write_obj(map_entry_wrapper, memmap_start_addr)
            .map_err(|_| super::Error::MemmapTableSetup)?;
        memmap_start_addr =
            memmap_start_addr.unchecked_add(mem::size_of::<hvm_memmap_table_entry>() as u64);
    }

    // The hvm_start_info struct itself must be stored at PVH_START_INFO
    // address, and %rbx will be initialized to contain PVH_INFO_START prior to
    // starting the guest, as required by the PVH ABI.
    let start_info_addr = layout::PVH_INFO_START;

    guest_mem
        .checked_offset(start_info_addr, mem::size_of::<hvm_start_info>())
        .ok_or(super::Error::StartInfoPastRamEnd)?;

    // Write the start_info struct to guest memory.
    guest_mem
        .write_obj(start_info, start_info_addr)
        .map_err(|_| super::Error::StartInfoSetup)?;

    Ok(())
}
1000 
1001 fn add_memmap_entry(memmap: &mut Vec<hvm_memmap_table_entry>, addr: u64, size: u64, mem_type: u32) {
1002     // Add the table entry to the vector
1003     memmap.push(hvm_memmap_table_entry {
1004         addr,
1005         size,
1006         type_: mem_type,
1007         reserved: 0,
1008     });
1009 }
1010 
1011 /// Returns the memory address where the initramfs could be loaded.
1012 pub fn initramfs_load_addr(
1013     guest_mem: &GuestMemoryMmap,
1014     initramfs_size: usize,
1015 ) -> super::Result<u64> {
1016     let first_region = guest_mem
1017         .find_region(GuestAddress::new(0))
1018         .ok_or(super::Error::InitramfsAddress)?;
1019     // It's safe to cast to usize because the size of a region can't be greater than usize.
1020     let lowmem_size = first_region.len() as usize;
1021 
1022     if lowmem_size < initramfs_size {
1023         return Err(super::Error::InitramfsAddress);
1024     }
1025 
1026     let aligned_addr: u64 = ((lowmem_size - initramfs_size) & !(crate::pagesize() - 1)) as u64;
1027     Ok(aligned_addr)
1028 }
1029 
/// Queries the host CPU for the number of usable physical address bits.
///
/// Falls back to 36 bits when CPUID leaf 0x8000_0008 is unavailable, and
/// subtracts the bits repurposed by AMD SME when that feature is enabled.
pub fn get_host_cpu_phys_bits() -> u8 {
    // SAFETY: call cpuid with valid leaves
    unsafe {
        let max_ext = x86_64::__cpuid(0x8000_0000);

        // Detect and handle AMD SME (Secure Memory Encryption) properly.
        // Some physical address bits may become reserved when the feature is enabled.
        // See AMD64 Architecture Programmer's Manual Volume 2, Section 7.10.1
        let is_amd = max_ext.ebx == 0x6874_7541 // Vendor ID: AuthenticAMD
            && max_ext.ecx == 0x444d_4163
            && max_ext.edx == 0x6974_6e65;

        let reduced = if max_ext.eax >= 0x8000_001f && is_amd {
            let sme_leaf = x86_64::__cpuid(0x8000_001f);
            if sme_leaf.eax & 0x1 != 0 {
                // EBX[11:6] holds the number of address bits lost to SME.
                (sme_leaf.ebx >> 6) & 0x3f
            } else {
                0
            }
        } else {
            0
        };

        if max_ext.eax >= 0x8000_0008 {
            // EAX[7:0] of leaf 0x8000_0008 is the physical address width.
            ((x86_64::__cpuid(0x8000_0008).eax & 0xff) - reduced) as u8
        } else {
            // Leaf unavailable: assume the architectural baseline of 36 bits.
            36
        }
    }
}
1057 
1058 fn update_cpuid_topology(
1059     cpuid: &mut Vec<CpuIdEntry>,
1060     threads_per_core: u8,
1061     cores_per_die: u8,
1062     dies_per_package: u8,
1063 ) {
1064     let thread_width = 8 - (threads_per_core - 1).leading_zeros();
1065     let core_width = (8 - (cores_per_die - 1).leading_zeros()) + thread_width;
1066     let die_width = (8 - (dies_per_package - 1).leading_zeros()) + core_width;
1067 
1068     // CPU Topology leaf 0xb
1069     CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(0), CpuidReg::EAX, thread_width);
1070     CpuidPatch::set_cpuid_reg(
1071         cpuid,
1072         0xb,
1073         Some(0),
1074         CpuidReg::EBX,
1075         u32::from(threads_per_core),
1076     );
1077     CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(0), CpuidReg::ECX, 1 << 8);
1078 
1079     CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(1), CpuidReg::EAX, die_width);
1080     CpuidPatch::set_cpuid_reg(
1081         cpuid,
1082         0xb,
1083         Some(1),
1084         CpuidReg::EBX,
1085         u32::from(dies_per_package * cores_per_die * threads_per_core),
1086     );
1087     CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(1), CpuidReg::ECX, 2 << 8);
1088 
1089     // CPU Topology leaf 0x1f
1090     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(0), CpuidReg::EAX, thread_width);
1091     CpuidPatch::set_cpuid_reg(
1092         cpuid,
1093         0x1f,
1094         Some(0),
1095         CpuidReg::EBX,
1096         u32::from(threads_per_core),
1097     );
1098     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(0), CpuidReg::ECX, 1 << 8);
1099 
1100     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(1), CpuidReg::EAX, core_width);
1101     CpuidPatch::set_cpuid_reg(
1102         cpuid,
1103         0x1f,
1104         Some(1),
1105         CpuidReg::EBX,
1106         u32::from(cores_per_die * threads_per_core),
1107     );
1108     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(1), CpuidReg::ECX, 2 << 8);
1109 
1110     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(2), CpuidReg::EAX, die_width);
1111     CpuidPatch::set_cpuid_reg(
1112         cpuid,
1113         0x1f,
1114         Some(2),
1115         CpuidReg::EBX,
1116         u32::from(dies_per_package * cores_per_die * threads_per_core),
1117     );
1118     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(2), CpuidReg::ECX, 5 << 8);
1119 }
1120 
// The goal is to update the CPUID sub-leaves to reflect the number of EPC
// sections exposed to the guest.
fn update_cpuid_sgx(
    cpuid: &mut Vec<CpuIdEntry>,
    epc_sections: Vec<SgxEpcSection>,
) -> Result<(), Error> {
    // Something's wrong if there's no EPC section.
    if epc_sections.is_empty() {
        return Err(Error::NoSgxEpcSection);
    }
    // We can't go further if the hypervisor does not support SGX feature.
    // (Leaf 0x7, subleaf 0, EBX bit 2 is the SGX feature flag.)
    if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::EBX, 2) {
        return Err(Error::MissingSgxFeature);
    }
    // We can't go further if the hypervisor does not support SGX_LC feature.
    // (Leaf 0x7, subleaf 0, ECX bit 30 is the SGX Launch Control flag.)
    if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::ECX, 30) {
        return Err(Error::MissingSgxLaunchControlFeature);
    }

    // Get host CPUID for leaf 0x12, subleaf 0x2. This is to retrieve EPC
    // properties such as confidentiality and integrity.
    // SAFETY: call cpuid with valid leaves
    let leaf = unsafe { std::arch::x86_64::__cpuid_count(0x12, 0x2) };

    for (i, epc_section) in epc_sections.iter().enumerate() {
        // EPC sections are enumerated starting at subleaf 2 of leaf 0x12.
        let subleaf_idx = i + 2;
        let start = epc_section.start().raw_value();
        let size = epc_section.size();
        // Addresses/sizes use 4KiB granularity: low-leaf bits 31:12 plus the
        // high 32 bits in EBX/EDX. The 0x1 in EAX marks the sub-leaf as a
        // valid EPC section; the low ECX bits carry the host EPC properties
        // (per the Intel SDM description of CPUID leaf 0x12).
        let eax = (start & 0xffff_f000) as u32 | 0x1;
        let ebx = (start >> 32) as u32;
        let ecx = (size & 0xffff_f000) as u32 | (leaf.ecx & 0xf);
        let edx = (size >> 32) as u32;
        // SGX EPC enumeration leaf 0x12
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, eax);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, ebx);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, ecx);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, edx);
    }

    // Add one NULL entry to terminate the dynamic list
    let subleaf_idx = epc_sections.len() + 2;
    // SGX EPC enumeration leaf 0x12
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, 0);

    Ok(())
}
1170 
#[cfg(test)]
mod tests {
    use super::*;

    // Memory below the 32-bit hole yields one RAM region plus the two
    // always-present sub-regions (device hole and reserved hole).
    #[test]
    fn regions_lt_4gb() {
        let regions = arch_memory_regions(1 << 29);
        assert_eq!(3, regions.len());
        assert_eq!(GuestAddress(0), regions[0].0);
        assert_eq!(1usize << 29, regions[0].1);
    }

    // Memory spilling past the hole is split into a low RAM region and a
    // second RAM region starting at 4GiB, plus the two sub-regions.
    #[test]
    fn regions_gt_4gb() {
        let regions = arch_memory_regions((1 << 32) + 0x8000);
        assert_eq!(4, regions.len());
        assert_eq!(GuestAddress(0), regions[0].0);
        assert_eq!(GuestAddress(1 << 32), regions[1].0);
    }

    #[test]
    fn test_system_configuration() {
        let no_vcpus = 4;
        // 64KiB of RAM is not enough to hold the boot tables (and the RSDP
        // address lies past the end of RAM), so this call must fail.
        let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let config_err = configure_system(
            &gm,
            GuestAddress(0),
            &None,
            1,
            Some(layout::RSDP_POINTER),
            None,
            None,
            None,
            None,
        );
        assert!(config_err.is_err());

        // Now assigning some memory that falls before the 32bit memory hole.
        let mem_size = 128 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        // Only back the RAM regions; sub-regions/reserved holes get no memory.
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();

        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();

        // Now assigning some memory that is equal to the start of the 32bit memory hole.
        let mem_size = 3328 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();

        // A second call on the same guest memory must also succeed.
        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();

        // Now assigning some memory that falls after the 32bit memory hole.
        let mem_size = 3330 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();

        // And again, reconfiguring must be repeatable.
        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();
    }

    #[test]
    fn test_add_memmap_entry() {
        let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();

        let expected_memmap = vec![
            hvm_memmap_table_entry {
                addr: 0x0,
                size: 0x1000,
                type_: E820_RAM,
                ..Default::default()
            },
            hvm_memmap_table_entry {
                addr: 0x10000,
                size: 0xa000,
                type_: E820_RESERVED,
                ..Default::default()
            },
        ];

        add_memmap_entry(&mut memmap, 0, 0x1000, E820_RAM);
        add_memmap_entry(&mut memmap, 0x10000, 0xa000, E820_RESERVED);

        // Compare Debug representations since the entry type does not
        // implement PartialEq here.
        assert_eq!(format!("{:?}", memmap), format!("{:?}", expected_memmap));
    }
}
1327