// Copyright © 2020, Oracle and/or its affiliates.
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::sync::Arc;
pub mod interrupts;
pub mod layout;
mod mpspec;
mod mptable;
pub mod regs;
use crate::GuestMemoryMmap;
use crate::InitramfsConfig;
use crate::RegionType;
use hypervisor::arch::x86::{CpuIdEntry, CPUID_FLAG_VALID_INDEX};
use hypervisor::HypervisorError;
use linux_loader::loader::bootparam::boot_params;
use linux_loader::loader::elf::start_info::{
    hvm_memmap_table_entry, hvm_modlist_entry, hvm_start_info,
};
use std::collections::BTreeMap;
use std::mem;
use vm_memory::{
    Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
    GuestMemoryRegion, GuestUsize,
};
mod smbios;
use std::arch::x86_64;
#[cfg(feature = "tdx")]
pub mod tdx;

// CPUID feature bits
const TSC_DEADLINE_TIMER_ECX_BIT: u8 = 24; // TSC deadline timer ECX bit.
const HYPERVISOR_ECX_BIT: u8 = 31; // Hypervisor ECX bit.
const MTRR_EDX_BIT: u8 = 12; // MTRR EDX bit.

// KVM feature bits
const KVM_FEATURE_ASYNC_PF_INT_BIT: u8 = 14;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE_BIT: u8 = 0;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE2_BIT: u8 = 3;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE_STABLE_BIT: u8 = 24;
#[cfg(feature = "tdx")]
const KVM_FEATURE_ASYNC_PF_BIT: u8 = 4;
#[cfg(feature = "tdx")]
const KVM_FEATURE_ASYNC_PF_VMEXIT_BIT: u8 = 10;
#[cfg(feature = "tdx")]
const KVM_FEATURE_STEAL_TIME_BIT: u8 = 5;

#[derive(Debug, Copy, Clone)]
/// Specifies the entry point address where the guest must start
/// executing code, as well as which of the supported boot protocols
/// is to be used to configure the guest initial state.
pub struct EntryPoint {
    /// Address in guest memory where the guest must start execution
    pub entry_addr: Option<GuestAddress>,
}

const E820_RAM: u32 = 1;
const E820_RESERVED: u32 = 2;

#[derive(Clone)]
pub struct SgxEpcSection {
    start: GuestAddress,
    size: GuestUsize,
}

impl SgxEpcSection {
    pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
        SgxEpcSection { start, size }
    }
    pub fn start(&self) -> GuestAddress {
        self.start
    }
    pub fn size(&self) -> GuestUsize {
        self.size
    }
}

#[derive(Clone)]
pub struct SgxEpcRegion {
    start: GuestAddress,
    size: GuestUsize,
    epc_sections: BTreeMap<String, SgxEpcSection>,
}

impl SgxEpcRegion {
    pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
        SgxEpcRegion {
            start,
            size,
            epc_sections: BTreeMap::new(),
        }
    }
    pub fn start(&self) -> GuestAddress {
        self.start
    }
    pub fn size(&self) -> GuestUsize {
        self.size
    }
    pub fn epc_sections(&self) -> &BTreeMap<String, SgxEpcSection> {
        &self.epc_sections
    }
    pub fn insert(&mut self, id: String, epc_section: SgxEpcSection) {
        self.epc_sections.insert(id, epc_section);
    }
}

// This is a workaround to the Rust orphan rule, which prohibits implementing
// a foreign trait (in this case `ByteValued`) on a foreign type. Wrapping the
// foreign type in a local newtype makes the implementation legal.
#[derive(Copy, Clone, Default)]
struct StartInfoWrapper(hvm_start_info);

#[derive(Copy, Clone, Default)]
struct MemmapTableEntryWrapper(hvm_memmap_table_entry);

#[derive(Copy, Clone, Default)]
struct ModlistEntryWrapper(hvm_modlist_entry);

// SAFETY: These data structures only contain a series of integers
unsafe impl ByteValued for StartInfoWrapper {}
unsafe impl ByteValued for MemmapTableEntryWrapper {}
unsafe impl ByteValued for ModlistEntryWrapper {}
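
// A minimal sketch of the newtype pattern used above (illustrative only;
// `Foreign` is a hypothetical placeholder for the `linux_loader` structs):
//
//     #[derive(Copy, Clone, Default)]
//     struct ForeignWrapper(Foreign); // local newtype over the foreign type
//
//     // SAFETY: sound only if `Foreign` is plain old data.
//     unsafe impl ByteValued for ForeignWrapper {}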

// This is a workaround to the Rust orphan rule, which prohibits implementing
// a foreign trait (in this case `ByteValued`) on a foreign type.
#[derive(Copy, Clone, Default)]
struct BootParamsWrapper(boot_params);

// SAFETY: BootParamsWrapper is a wrapper over `boot_params` (a series of ints).
unsafe impl ByteValued for BootParamsWrapper {}

#[derive(Debug)]
pub enum Error {
    /// Error writing MP table to memory.
    MpTableSetup(mptable::Error),

    /// Error configuring the general purpose registers
    RegsConfiguration(regs::Error),

    /// Error configuring the special registers
    SregsConfiguration(regs::Error),

    /// Error configuring the floating point related registers
    FpuConfiguration(regs::Error),

    /// Error configuring the MSR registers
    MsrsConfiguration(regs::Error),

    /// Failed to set supported CPUs.
    SetSupportedCpusFailed(anyhow::Error),

    /// Cannot set the local interruption due to bad configuration.
    LocalIntConfiguration(anyhow::Error),

    /// Error setting up SMBIOS table
    SmbiosSetup(smbios::Error),

    /// Could not find any SGX EPC section
    NoSgxEpcSection,

    /// Missing SGX CPU feature
    MissingSgxFeature,

    /// Missing SGX_LC CPU feature
    MissingSgxLaunchControlFeature,

    /// Error getting supported CPUID through the hypervisor (kvm/mshv) API
    CpuidGetSupported(HypervisorError),

    /// Error populating CPUID with KVM HyperV emulation details
    CpuidKvmHyperV(vmm_sys_util::fam::Error),

    /// Error populating CPUID with CPU identification
    CpuidIdentification(vmm_sys_util::fam::Error),

    /// Error checking CPUID compatibility
    CpuidCheckCompatibility,

    /// Error writing EBDA address
    EbdaSetup(vm_memory::GuestMemoryError),

    /// Error retrieving TDX capabilities through the hypervisor (kvm/mshv) API
    #[cfg(feature = "tdx")]
    TdxCapabilities(HypervisorError),
}

impl From<Error> for super::Error {
    fn from(e: Error) -> super::Error {
        super::Error::PlatformSpecific(e)
    }
}

#[allow(clippy::upper_case_acronyms)]
#[derive(Copy, Clone, Debug)]
pub enum CpuidReg {
    EAX,
    EBX,
    ECX,
    EDX,
}

pub struct CpuidPatch {
    pub function: u32,
    pub index: u32,
    pub flags_bit: Option<u8>,
    pub eax_bit: Option<u8>,
    pub ebx_bit: Option<u8>,
    pub ecx_bit: Option<u8>,
    pub edx_bit: Option<u8>,
}

impl CpuidPatch {
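    /// Set a single register value on the CPUID entry matching `function`
    /// (and `index`, when one is given), appending a new entry if none
    /// matches.
    ///
    /// A minimal usage sketch (illustrative only, not part of the original
    /// sources):
    ///
    /// ```ignore
    /// let mut cpuid: Vec<CpuIdEntry> = Vec::new();
    /// // Report APIC ID 3 in EDX of leaf 0xb, subleaf 0.
    /// CpuidPatch::set_cpuid_reg(&mut cpuid, 0xb, Some(0), CpuidReg::EDX, 3);
    /// ```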
    pub fn set_cpuid_reg(
        cpuid: &mut Vec<CpuIdEntry>,
        function: u32,
        index: Option<u32>,
        reg: CpuidReg,
        value: u32,
    ) {
        let mut entry_found = false;
        for entry in cpuid.iter_mut() {
            if entry.function == function && (index.is_none() || index.unwrap() == entry.index) {
                entry_found = true;
                match reg {
                    CpuidReg::EAX => {
                        entry.eax = value;
                    }
                    CpuidReg::EBX => {
                        entry.ebx = value;
                    }
                    CpuidReg::ECX => {
                        entry.ecx = value;
                    }
                    CpuidReg::EDX => {
                        entry.edx = value;
                    }
                }
            }
        }

        if entry_found {
            return;
        }

        // Entry not found, so let's add it.
        if let Some(index) = index {
            let mut entry = CpuIdEntry {
                function,
                index,
                flags: CPUID_FLAG_VALID_INDEX,
                ..Default::default()
            };
            match reg {
                CpuidReg::EAX => {
                    entry.eax = value;
                }
                CpuidReg::EBX => {
                    entry.ebx = value;
                }
                CpuidReg::ECX => {
                    entry.ecx = value;
                }
                CpuidReg::EDX => {
                    entry.edx = value;
                }
            }

            cpuid.push(entry);
        }
    }
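
    /// OR feature bits into the CPUID entries matched by each patch.
    ///
    /// A minimal usage sketch (illustrative only, not part of the original
    /// sources):
    ///
    /// ```ignore
    /// // Set bit 24 (TSC deadline timer) in ECX of leaf 0x1.
    /// let patches = vec![CpuidPatch {
    ///     function: 1,
    ///     index: 0,
    ///     flags_bit: None,
    ///     eax_bit: None,
    ///     ebx_bit: None,
    ///     ecx_bit: Some(24),
    ///     edx_bit: None,
    /// }];
    /// CpuidPatch::patch_cpuid(&mut cpuid, patches);
    /// ```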
    pub fn patch_cpuid(cpuid: &mut [CpuIdEntry], patches: Vec<CpuidPatch>) {
        for entry in cpuid {
            for patch in patches.iter() {
                if entry.function == patch.function && entry.index == patch.index {
                    if let Some(flags_bit) = patch.flags_bit {
                        entry.flags |= 1 << flags_bit;
                    }
                    if let Some(eax_bit) = patch.eax_bit {
                        entry.eax |= 1 << eax_bit;
                    }
                    if let Some(ebx_bit) = patch.ebx_bit {
                        entry.ebx |= 1 << ebx_bit;
                    }
                    if let Some(ecx_bit) = patch.ecx_bit {
                        entry.ecx |= 1 << ecx_bit;
                    }
                    if let Some(edx_bit) = patch.edx_bit {
                        entry.edx |= 1 << edx_bit;
                    }
                }
            }
        }
    }

    pub fn is_feature_enabled(
        cpuid: &[CpuIdEntry],
        function: u32,
        index: u32,
        reg: CpuidReg,
        feature_bit: usize,
    ) -> bool {
        let mask = 1 << feature_bit;

        for entry in cpuid {
            if entry.function == function && entry.index == index {
                let reg_val = match reg {
                    CpuidReg::EAX => entry.eax,
                    CpuidReg::EBX => entry.ebx,
                    CpuidReg::ECX => entry.ecx,
                    CpuidReg::EDX => entry.edx,
                };

                return (reg_val & mask) == mask;
            }
        }

        false
    }
}

#[derive(Debug)]
enum CpuidCompatibleCheck {
    BitwiseSubset, // bitwise subset
    Equal,         // equal in value
    NumNotGreater, // smaller or equal as a number
}

pub struct CpuidFeatureEntry {
    function: u32,
    index: u32,
    feature_reg: CpuidReg,
    compatible_check: CpuidCompatibleCheck,
}

impl CpuidFeatureEntry {
    fn checked_feature_entry_list() -> Vec<CpuidFeatureEntry> {
        vec![
            // The following list includes all hardware feature bits from
            // the CPUID Wikipedia page: https://en.wikipedia.org/wiki/CPUID
            // Leaf 0x1, ECX/EDX, feature bits
            CpuidFeatureEntry {
                function: 1,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 1,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            // Leaf 0x7, EAX/EBX/ECX/EDX, extended features
            CpuidFeatureEntry {
                function: 7,
                index: 0,
                feature_reg: CpuidReg::EAX,
                compatible_check: CpuidCompatibleCheck::NumNotGreater,
            },
            CpuidFeatureEntry {
                function: 7,
                index: 0,
                feature_reg: CpuidReg::EBX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 7,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 7,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            // Leaf 0x7 subleaf 0x1, EAX, extended features
            CpuidFeatureEntry {
                function: 7,
                index: 1,
                feature_reg: CpuidReg::EAX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            // Leaf 0x8000_0001, ECX/EDX, CPUID feature bits
            CpuidFeatureEntry {
                function: 0x8000_0001,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 0x8000_0001,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            // KVM CPUID bits: https://www.kernel.org/doc/html/latest/virt/kvm/cpuid.html
            // Leaf 0x4000_0000, EAX/EBX/ECX/EDX, KVM CPUID SIGNATURE
            CpuidFeatureEntry {
                function: 0x4000_0000,
                index: 0,
                feature_reg: CpuidReg::EAX,
                compatible_check: CpuidCompatibleCheck::NumNotGreater,
            },
            CpuidFeatureEntry {
                function: 0x4000_0000,
                index: 0,
                feature_reg: CpuidReg::EBX,
                compatible_check: CpuidCompatibleCheck::Equal,
            },
            CpuidFeatureEntry {
                function: 0x4000_0000,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::Equal,
            },
            CpuidFeatureEntry {
                function: 0x4000_0000,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::Equal,
            },
            // Leaf 0x4000_0001, EAX/EBX/ECX/EDX, KVM CPUID features
            CpuidFeatureEntry {
                function: 0x4000_0001,
                index: 0,
                feature_reg: CpuidReg::EAX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 0x4000_0001,
                index: 0,
                feature_reg: CpuidReg::EBX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 0x4000_0001,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 0x4000_0001,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
        ]
    }

    fn get_features_from_cpuid(
        cpuid: &[CpuIdEntry],
        feature_entry_list: &[CpuidFeatureEntry],
    ) -> Vec<u32> {
        let mut features = vec![0; feature_entry_list.len()];
        for (i, feature_entry) in feature_entry_list.iter().enumerate() {
            for cpuid_entry in cpuid {
                if cpuid_entry.function == feature_entry.function
                    && cpuid_entry.index == feature_entry.index
                {
                    match feature_entry.feature_reg {
                        CpuidReg::EAX => {
                            features[i] = cpuid_entry.eax;
                        }
                        CpuidReg::EBX => {
                            features[i] = cpuid_entry.ebx;
                        }
                        CpuidReg::ECX => {
                            features[i] = cpuid_entry.ecx;
                        }
                        CpuidReg::EDX => {
                            features[i] = cpuid_entry.edx;
                        }
                    }

                    break;
                }
            }
        }

        features
    }

    // The function returns `Error` (a.k.a. "incompatible") when the CPUID
    // features from `src_vm_cpuid` are not a subset of those of `dest_vm_cpuid`.
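    // For example (illustrative numbers, not from the original sources): under
    // `BitwiseSubset`, src = 0b0110 and dest = 0b1110 are compatible because
    // (src ^ dest) & src == 0b1000 & 0b0110 == 0, i.e. no feature bit is set
    // in the source that is missing from the destination.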
    pub fn check_cpuid_compatibility(
        src_vm_cpuid: &[CpuIdEntry],
        dest_vm_cpuid: &[CpuIdEntry],
    ) -> Result<(), Error> {
        let feature_entry_list = &Self::checked_feature_entry_list();
        let src_vm_features = Self::get_features_from_cpuid(src_vm_cpuid, feature_entry_list);
        let dest_vm_features = Self::get_features_from_cpuid(dest_vm_cpuid, feature_entry_list);

        // Loop over the feature bits and check that the 'source vm' features
        // are a subset of the 'destination vm' features.
        let mut compatible = true;
        for (i, (src_vm_feature, dest_vm_feature)) in src_vm_features
            .iter()
            .zip(dest_vm_features.iter())
            .enumerate()
        {
            let entry = &feature_entry_list[i];
            let entry_compatible = match entry.compatible_check {
                CpuidCompatibleCheck::BitwiseSubset => {
                    let different_feature_bits = src_vm_feature ^ dest_vm_feature;
                    let src_vm_feature_bits_only = different_feature_bits & src_vm_feature;
                    src_vm_feature_bits_only == 0
                }
                CpuidCompatibleCheck::Equal => src_vm_feature == dest_vm_feature,
                CpuidCompatibleCheck::NumNotGreater => src_vm_feature <= dest_vm_feature,
            };
            if !entry_compatible {
                error!(
                    "Detected incompatible CPUID entry: leaf={:#02x} (subleaf={:#02x}), register='{:?}', \
                    compatible_check='{:?}', source VM feature='{:#04x}', destination VM feature='{:#04x}'.",
                    entry.function, entry.index, entry.feature_reg,
                    entry.compatible_check, src_vm_feature, dest_vm_feature
                );

                compatible = false;
            }
        }

        if compatible {
            info!("No CPU incompatibility detected.");
            Ok(())
        } else {
            Err(Error::CpuidCheckCompatibility)
        }
    }
}

pub fn generate_common_cpuid(
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
    topology: Option<(u8, u8, u8)>,
    sgx_epc_sections: Option<Vec<SgxEpcSection>>,
    phys_bits: u8,
    kvm_hyperv: bool,
    #[cfg(feature = "tdx")] tdx_enabled: bool,
) -> super::Result<Vec<CpuIdEntry>> {
    let cpuid_patches = vec![
        // Patch TSC deadline timer bit
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: Some(TSC_DEADLINE_TIMER_ECX_BIT),
            edx_bit: None,
        },
        // Patch hypervisor bit
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: Some(HYPERVISOR_ECX_BIT),
            edx_bit: None,
        },
        // Enable MTRR feature
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: None,
            edx_bit: Some(MTRR_EDX_BIT),
        },
    ];

    // Supported CPUID
    let mut cpuid = hypervisor.get_cpuid().map_err(Error::CpuidGetSupported)?;

    CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches);

    if let Some(t) = topology {
        update_cpuid_topology(&mut cpuid, t.0, t.1, t.2);
    }

    if let Some(sgx_epc_sections) = sgx_epc_sections {
        update_cpuid_sgx(&mut cpuid, sgx_epc_sections)?;
    }

    #[cfg(feature = "tdx")]
    let tdx_capabilities = if tdx_enabled {
        let caps = hypervisor
            .tdx_capabilities()
            .map_err(Error::TdxCapabilities)?;
        info!("TDX capabilities {:#?}", caps);
        Some(caps)
    } else {
        None
    };

    // Update some existing CPUID
    for entry in cpuid.as_mut_slice().iter_mut() {
        match entry.function {
            0xd =>
            {
                #[cfg(feature = "tdx")]
                if let Some(caps) = &tdx_capabilities {
                    let xcr0_mask: u64 = 0x82ff;
                    let xss_mask: u64 = !xcr0_mask;
                    if entry.index == 0 {
                        entry.eax &= (caps.xfam_fixed0 as u32) & (xcr0_mask as u32);
                        entry.eax |= (caps.xfam_fixed1 as u32) & (xcr0_mask as u32);
                        entry.edx &= ((caps.xfam_fixed0 & xcr0_mask) >> 32) as u32;
                        entry.edx |= ((caps.xfam_fixed1 & xcr0_mask) >> 32) as u32;
                    } else if entry.index == 1 {
                        entry.ecx &= (caps.xfam_fixed0 as u32) & (xss_mask as u32);
                        entry.ecx |= (caps.xfam_fixed1 as u32) & (xss_mask as u32);
                        entry.edx &= ((caps.xfam_fixed0 & xss_mask) >> 32) as u32;
                        entry.edx |= ((caps.xfam_fixed1 & xss_mask) >> 32) as u32;
                    }
                }
            }
            // Set CPU physical bits
            0x8000_0008 => {
                entry.eax = (entry.eax & 0xffff_ff00) | (phys_bits as u32 & 0xff);
            }
            // Disable KVM_FEATURE_ASYNC_PF_INT
            // This is required until we find out why the asynchronous page
            // fault is generating unexpected behavior when using the
            // interrupt mechanism.
            // TODO: Re-enable KVM_FEATURE_ASYNC_PF_INT (#2277)
            0x4000_0001 => {
                entry.eax &= !(1 << KVM_FEATURE_ASYNC_PF_INT_BIT);

                // These features are not supported by TDX
                #[cfg(feature = "tdx")]
                if tdx_enabled {
                    entry.eax &= !(1 << KVM_FEATURE_CLOCKSOURCE_BIT
                        | 1 << KVM_FEATURE_CLOCKSOURCE2_BIT
                        | 1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT
                        | 1 << KVM_FEATURE_ASYNC_PF_BIT
                        | 1 << KVM_FEATURE_ASYNC_PF_VMEXIT_BIT
                        | 1 << KVM_FEATURE_STEAL_TIME_BIT)
                }
            }
            _ => {}
        }
    }

    // Copy the CPU identification string
    for i in 0x8000_0002..=0x8000_0004 {
        cpuid.retain(|c| c.function != i);
        let leaf = unsafe { std::arch::x86_64::__cpuid(i) };
        cpuid.push(CpuIdEntry {
            function: i,
            eax: leaf.eax,
            ebx: leaf.ebx,
            ecx: leaf.ecx,
            edx: leaf.edx,
            ..Default::default()
        });
    }

    if kvm_hyperv {
        // Remove conflicting entries
        cpuid.retain(|c| c.function != 0x4000_0000);
        cpuid.retain(|c| c.function != 0x4000_0001);
        // See "Hypervisor Top Level Functional Specification" for details
        // Compliance with "Hv#1" requires leaves up to 0x4000_000a
        cpuid.push(CpuIdEntry {
            function: 0x4000_0000,
            eax: 0x4000_000a, // Maximum CPUID leaf
            ebx: 0x756e694c, // "Linu"
            ecx: 0x564b2078, // "x KV"
            edx: 0x7648204d, // "M Hv"
            ..Default::default()
        });
        cpuid.push(CpuIdEntry {
            function: 0x4000_0001,
            eax: 0x31237648, // "Hv#1"
            ..Default::default()
        });
        cpuid.push(CpuIdEntry {
            function: 0x4000_0002,
            eax: 0x3839,  // "Build number"
            ebx: 0xa0000, // "Version"
            ..Default::default()
        });
        cpuid.push(CpuIdEntry {
            function: 0x4000_0003,
            eax: 1 << 1 // AccessPartitionReferenceCounter
                   | 1 << 2 // AccessSynicRegs
                   | 1 << 3 // AccessSyntheticTimerRegs
                   | 1 << 9, // AccessPartitionReferenceTsc
            edx: 1 << 3, // CPU dynamic partitioning
            ..Default::default()
        });
        cpuid.push(CpuIdEntry {
            function: 0x4000_0004,
            eax: 1 << 5, // Recommend relaxed timing
            ..Default::default()
        });
        for i in 0x4000_0005..=0x4000_000a {
            cpuid.push(CpuIdEntry {
                function: i,
                ..Default::default()
            });
        }
    }

    Ok(cpuid)
}

pub fn configure_vcpu(
    vcpu: &Arc<dyn hypervisor::Vcpu>,
    id: u8,
    kernel_entry_point: Option<EntryPoint>,
    vm_memory: &GuestMemoryAtomic<GuestMemoryMmap>,
    cpuid: Vec<CpuIdEntry>,
    kvm_hyperv: bool,
) -> super::Result<()> {
    // Per-vCPU CPUID changes; common ones are handled via generate_common_cpuid()
    let mut cpuid = cpuid;
    CpuidPatch::set_cpuid_reg(&mut cpuid, 0xb, None, CpuidReg::EDX, u32::from(id));
    CpuidPatch::set_cpuid_reg(&mut cpuid, 0x1f, None, CpuidReg::EDX, u32::from(id));

    vcpu.set_cpuid2(&cpuid)
        .map_err(|e| Error::SetSupportedCpusFailed(e.into()))?;

    if kvm_hyperv {
        vcpu.enable_hyperv_synic().unwrap();
    }

    regs::setup_msrs(vcpu).map_err(Error::MsrsConfiguration)?;
    if let Some(kernel_entry_point) = kernel_entry_point {
        if let Some(entry_addr) = kernel_entry_point.entry_addr {
            // Safe to unwrap because this method is called after the VM is configured
            regs::setup_regs(vcpu, entry_addr.raw_value()).map_err(Error::RegsConfiguration)?;
            regs::setup_fpu(vcpu).map_err(Error::FpuConfiguration)?;
            regs::setup_sregs(&vm_memory.memory(), vcpu).map_err(Error::SregsConfiguration)?;
        }
    }
    interrupts::set_lint(vcpu).map_err(|e| Error::LocalIntConfiguration(e.into()))?;
    Ok(())
}

/// Returns a Vec of the valid memory addresses.
/// These should be used to configure the GuestMemory structure for the platform.
/// For x86_64, all addresses are valid from the start of the kernel except a
/// carve-out at the end of the 32-bit address space.
pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, RegionType)> {
    let reserved_memory_gap_start = layout::MEM_32BIT_RESERVED_START
        .checked_add(layout::MEM_32BIT_DEVICES_SIZE)
        .expect("32-bit reserved region is too large");

    let requested_memory_size = GuestAddress(size as u64);
    let mut regions = Vec::new();

    // Case 1: guest memory fits before the gap
    if size as u64 <= layout::MEM_32BIT_RESERVED_START.raw_value() {
        regions.push((GuestAddress(0), size as usize, RegionType::Ram));
    // Case 2: guest memory extends beyond the gap
    } else {
        // Push memory before the gap
        regions.push((
            GuestAddress(0),
            layout::MEM_32BIT_RESERVED_START.raw_value() as usize,
            RegionType::Ram,
        ));
        regions.push((
            layout::RAM_64BIT_START,
            requested_memory_size.unchecked_offset_from(layout::MEM_32BIT_RESERVED_START) as usize,
            RegionType::Ram,
        ));
    }

    // Add the 32-bit device memory hole as a sub region.
    regions.push((
        layout::MEM_32BIT_RESERVED_START,
        layout::MEM_32BIT_DEVICES_SIZE as usize,
        RegionType::SubRegion,
    ));

    // Add the 32-bit reserved memory hole as a reserved region.
    regions.push((
        reserved_memory_gap_start,
        (layout::MEM_32BIT_RESERVED_SIZE - layout::MEM_32BIT_DEVICES_SIZE) as usize,
        RegionType::Reserved,
    ));

    regions
}
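
// For example (illustrative, in terms of the layout constants used above): a
// guest sized beyond MEM_32BIT_RESERVED_START ends up with four regions: RAM
// from 0 up to the start of the 32-bit hole, a second RAM region starting at
// RAM_64BIT_START holding the remainder, plus the device sub-region and the
// reserved region that together describe the hole itself.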

/// Configures the system and should be called once per VM before starting the
/// vCPU threads.
///
/// # Arguments
///
/// * `guest_mem` - The memory to be used by the guest.
/// * `cmdline_addr` - Address in `guest_mem` where the kernel command line was loaded.
/// * `initramfs` - Optional initramfs description (guest address and size).
/// * `num_cpus` - Number of virtual CPUs the guest will have.
#[allow(clippy::too_many_arguments)]
pub fn configure_system(
    guest_mem: &GuestMemoryMmap,
    cmdline_addr: GuestAddress,
    initramfs: &Option<InitramfsConfig>,
    _num_cpus: u8,
    rsdp_addr: Option<GuestAddress>,
    sgx_epc_region: Option<SgxEpcRegion>,
    serial_number: Option<&str>,
    uuid: Option<&str>,
    oem_strings: Option<&[&str]>,
) -> super::Result<()> {
    // Write the EBDA address to the location where ACPICA expects to find it
    guest_mem
        .write_obj((layout::EBDA_START.0 >> 4) as u16, layout::EBDA_POINTER)
        .map_err(Error::EbdaSetup)?;

    let size = smbios::setup_smbios(guest_mem, serial_number, uuid, oem_strings)
        .map_err(Error::SmbiosSetup)?;

    // Place the MP table after the SMBIOS table, aligned to 16 bytes
    let offset = GuestAddress(layout::SMBIOS_START).unchecked_add(size);
    let offset = GuestAddress((offset.0 + 16) & !0xf);
    mptable::setup_mptable(offset, guest_mem, _num_cpus).map_err(Error::MpTableSetup)?;

    // Check that the RAM is not smaller than the RSDP start address
    if let Some(rsdp_addr) = rsdp_addr {
        if rsdp_addr.0 > guest_mem.last_addr().0 {
            return Err(super::Error::RsdpPastRamEnd);
        }
    }

    configure_pvh(
        guest_mem,
        cmdline_addr,
        initramfs,
        rsdp_addr,
        sgx_epc_region,
    )
}

fn configure_pvh(
    guest_mem: &GuestMemoryMmap,
    cmdline_addr: GuestAddress,
    initramfs: &Option<InitramfsConfig>,
    rsdp_addr: Option<GuestAddress>,
    sgx_epc_region: Option<SgxEpcRegion>,
) -> super::Result<()> {
    const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336ec578;

    let mut start_info: StartInfoWrapper = StartInfoWrapper(hvm_start_info::default());

    start_info.0.magic = XEN_HVM_START_MAGIC_VALUE;
    start_info.0.version = 1; // PVH has version 1
    start_info.0.nr_modules = 0;
    start_info.0.cmdline_paddr = cmdline_addr.raw_value();
    start_info.0.memmap_paddr = layout::MEMMAP_START.raw_value();

    if let Some(rsdp_addr) = rsdp_addr {
        start_info.0.rsdp_paddr = rsdp_addr.0;
    }

    if let Some(initramfs_config) = initramfs {
        // The initramfs has been written to guest memory already, so here we
        // just need to create the module structure that describes it.
        let ramdisk_mod: ModlistEntryWrapper = ModlistEntryWrapper(hvm_modlist_entry {
            paddr: initramfs_config.address.raw_value(),
            size: initramfs_config.size as u64,
            ..Default::default()
        });

        start_info.0.nr_modules += 1;
        start_info.0.modlist_paddr = layout::MODLIST_START.raw_value();

        // Write the modlist struct to guest memory.
        guest_mem
            .write_obj(ramdisk_mod, layout::MODLIST_START)
            .map_err(super::Error::ModlistSetup)?;
    }

    // Vector to hold the memory maps which need to be written to guest memory
    // at MEMMAP_START after all of the mappings are recorded.
    let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();

    // Create the memory map entries.
    add_memmap_entry(&mut memmap, 0, layout::EBDA_START.raw_value(), E820_RAM);

    let mem_end = guest_mem.last_addr();

    if mem_end < layout::MEM_32BIT_RESERVED_START {
        add_memmap_entry(
            &mut memmap,
            layout::HIGH_RAM_START.raw_value(),
            mem_end.unchecked_offset_from(layout::HIGH_RAM_START) + 1,
            E820_RAM,
        );
    } else {
        add_memmap_entry(
            &mut memmap,
            layout::HIGH_RAM_START.raw_value(),
            layout::MEM_32BIT_RESERVED_START.unchecked_offset_from(layout::HIGH_RAM_START),
            E820_RAM,
        );
        if mem_end > layout::RAM_64BIT_START {
            add_memmap_entry(
                &mut memmap,
                layout::RAM_64BIT_START.raw_value(),
                mem_end.unchecked_offset_from(layout::RAM_64BIT_START) + 1,
                E820_RAM,
            );
        }
    }

    add_memmap_entry(
        &mut memmap,
        layout::PCI_MMCONFIG_START.0,
        layout::PCI_MMCONFIG_SIZE,
        E820_RESERVED,
    );

    if let Some(sgx_epc_region) = sgx_epc_region {
        add_memmap_entry(
            &mut memmap,
            sgx_epc_region.start().raw_value(),
            sgx_epc_region.size() as u64,
            E820_RESERVED,
        );
    }

    start_info.0.memmap_entries = memmap.len() as u32;

    // Copy the vector with the memmap table to the MEMMAP_START address,
    // which is already saved in the memmap_paddr field of the hvm_start_info struct.
    let mut memmap_start_addr = layout::MEMMAP_START;

    guest_mem
        .checked_offset(
            memmap_start_addr,
            mem::size_of::<hvm_memmap_table_entry>() * start_info.0.memmap_entries as usize,
        )
        .ok_or(super::Error::MemmapTablePastRamEnd)?;

    // For every entry in the memmap vector, create a MemmapTableEntryWrapper
    // and write it to guest memory.
    for memmap_entry in memmap {
        let map_entry_wrapper: MemmapTableEntryWrapper = MemmapTableEntryWrapper(memmap_entry);

        guest_mem
            .write_obj(map_entry_wrapper, memmap_start_addr)
            .map_err(|_| super::Error::MemmapTableSetup)?;
        memmap_start_addr =
            memmap_start_addr.unchecked_add(mem::size_of::<hvm_memmap_table_entry>() as u64);
    }

    // The hvm_start_info struct itself must be stored at the PVH_INFO_START
    // address, and %rbx will be initialized to contain PVH_INFO_START prior to
    // starting the guest, as required by the PVH ABI.
    let start_info_addr = layout::PVH_INFO_START;

    guest_mem
        .checked_offset(start_info_addr, mem::size_of::<hvm_start_info>())
        .ok_or(super::Error::StartInfoPastRamEnd)?;

    // Write the start_info struct to guest memory.
    guest_mem
        .write_obj(start_info, start_info_addr)
        .map_err(|_| super::Error::StartInfoSetup)?;

    Ok(())
}

fn add_memmap_entry(memmap: &mut Vec<hvm_memmap_table_entry>, addr: u64, size: u64, mem_type: u32) {
    // Add the table entry to the vector
    memmap.push(hvm_memmap_table_entry {
        addr,
        size,
        type_: mem_type,
        reserved: 0,
    });
}

/// Returns the memory address where the initramfs could be loaded.
pub fn initramfs_load_addr(
    guest_mem: &GuestMemoryMmap,
    initramfs_size: usize,
) -> super::Result<u64> {
    let first_region = guest_mem
        .find_region(GuestAddress::new(0))
        .ok_or(super::Error::InitramfsAddress)?;
    // It's safe to cast to usize because the size of a region can't be greater than usize.
    let lowmem_size = first_region.len() as usize;

    if lowmem_size < initramfs_size {
        return Err(super::Error::InitramfsAddress);
    }

    let aligned_addr: u64 = ((lowmem_size - initramfs_size) & !(crate::pagesize() - 1)) as u64;
    Ok(aligned_addr)
}

pub fn get_host_cpu_phys_bits() -> u8 {
    unsafe {
        let leaf = x86_64::__cpuid(0x8000_0000);

        // Detect and handle AMD SME (Secure Memory Encryption) properly.
        // Some physical address bits may become reserved when the feature is enabled.
        // See AMD64 Architecture Programmer's Manual Volume 2, Section 7.10.1
        let reduced = if leaf.eax >= 0x8000_001f
            && leaf.ebx == 0x6874_7541    // Vendor ID: AuthenticAMD
            && leaf.ecx == 0x444d_4163
            && leaf.edx == 0x6974_6e65
            && x86_64::__cpuid(0x8000_001f).eax & 0x1 != 0
        {
            (x86_64::__cpuid(0x8000_001f).ebx >> 6) & 0x3f
        } else {
            0
        };

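        // Worked example (illustrative): if CPUID leaf 0x8000_0008 reports 48
        // physical address bits in EAX[7:0] and SME reserves 5 of them, the
        // value returned below is 48 - 5 = 43 usable bits.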
        if leaf.eax >= 0x8000_0008 {
            let leaf = x86_64::__cpuid(0x8000_0008);
            ((leaf.eax & 0xff) - reduced) as u8
        } else {
            36
        }
    }
}

fn update_cpuid_topology(
    cpuid: &mut Vec<CpuIdEntry>,
    threads_per_core: u8,
    cores_per_die: u8,
    dies_per_package: u8,
) {
    let thread_width = 8 - (threads_per_core - 1).leading_zeros();
    let core_width = (8 - (cores_per_die - 1).leading_zeros()) + thread_width;
    let die_width = (8 - (dies_per_package - 1).leading_zeros()) + core_width;
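    // These widths are the APIC ID shift amounts for each topology level,
    // i.e. ceil(log2(count)) at that level. Worked example (illustrative):
    // threads_per_core = 2, cores_per_die = 4 and dies_per_package = 1 give
    // thread_width = 1, core_width = 3 and die_width = 3.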

    // CPU Topology leaf 0xb
    CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(0), CpuidReg::EAX, thread_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0xb,
        Some(0),
        CpuidReg::EBX,
        u32::from(threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(0), CpuidReg::ECX, 1 << 8);

    CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(1), CpuidReg::EAX, die_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0xb,
        Some(1),
        CpuidReg::EBX,
        u32::from(dies_per_package * cores_per_die * threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(1), CpuidReg::ECX, 2 << 8);

    // CPU Topology leaf 0x1f
    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(0), CpuidReg::EAX, thread_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0x1f,
        Some(0),
        CpuidReg::EBX,
        u32::from(threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(0), CpuidReg::ECX, 1 << 8);

    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(1), CpuidReg::EAX, core_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0x1f,
        Some(1),
        CpuidReg::EBX,
        u32::from(cores_per_die * threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(1), CpuidReg::ECX, 2 << 8);

    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(2), CpuidReg::EAX, die_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0x1f,
        Some(2),
        CpuidReg::EBX,
        u32::from(dies_per_package * cores_per_die * threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(2), CpuidReg::ECX, 5 << 8);
}

// The goal is to update the CPUID sub-leaves to reflect the number of EPC
// sections exposed to the guest.
fn update_cpuid_sgx(
    cpuid: &mut Vec<CpuIdEntry>,
    epc_sections: Vec<SgxEpcSection>,
) -> Result<(), Error> {
    // Something's wrong if there's no EPC section.
    if epc_sections.is_empty() {
        return Err(Error::NoSgxEpcSection);
    }
    // We can't go further if the hypervisor does not support the SGX feature.
    if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::EBX, 2) {
        return Err(Error::MissingSgxFeature);
    }
    // We can't go further if the hypervisor does not support the SGX_LC feature.
    if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::ECX, 30) {
        return Err(Error::MissingSgxLaunchControlFeature);
    }

    // Get host CPUID for leaf 0x12, subleaf 0x2. This is to retrieve EPC
    // properties such as confidentiality and integrity.
    let leaf = unsafe { std::arch::x86_64::__cpuid_count(0x12, 0x2) };

    for (i, epc_section) in epc_sections.iter().enumerate() {
        let subleaf_idx = i + 2;
        let start = epc_section.start().raw_value();
        let size = epc_section.size() as u64;
        let eax = (start & 0xffff_f000) as u32 | 0x1;
        let ebx = (start >> 32) as u32;
        let ecx = (size & 0xffff_f000) as u32 | (leaf.ecx & 0xf);
        let edx = (size >> 32) as u32;
        // SGX EPC leaf 0x12
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, eax);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, ebx);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, ecx);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, edx);
    }

    // Add one NULL entry to terminate the dynamic list
    let subleaf_idx = epc_sections.len() + 2;
    // SGX EPC leaf 0x12
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, 0);

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn regions_lt_4gb() {
        let regions = arch_memory_regions(1 << 29);
        assert_eq!(3, regions.len());
        assert_eq!(GuestAddress(0), regions[0].0);
        assert_eq!(1usize << 29, regions[0].1);
    }

    #[test]
    fn regions_gt_4gb() {
        let regions = arch_memory_regions((1 << 32) + 0x8000);
        assert_eq!(4, regions.len());
        assert_eq!(GuestAddress(0), regions[0].0);
        assert_eq!(GuestAddress(1 << 32), regions[1].0);
    }

    #[test]
    fn test_system_configuration() {
        let no_vcpus = 4;
        let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let config_err = configure_system(
            &gm,
            GuestAddress(0),
            &None,
            1,
            Some(layout::RSDP_POINTER),
            None,
            None,
            None,
            None,
        );
        assert!(config_err.is_err());

        // Now assigning some memory that falls before the 32-bit memory hole.
        let mem_size = 128 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();

        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();

        // Now assigning some memory that is equal to the start of the 32-bit memory hole.
        let mem_size = 3328 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();

        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();

        // Now assigning some memory that falls after the 32-bit memory hole.
        let mem_size = 3330 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();

        configure_system(
            &gm,
            GuestAddress(0),
            &None,
            no_vcpus,
            None,
            None,
            None,
            None,
            None,
        )
        .unwrap();
    }

    #[test]
    fn test_add_memmap_entry() {
        let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();

        let expected_memmap = vec![
            hvm_memmap_table_entry {
                addr: 0x0,
                size: 0x1000,
                type_: E820_RAM,
                ..Default::default()
            },
            hvm_memmap_table_entry {
                addr: 0x10000,
                size: 0xa000,
                type_: E820_RESERVED,
                ..Default::default()
            },
        ];

        add_memmap_entry(&mut memmap, 0, 0x1000, E820_RAM);
        add_memmap_entry(&mut memmap, 0x10000, 0xa000, E820_RESERVED);

        assert_eq!(format!("{:?}", memmap), format!("{:?}", expected_memmap));
    }
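
    // The following tests are illustrative additions (not part of the
    // original sources); they sketch how the CPUID helpers and
    // initramfs_load_addr are expected to behave.
    #[test]
    fn test_set_cpuid_reg_and_feature_check() {
        let mut cpuid: Vec<CpuIdEntry> = Vec::new();

        // Patching a leaf that is absent appends a new entry carrying the
        // valid-index flag.
        CpuidPatch::set_cpuid_reg(&mut cpuid, 0x7, Some(0), CpuidReg::EBX, 1 << 2);
        assert_eq!(cpuid.len(), 1);
        assert_eq!(cpuid[0].flags, CPUID_FLAG_VALID_INDEX);

        // The bit just set (SGX, bit 2 of leaf 0x7 EBX) reads back as
        // enabled, while an unset bit does not.
        assert!(CpuidPatch::is_feature_enabled(&cpuid, 0x7, 0, CpuidReg::EBX, 2));
        assert!(!CpuidPatch::is_feature_enabled(&cpuid, 0x7, 0, CpuidReg::EBX, 3));
    }

    #[test]
    fn test_check_cpuid_compatibility() {
        // Two empty CPUID lists expose identical (empty) feature sets and
        // are therefore compatible.
        let mut src: Vec<CpuIdEntry> = Vec::new();
        let dest: Vec<CpuIdEntry> = Vec::new();
        assert!(CpuidFeatureEntry::check_cpuid_compatibility(&src, &dest).is_ok());

        // A feature bit present on the source but absent on the destination
        // breaks the bitwise-subset rule for leaf 0x1 ECX.
        CpuidPatch::set_cpuid_reg(&mut src, 0x1, Some(0), CpuidReg::ECX, 1 << 5);
        assert!(CpuidFeatureEntry::check_cpuid_compatibility(&src, &dest).is_err());
    }

    #[test]
    fn test_initramfs_load_addr() {
        let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x40000)]).unwrap();

        // The initramfs is placed at the top of the first RAM region,
        // aligned down to the host page size.
        let addr = initramfs_load_addr(&gm, 0x1000).unwrap();
        assert!(addr as usize <= 0x40000 - 0x1000);
        assert_eq!(addr as usize & (crate::pagesize() - 1), 0);

        // An initramfs larger than the region must be rejected.
        assert!(initramfs_load_addr(&gm, 0x50000).is_err());
    }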
}