xref: /cloud-hypervisor/arch/src/x86_64/mod.rs (revision b440cb7d2330770cd415b63544a371d4caa2db3a)
1 // Copyright © 2020, Oracle and/or its affiliates.
2 //
3 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 // SPDX-License-Identifier: Apache-2.0
5 //
6 // Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE-BSD-3-Clause file.
9 use std::sync::Arc;
10 pub mod interrupts;
11 pub mod layout;
12 mod mpspec;
13 mod mptable;
14 pub mod regs;
15 use crate::GuestMemoryMmap;
16 use crate::InitramfsConfig;
17 use crate::RegionType;
18 use hypervisor::arch::x86::{CpuIdEntry, CPUID_FLAG_VALID_INDEX};
19 use hypervisor::HypervisorError;
20 use linux_loader::loader::bootparam::boot_params;
21 use linux_loader::loader::elf::start_info::{
22     hvm_memmap_table_entry, hvm_modlist_entry, hvm_start_info,
23 };
24 use std::collections::BTreeMap;
25 use std::mem;
26 use vm_memory::{
27     Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
28     GuestMemoryRegion, GuestUsize,
29 };
30 mod smbios;
31 use std::arch::x86_64;
32 #[cfg(feature = "tdx")]
33 pub mod tdx;
34 
35 // CPUID feature bits
36 const TSC_DEADLINE_TIMER_ECX_BIT: u8 = 24; // TSC deadline timer ECX bit.
37 const HYPERVISOR_ECX_BIT: u8 = 31; // Hypervisor ECX bit.
38 const MTRR_EDX_BIT: u8 = 12; // MTRR EDX bit.
39 
40 // KVM feature bits
41 const KVM_FEATURE_ASYNC_PF_INT_BIT: u8 = 14;
42 #[cfg(feature = "tdx")]
43 const KVM_FEATURE_CLOCKSOURCE_BIT: u8 = 0;
44 #[cfg(feature = "tdx")]
45 const KVM_FEATURE_CLOCKSOURCE2_BIT: u8 = 3;
46 #[cfg(feature = "tdx")]
47 const KVM_FEATURE_CLOCKSOURCE_STABLE_BIT: u8 = 24;
48 #[cfg(feature = "tdx")]
49 const KVM_FEATURE_ASYNC_PF_BIT: u8 = 4;
50 #[cfg(feature = "tdx")]
51 const KVM_FEATURE_ASYNC_PF_VMEXIT_BIT: u8 = 10;
52 #[cfg(feature = "tdx")]
53 const KVM_FEATURE_STEAL_TIME_BIT: u8 = 5;
54 
55 #[derive(Debug, Copy, Clone)]
56 /// Specifies the entry point address where the guest must start
57 /// executing code; it is used when configuring the initial vCPU state
58 /// for the selected boot protocol.
59 pub struct EntryPoint {
60     /// Address in guest memory where the guest must start execution
61     pub entry_addr: Option<GuestAddress>,
62 }
63 
64 const E820_RAM: u32 = 1;
65 const E820_RESERVED: u32 = 2;
66 
67 #[derive(Clone)]
68 pub struct SgxEpcSection {
69     start: GuestAddress,
70     size: GuestUsize,
71 }
72 
73 impl SgxEpcSection {
74     pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
75         SgxEpcSection { start, size }
76     }
77     pub fn start(&self) -> GuestAddress {
78         self.start
79     }
80     pub fn size(&self) -> GuestUsize {
81         self.size
82     }
83 }
84 
85 #[derive(Clone)]
86 pub struct SgxEpcRegion {
87     start: GuestAddress,
88     size: GuestUsize,
89     epc_sections: BTreeMap<String, SgxEpcSection>,
90 }
91 
92 impl SgxEpcRegion {
93     pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
94         SgxEpcRegion {
95             start,
96             size,
97             epc_sections: BTreeMap::new(),
98         }
99     }
100     pub fn start(&self) -> GuestAddress {
101         self.start
102     }
103     pub fn size(&self) -> GuestUsize {
104         self.size
105     }
106     pub fn epc_sections(&self) -> &BTreeMap<String, SgxEpcSection> {
107         &self.epc_sections
108     }
109     pub fn insert(&mut self, id: String, epc_section: SgxEpcSection) {
110         self.epc_sections.insert(id, epc_section);
111     }
112 }
113 
114 // This is a workaround for the Rust orphan rule, which prohibits implementing
115 // a foreign trait (in this case `ByteValued`) when:
116 // *    the type that is implementing the trait is foreign, and
117 // *    all of the parameters being passed to the trait (if there are any) are also foreign.
118 // Wrapping the foreign types in local newtypes makes the implementations legal.
119 #[derive(Copy, Clone, Default)]
120 struct StartInfoWrapper(hvm_start_info);
121 
122 #[derive(Copy, Clone, Default)]
123 struct MemmapTableEntryWrapper(hvm_memmap_table_entry);
124 
125 #[derive(Copy, Clone, Default)]
126 struct ModlistEntryWrapper(hvm_modlist_entry);
127 
128 // SAFETY: These data structures only contain a series of integers
129 unsafe impl ByteValued for StartInfoWrapper {}
130 unsafe impl ByteValued for MemmapTableEntryWrapper {}
131 unsafe impl ByteValued for ModlistEntryWrapper {}
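
// With `ByteValued` implemented, these wrappers can be copied byte-for-byte
// into guest memory. A minimal sketch (illustrative only, assuming a
// `GuestMemoryMmap` named `guest_mem` and a `GuestAddress` named `addr`):
//
//     let info = StartInfoWrapper(hvm_start_info::default());
//     guest_mem.write_obj(info, addr)?;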
132 
133 // This is a workaround for the Rust orphan rule, which prohibits implementing
134 // a foreign trait (in this case `ByteValued`) when:
135 // *    the type that is implementing the trait is foreign, and
136 // *    all of the parameters being passed to the trait (if there are any) are also foreign.
137 // Wrapping the foreign type in a local newtype makes the implementation legal.
138 #[derive(Copy, Clone, Default)]
139 struct BootParamsWrapper(boot_params);
140 
141 // SAFETY: BootParamsWrapper is a wrapper over `boot_params` (a series of ints).
142 unsafe impl ByteValued for BootParamsWrapper {}
143 
144 #[derive(Debug)]
145 pub enum Error {
146     /// Error writing MP table to memory.
147     MpTableSetup(mptable::Error),
148 
149     /// Error configuring the general purpose registers
150     RegsConfiguration(regs::Error),
151 
152     /// Error configuring the special registers
153     SregsConfiguration(regs::Error),
154 
155     /// Error configuring the floating point related registers
156     FpuConfiguration(regs::Error),
157 
158     /// Error configuring the MSR registers
159     MsrsConfiguration(regs::Error),
160 
161     /// Failed to set supported CPUs.
162     SetSupportedCpusFailed(anyhow::Error),
163 
164     /// Cannot set the local interruption due to bad configuration.
165     LocalIntConfiguration(anyhow::Error),
166 
167     /// Error setting up SMBIOS table
168     SmbiosSetup(smbios::Error),
169 
170     /// Could not find any SGX EPC section
171     NoSgxEpcSection,
172 
173     /// Missing SGX CPU feature
174     MissingSgxFeature,
175 
176     /// Missing SGX_LC CPU feature
177     MissingSgxLaunchControlFeature,
178 
179     /// Error getting supported CPUID through the hypervisor (kvm/mshv) API
180     CpuidGetSupported(HypervisorError),
181 
182     /// Error populating CPUID with KVM HyperV emulation details
183     CpuidKvmHyperV(vmm_sys_util::fam::Error),
184 
185     /// Error populating CPUID with CPU identification
186     CpuidIdentification(vmm_sys_util::fam::Error),
187 
188     /// Error checking CPUID compatibility
189     CpuidCheckCompatibility,
190 
191     /// Error writing EBDA address
192     EbdaSetup(vm_memory::GuestMemoryError),
193 
194     /// Error retrieving TDX capabilities through the hypervisor (kvm/mshv) API
195     #[cfg(feature = "tdx")]
196     TdxCapabilities(HypervisorError),
197 }
198 
199 impl From<Error> for super::Error {
200     fn from(e: Error) -> super::Error {
201         super::Error::PlatformSpecific(e)
202     }
203 }
204 
205 #[allow(dead_code, clippy::upper_case_acronyms)]
206 #[derive(Copy, Clone, Debug)]
207 pub enum CpuidReg {
208     EAX,
209     EBX,
210     ECX,
211     EDX,
212 }
213 
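/// A patch for a single CPUID leaf/sub-leaf: every `Some(bit)` below turns on
/// that bit in the corresponding register (or in the entry flags) of each
/// CPUID entry matching `function` and `index` (see `patch_cpuid`).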
214 pub struct CpuidPatch {
215     pub function: u32,
216     pub index: u32,
217     pub flags_bit: Option<u8>,
218     pub eax_bit: Option<u8>,
219     pub ebx_bit: Option<u8>,
220     pub ecx_bit: Option<u8>,
221     pub edx_bit: Option<u8>,
222 }
223 
224 impl CpuidPatch {
225     pub fn set_cpuid_reg(
226         cpuid: &mut Vec<CpuIdEntry>,
227         function: u32,
228         index: Option<u32>,
229         reg: CpuidReg,
230         value: u32,
231     ) {
232         let mut entry_found = false;
233         for entry in cpuid.iter_mut() {
234             if entry.function == function && (index.is_none() || index == Some(entry.index)) {
235                 entry_found = true;
236                 match reg {
237                     CpuidReg::EAX => {
238                         entry.eax = value;
239                     }
240                     CpuidReg::EBX => {
241                         entry.ebx = value;
242                     }
243                     CpuidReg::ECX => {
244                         entry.ecx = value;
245                     }
246                     CpuidReg::EDX => {
247                         entry.edx = value;
248                     }
249                 }
250             }
251         }
252 
253         if entry_found {
254             return;
255         }
256 
257         // Entry not found, so let's add it.
258         if let Some(index) = index {
259             let mut entry = CpuIdEntry {
260                 function,
261                 index,
262                 flags: CPUID_FLAG_VALID_INDEX,
263                 ..Default::default()
264             };
265             match reg {
266                 CpuidReg::EAX => {
267                     entry.eax = value;
268                 }
269                 CpuidReg::EBX => {
270                     entry.ebx = value;
271                 }
272                 CpuidReg::ECX => {
273                     entry.ecx = value;
274                 }
275                 CpuidReg::EDX => {
276                     entry.edx = value;
277                 }
278             }
279 
280             cpuid.push(entry);
281         }
282     }
283 
284     pub fn patch_cpuid(cpuid: &mut [CpuIdEntry], patches: Vec<CpuidPatch>) {
285         for entry in cpuid {
286             for patch in patches.iter() {
287                 if entry.function == patch.function && entry.index == patch.index {
288                     if let Some(flags_bit) = patch.flags_bit {
289                         entry.flags |= 1 << flags_bit;
290                     }
291                     if let Some(eax_bit) = patch.eax_bit {
292                         entry.eax |= 1 << eax_bit;
293                     }
294                     if let Some(ebx_bit) = patch.ebx_bit {
295                         entry.ebx |= 1 << ebx_bit;
296                     }
297                     if let Some(ecx_bit) = patch.ecx_bit {
298                         entry.ecx |= 1 << ecx_bit;
299                     }
300                     if let Some(edx_bit) = patch.edx_bit {
301                         entry.edx |= 1 << edx_bit;
302                     }
303                 }
304             }
305         }
306     }
307 
308     pub fn is_feature_enabled(
309         cpuid: &[CpuIdEntry],
310         function: u32,
311         index: u32,
312         reg: CpuidReg,
313         feature_bit: usize,
314     ) -> bool {
315         let mask = 1 << feature_bit;
316 
317         for entry in cpuid {
318             if entry.function == function && entry.index == index {
319                 let reg_val = match reg {
320                     CpuidReg::EAX => entry.eax,
321                     CpuidReg::EBX => entry.ebx,
322                     CpuidReg::ECX => entry.ecx,
323                     CpuidReg::EDX => entry.edx,
324                 };
325 
326                 return (reg_val & mask) == mask;
327             }
328         }
329 
330         false
331     }
332 }
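
// A minimal usage sketch (illustrative only): OR-in a feature bit with
// `patch_cpuid`, then read it back with `is_feature_enabled`:
//
//     CpuidPatch::patch_cpuid(&mut cpuid, vec![CpuidPatch {
//         function: 1, index: 0, flags_bit: None, eax_bit: None,
//         ebx_bit: None, ecx_bit: None, edx_bit: Some(MTRR_EDX_BIT),
//     }]);
//     assert!(CpuidPatch::is_feature_enabled(&cpuid, 1, 0, CpuidReg::EDX,
//         MTRR_EDX_BIT as usize));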
333 
334 #[derive(Debug)]
335 enum CpuidCompatibleCheck {
336     BitwiseSubset, // bitwise subset
337     Equal,         // equal in value
338     NumNotGreater, // smaller or equal as a number
339 }
340 
341 pub struct CpuidFeatureEntry {
342     function: u32,
343     index: u32,
344     feature_reg: CpuidReg,
345     compatible_check: CpuidCompatibleCheck,
346 }
347 
348 impl CpuidFeatureEntry {
349     fn checked_feature_entry_list() -> Vec<CpuidFeatureEntry> {
350         vec![
351             // The following list includes all hardware features bits from
352             // the CPUID Wiki Page: https://en.wikipedia.org/wiki/CPUID
353             // Leaf 0x1, ECX/EDX, feature bits
354             CpuidFeatureEntry {
355                 function: 1,
356                 index: 0,
357                 feature_reg: CpuidReg::ECX,
358                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
359             },
360             CpuidFeatureEntry {
361                 function: 1,
362                 index: 0,
363                 feature_reg: CpuidReg::EDX,
364                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
365             },
366             // Leaf 0x7, EAX/EBX/ECX/EDX, extended features
367             CpuidFeatureEntry {
368                 function: 7,
369                 index: 0,
370                 feature_reg: CpuidReg::EAX,
371                 compatible_check: CpuidCompatibleCheck::NumNotGreater,
372             },
373             CpuidFeatureEntry {
374                 function: 7,
375                 index: 0,
376                 feature_reg: CpuidReg::EBX,
377                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
378             },
379             CpuidFeatureEntry {
380                 function: 7,
381                 index: 0,
382                 feature_reg: CpuidReg::ECX,
383                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
384             },
385             CpuidFeatureEntry {
386                 function: 7,
387                 index: 0,
388                 feature_reg: CpuidReg::EDX,
389                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
390             },
391             // Leaf 0x7 subleaf 0x1, EAX, extended features
392             CpuidFeatureEntry {
393                 function: 7,
394                 index: 1,
395                 feature_reg: CpuidReg::EAX,
396                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
397             },
398             // Leaf 0x8000_0001, ECX/EDX, CPUID features bits
399             CpuidFeatureEntry {
400                 function: 0x8000_0001,
401                 index: 0,
402                 feature_reg: CpuidReg::ECX,
403                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
404             },
405             CpuidFeatureEntry {
406                 function: 0x8000_0001,
407                 index: 0,
408                 feature_reg: CpuidReg::EDX,
409                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
410             },
411             // KVM CPUID bits: https://www.kernel.org/doc/html/latest/virt/kvm/cpuid.html
412             // Leaf 0x4000_0000, EAX/EBX/ECX/EDX, KVM CPUID SIGNATURE
413             CpuidFeatureEntry {
414                 function: 0x4000_0000,
415                 index: 0,
416                 feature_reg: CpuidReg::EAX,
417                 compatible_check: CpuidCompatibleCheck::NumNotGreater,
418             },
419             CpuidFeatureEntry {
420                 function: 0x4000_0000,
421                 index: 0,
422                 feature_reg: CpuidReg::EBX,
423                 compatible_check: CpuidCompatibleCheck::Equal,
424             },
425             CpuidFeatureEntry {
426                 function: 0x4000_0000,
427                 index: 0,
428                 feature_reg: CpuidReg::ECX,
429                 compatible_check: CpuidCompatibleCheck::Equal,
430             },
431             CpuidFeatureEntry {
432                 function: 0x4000_0000,
433                 index: 0,
434                 feature_reg: CpuidReg::EDX,
435                 compatible_check: CpuidCompatibleCheck::Equal,
436             },
437             // Leaf 0x4000_0001, EAX/EBX/ECX/EDX, KVM CPUID features
438             CpuidFeatureEntry {
439                 function: 0x4000_0001,
440                 index: 0,
441                 feature_reg: CpuidReg::EAX,
442                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
443             },
444             CpuidFeatureEntry {
445                 function: 0x4000_0001,
446                 index: 0,
447                 feature_reg: CpuidReg::EBX,
448                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
449             },
450             CpuidFeatureEntry {
451                 function: 0x4000_0001,
452                 index: 0,
453                 feature_reg: CpuidReg::ECX,
454                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
455             },
456             CpuidFeatureEntry {
457                 function: 0x4000_0001,
458                 index: 0,
459                 feature_reg: CpuidReg::EDX,
460                 compatible_check: CpuidCompatibleCheck::BitwiseSubset,
461             },
462         ]
463     }
464 
465     fn get_features_from_cpuid(
466         cpuid: &[CpuIdEntry],
467         feature_entry_list: &[CpuidFeatureEntry],
468     ) -> Vec<u32> {
469         let mut features = vec![0; feature_entry_list.len()];
470         for (i, feature_entry) in feature_entry_list.iter().enumerate() {
471             for cpuid_entry in cpuid {
472                 if cpuid_entry.function == feature_entry.function
473                     && cpuid_entry.index == feature_entry.index
474                 {
475                     match feature_entry.feature_reg {
476                         CpuidReg::EAX => {
477                             features[i] = cpuid_entry.eax;
478                         }
479                         CpuidReg::EBX => {
480                             features[i] = cpuid_entry.ebx;
481                         }
482                         CpuidReg::ECX => {
483                             features[i] = cpuid_entry.ecx;
484                         }
485                         CpuidReg::EDX => {
486                             features[i] = cpuid_entry.edx;
487                         }
488                     }
489 
490                     break;
491                 }
492             }
493         }
494 
495         features
496     }
497 
498     // The function returns `Error` (i.e. "incompatible") when the CPUID features from
499     // `src_vm_cpuid` are not a subset of those of `dest_vm_cpuid`.
500     pub fn check_cpuid_compatibility(
501         src_vm_cpuid: &[CpuIdEntry],
502         dest_vm_cpuid: &[CpuIdEntry],
503     ) -> Result<(), Error> {
504         let feature_entry_list = &Self::checked_feature_entry_list();
505         let src_vm_features = Self::get_features_from_cpuid(src_vm_cpuid, feature_entry_list);
506         let dest_vm_features = Self::get_features_from_cpuid(dest_vm_cpuid, feature_entry_list);
507 
508         // Loop over the feature entries and check whether each 'source vm' feature
509         // is compatible with the corresponding 'destination vm' feature.
510         let mut compatible = true;
511         for (i, (src_vm_feature, dest_vm_feature)) in src_vm_features
512             .iter()
513             .zip(dest_vm_features.iter())
514             .enumerate()
515         {
516             let entry = &feature_entry_list[i];
517             let entry_compatible = match entry.compatible_check {
518                 CpuidCompatibleCheck::BitwiseSubset => {
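                    // e.g. src = 0b0110, dest = 0b1110: src ^ dest = 0b1000 and
                    // 0b1000 & src = 0, so every bit set in src is also set in
                    // dest and the source features form a subset.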
519                     let different_feature_bits = src_vm_feature ^ dest_vm_feature;
520                     let src_vm_feature_bits_only = different_feature_bits & src_vm_feature;
521                     src_vm_feature_bits_only == 0
522                 }
523                 CpuidCompatibleCheck::Equal => src_vm_feature == dest_vm_feature,
524                 CpuidCompatibleCheck::NumNotGreater => src_vm_feature <= dest_vm_feature,
525             };
526             if !entry_compatible {
527                 error!(
528                     "Detected incompatible CPUID entry: leaf={:#02x} (subleaf={:#02x}), register='{:?}', \
529                     compatible_check='{:?}', source VM feature='{:#04x}', destination VM feature='{:#04x}'.",
530                     entry.function, entry.index, entry.feature_reg,
531                     entry.compatible_check, src_vm_feature, dest_vm_feature
532                 );
533 
534                 compatible = false;
535             }
536         }
537 
538         if compatible {
539             info!("No CPU incompatibility detected.");
540             Ok(())
541         } else {
542             Err(Error::CpuidCheckCompatibility)
543         }
544     }
545 }
546 
547 pub fn generate_common_cpuid(
548     hypervisor: Arc<dyn hypervisor::Hypervisor>,
549     topology: Option<(u8, u8, u8)>,
550     sgx_epc_sections: Option<Vec<SgxEpcSection>>,
551     phys_bits: u8,
552     kvm_hyperv: bool,
553     #[cfg(feature = "tdx")] tdx_enabled: bool,
554 ) -> super::Result<Vec<CpuIdEntry>> {
555     let cpuid_patches = vec![
556         // Patch tsc deadline timer bit
557         CpuidPatch {
558             function: 1,
559             index: 0,
560             flags_bit: None,
561             eax_bit: None,
562             ebx_bit: None,
563             ecx_bit: Some(TSC_DEADLINE_TIMER_ECX_BIT),
564             edx_bit: None,
565         },
566         // Patch hypervisor bit
567         CpuidPatch {
568             function: 1,
569             index: 0,
570             flags_bit: None,
571             eax_bit: None,
572             ebx_bit: None,
573             ecx_bit: Some(HYPERVISOR_ECX_BIT),
574             edx_bit: None,
575         },
576         // Enable MTRR feature
577         CpuidPatch {
578             function: 1,
579             index: 0,
580             flags_bit: None,
581             eax_bit: None,
582             ebx_bit: None,
583             ecx_bit: None,
584             edx_bit: Some(MTRR_EDX_BIT),
585         },
586     ];
587 
588     // Supported CPUID
589     let mut cpuid = hypervisor.get_cpuid().map_err(Error::CpuidGetSupported)?;
590 
591     CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches);
592 
593     if let Some(t) = topology {
594         update_cpuid_topology(&mut cpuid, t.0, t.1, t.2);
595     }
596 
597     if let Some(sgx_epc_sections) = sgx_epc_sections {
598         update_cpuid_sgx(&mut cpuid, sgx_epc_sections)?;
599     }
600 
601     #[cfg(feature = "tdx")]
602     let tdx_capabilities = if tdx_enabled {
603         let caps = hypervisor
604             .tdx_capabilities()
605             .map_err(Error::TdxCapabilities)?;
606         info!("TDX capabilities {:#?}", caps);
607         Some(caps)
608     } else {
609         None
610     };
611 
612     // Update some existing CPUID
613     for entry in cpuid.as_mut_slice().iter_mut() {
614         match entry.function {
615             0xd =>
616             {
617                 #[cfg(feature = "tdx")]
618                 if let Some(caps) = &tdx_capabilities {
619                     let xcr0_mask: u64 = 0x82ff;
620                     let xss_mask: u64 = !xcr0_mask;
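                    // Fixed-bit convention (as with the VMX fixed MSRs):
                    // a 0 in xfam_fixed0 marks a bit that must be cleared, a 1
                    // in xfam_fixed1 a bit that must be set, so the AND/OR
                    // below force the reported state-mask bits into the
                    // TDX-allowed range.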
621                     if entry.index == 0 {
622                         entry.eax &= (caps.xfam_fixed0 as u32) & (xcr0_mask as u32);
623                         entry.eax |= (caps.xfam_fixed1 as u32) & (xcr0_mask as u32);
624                         entry.edx &= ((caps.xfam_fixed0 & xcr0_mask) >> 32) as u32;
625                         entry.edx |= ((caps.xfam_fixed1 & xcr0_mask) >> 32) as u32;
626                     } else if entry.index == 1 {
627                         entry.ecx &= (caps.xfam_fixed0 as u32) & (xss_mask as u32);
628                         entry.ecx |= (caps.xfam_fixed1 as u32) & (xss_mask as u32);
629                         entry.edx &= ((caps.xfam_fixed0 & xss_mask) >> 32) as u32;
630                         entry.edx |= ((caps.xfam_fixed1 & xss_mask) >> 32) as u32;
631                     }
632                 }
633             }
634             // Set CPU physical bits
635             0x8000_0008 => {
636                 entry.eax = (entry.eax & 0xffff_ff00) | (phys_bits as u32 & 0xff);
637             }
638             // Disable KVM_FEATURE_ASYNC_PF_INT
639             // This is required until we find out why the asynchronous page
640             // fault is generating unexpected behavior when using interrupt
641             // mechanism.
642             // TODO: Re-enable KVM_FEATURE_ASYNC_PF_INT (#2277)
643             0x4000_0001 => {
644                 entry.eax &= !(1 << KVM_FEATURE_ASYNC_PF_INT_BIT);
645 
646                 // These features are not supported by TDX
647                 #[cfg(feature = "tdx")]
648                 if tdx_enabled {
649                     entry.eax &= !(1 << KVM_FEATURE_CLOCKSOURCE_BIT
650                         | 1 << KVM_FEATURE_CLOCKSOURCE2_BIT
651                         | 1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT
652                         | 1 << KVM_FEATURE_ASYNC_PF_BIT
653                         | 1 << KVM_FEATURE_ASYNC_PF_VMEXIT_BIT
654                         | 1 << KVM_FEATURE_STEAL_TIME_BIT)
655                 }
656             }
657             _ => {}
658         }
659     }
660 
661     // Copy CPU identification string
662     for i in 0x8000_0002..=0x8000_0004 {
663         cpuid.retain(|c| c.function != i);
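        // SAFETY: `__cpuid` is always available on x86_64; leaves
        // 0x8000_0002..=0x8000_0004 hold the host's processor brand string.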
664         let leaf = unsafe { std::arch::x86_64::__cpuid(i) };
665         cpuid.push(CpuIdEntry {
666             function: i,
667             eax: leaf.eax,
668             ebx: leaf.ebx,
669             ecx: leaf.ecx,
670             edx: leaf.edx,
671             ..Default::default()
672         });
673     }
674 
675     if kvm_hyperv {
676         // Remove conflicting entries
677         cpuid.retain(|c| c.function != 0x4000_0000);
678         cpuid.retain(|c| c.function != 0x4000_0001);
679         // See "Hypervisor Top Level Functional Specification" for details
680         // Compliance with "Hv#1" requires leaves up to 0x4000_000a
681         cpuid.push(CpuIdEntry {
682             function: 0x4000_0000,
683             eax: 0x4000000a, // Maximum cpuid leaf
684             ebx: 0x756e694c, // "Linu"
685             ecx: 0x564b2078, // "x KV"
686             edx: 0x7648204d, // "M Hv"
687             ..Default::default()
688         });
689         cpuid.push(CpuIdEntry {
690             function: 0x4000_0001,
691             eax: 0x31237648, // "Hv#1"
692             ..Default::default()
693         });
694         cpuid.push(CpuIdEntry {
695             function: 0x4000_0002,
696             eax: 0x3839,  // Build number
697             ebx: 0xa0000, // Version
698             ..Default::default()
699         });
700         cpuid.push(CpuIdEntry {
701             function: 0x4000_0003,
702             eax: 1 << 1 // AccessPartitionReferenceCounter
703                    | 1 << 2 // AccessSynicRegs
704                    | 1 << 3 // AccessSyntheticTimerRegs
705                    | 1 << 9, // AccessPartitionReferenceTsc
706             edx: 1 << 3, // CPU dynamic partitioning
707             ..Default::default()
708         });
709         cpuid.push(CpuIdEntry {
710             function: 0x4000_0004,
711             eax: 1 << 5, // Recommend relaxed timing
712             ..Default::default()
713         });
714         for i in 0x4000_0005..=0x4000_000a {
715             cpuid.push(CpuIdEntry {
716                 function: i,
717                 ..Default::default()
718             });
719         }
720     }
721 
722     Ok(cpuid)
723 }
724 
725 pub fn configure_vcpu(
726     vcpu: &Arc<dyn hypervisor::Vcpu>,
727     id: u8,
728     kernel_entry_point: Option<EntryPoint>,
729     vm_memory: &GuestMemoryAtomic<GuestMemoryMmap>,
730     cpuid: Vec<CpuIdEntry>,
731     kvm_hyperv: bool,
732 ) -> super::Result<()> {
733     // Per-vCPU CPUID changes; common changes are handled via generate_common_cpuid()
734     let mut cpuid = cpuid;
735     CpuidPatch::set_cpuid_reg(&mut cpuid, 0xb, None, CpuidReg::EDX, u32::from(id));
736     CpuidPatch::set_cpuid_reg(&mut cpuid, 0x1f, None, CpuidReg::EDX, u32::from(id));
737 
738     vcpu.set_cpuid2(&cpuid)
739         .map_err(|e| Error::SetSupportedCpusFailed(e.into()))?;
740 
741     if kvm_hyperv {
742         vcpu.enable_hyperv_synic().unwrap();
743     }
744 
745     regs::setup_msrs(vcpu).map_err(Error::MsrsConfiguration)?;
746     if let Some(kernel_entry_point) = kernel_entry_point {
747         if let Some(entry_addr) = kernel_entry_point.entry_addr {
748             // The VM memory is configured by this point, so setting up the vCPU registers is safe
749             regs::setup_regs(vcpu, entry_addr.raw_value()).map_err(Error::RegsConfiguration)?;
750             regs::setup_fpu(vcpu).map_err(Error::FpuConfiguration)?;
751             regs::setup_sregs(&vm_memory.memory(), vcpu).map_err(Error::SregsConfiguration)?;
752         }
753     }
754     interrupts::set_lint(vcpu).map_err(|e| Error::LocalIntConfiguration(e.into()))?;
755     Ok(())
756 }
757 
758 /// Returns a Vec of the valid memory addresses.
759 /// These should be used to configure the GuestMemory structure for the platform.
760 /// For x86_64 all addresses are valid from the start of the kernel except for a
761 /// carve-out at the end of the 32-bit address space.
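///
/// A sketch of the resulting layout for a guest larger than the hole
/// (concrete addresses come from the `layout` module):
///
/// ```text
/// [0, MEM_32BIT_RESERVED_START)             -> RAM
/// [MEM_32BIT_RESERVED_START, +DEVICES_SIZE) -> SubRegion (32-bit devices)
/// [end of device hole, 4 GiB)               -> Reserved
/// [RAM_64BIT_START, ..)                     -> RAM (remainder)
/// ```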
762 pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, RegionType)> {
763     let reserved_memory_gap_start = layout::MEM_32BIT_RESERVED_START
764         .checked_add(layout::MEM_32BIT_DEVICES_SIZE)
765         .expect("32-bit reserved region is too large");
766 
767     let requested_memory_size = GuestAddress(size as u64);
768     let mut regions = Vec::new();
769 
770     // Case 1: guest memory fits before the gap.
771     if size as u64 <= layout::MEM_32BIT_RESERVED_START.raw_value() {
772         regions.push((GuestAddress(0), size as usize, RegionType::Ram));
773     // Case 2: guest memory extends beyond the gap.
774     } else {
775         // push memory before the gap
776         regions.push((
777             GuestAddress(0),
778             layout::MEM_32BIT_RESERVED_START.raw_value() as usize,
779             RegionType::Ram,
780         ));
781         regions.push((
782             layout::RAM_64BIT_START,
783             requested_memory_size.unchecked_offset_from(layout::MEM_32BIT_RESERVED_START) as usize,
784             RegionType::Ram,
785         ));
786     }
787 
788     // Add the 32-bit device memory hole as a sub region.
789     regions.push((
790         layout::MEM_32BIT_RESERVED_START,
791         layout::MEM_32BIT_DEVICES_SIZE as usize,
792         RegionType::SubRegion,
793     ));
794 
795     // Add the 32-bit reserved memory hole as a sub region.
796     regions.push((
797         reserved_memory_gap_start,
798         (layout::MEM_32BIT_RESERVED_SIZE - layout::MEM_32BIT_DEVICES_SIZE) as usize,
799         RegionType::Reserved,
800     ));
801 
802     regions
803 }
804 
805 /// Configures the system and should be called once per VM before starting the vCPU threads.
806 ///
807 /// # Arguments
808 ///
809 /// * `guest_mem` - The memory to be used by the guest.
810 /// * `cmdline_addr` - Address in `guest_mem` where the kernel command line was loaded.
811 /// * `initramfs` - Optional initramfs config; the image must already be loaded into `guest_mem`.
812 /// * `num_cpus` - Number of virtual CPUs the guest will have.
813 #[allow(clippy::too_many_arguments)]
814 pub fn configure_system(
815     guest_mem: &GuestMemoryMmap,
816     cmdline_addr: GuestAddress,
817     initramfs: &Option<InitramfsConfig>,
818     num_cpus: u8,
819     rsdp_addr: Option<GuestAddress>,
820     sgx_epc_region: Option<SgxEpcRegion>,
821     serial_number: Option<&str>,
822 ) -> super::Result<()> {
823     // Write EBDA address to location where ACPICA expects to find it
824     guest_mem
825         .write_obj((layout::EBDA_START.0 >> 4) as u16, layout::EBDA_POINTER)
826         .map_err(Error::EbdaSetup)?;
827 
828     let size = smbios::setup_smbios(guest_mem, serial_number).map_err(Error::SmbiosSetup)?;
829 
830     // Place the MP table after the SMBIOS table, aligned to 16 bytes.
831     let offset = GuestAddress(layout::SMBIOS_START).unchecked_add(size);
832     let offset = GuestAddress((offset.0 + 16) & !0xf);
833     mptable::setup_mptable(offset, guest_mem, num_cpus).map_err(Error::MpTableSetup)?;
834 
835     // Check that the RAM is not smaller than the RSDP start address
836     if let Some(rsdp_addr) = rsdp_addr {
837         if rsdp_addr.0 > guest_mem.last_addr().0 {
838             return Err(super::Error::RsdpPastRamEnd);
839         }
840     }
841 
842     configure_pvh(
843         guest_mem,
844         cmdline_addr,
845         initramfs,
846         rsdp_addr,
847         sgx_epc_region,
848     )
849 }
850 
851 fn configure_pvh(
852     guest_mem: &GuestMemoryMmap,
853     cmdline_addr: GuestAddress,
854     initramfs: &Option<InitramfsConfig>,
855     rsdp_addr: Option<GuestAddress>,
856     sgx_epc_region: Option<SgxEpcRegion>,
857 ) -> super::Result<()> {
858     const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336ec578;
859 
860     let mut start_info: StartInfoWrapper = StartInfoWrapper(hvm_start_info::default());
861 
862     start_info.0.magic = XEN_HVM_START_MAGIC_VALUE;
863     start_info.0.version = 1; // pvh has version 1
864     start_info.0.nr_modules = 0;
865     start_info.0.cmdline_paddr = cmdline_addr.raw_value();
866     start_info.0.memmap_paddr = layout::MEMMAP_START.raw_value();
867 
868     if let Some(rsdp_addr) = rsdp_addr {
869         start_info.0.rsdp_paddr = rsdp_addr.0;
870     }
871 
872     if let Some(initramfs_config) = initramfs {
873         // The initramfs has been written to guest memory already, here we just need to
874         // create the module structure that describes it.
875         let ramdisk_mod: ModlistEntryWrapper = ModlistEntryWrapper(hvm_modlist_entry {
876             paddr: initramfs_config.address.raw_value(),
877             size: initramfs_config.size as u64,
878             ..Default::default()
879         });
880 
881         start_info.0.nr_modules += 1;
882         start_info.0.modlist_paddr = layout::MODLIST_START.raw_value();
883 
884         // Write the modlist struct to guest memory.
885         guest_mem
886             .write_obj(ramdisk_mod, layout::MODLIST_START)
887             .map_err(super::Error::ModlistSetup)?;
888     }
889 
890     // Vector to hold the memory map entries that need to be written to guest memory
891     // at MEMMAP_START after all of the mappings are recorded.
892     let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();
893 
894     // Create the memory map entries.
895     add_memmap_entry(&mut memmap, 0, layout::EBDA_START.raw_value(), E820_RAM);
896 
897     let mem_end = guest_mem.last_addr();
898 
899     if mem_end < layout::MEM_32BIT_RESERVED_START {
900         add_memmap_entry(
901             &mut memmap,
902             layout::HIGH_RAM_START.raw_value(),
903             mem_end.unchecked_offset_from(layout::HIGH_RAM_START) + 1,
904             E820_RAM,
905         );
906     } else {
907         add_memmap_entry(
908             &mut memmap,
909             layout::HIGH_RAM_START.raw_value(),
910             layout::MEM_32BIT_RESERVED_START.unchecked_offset_from(layout::HIGH_RAM_START),
911             E820_RAM,
912         );
913         if mem_end > layout::RAM_64BIT_START {
914             add_memmap_entry(
915                 &mut memmap,
916                 layout::RAM_64BIT_START.raw_value(),
917                 mem_end.unchecked_offset_from(layout::RAM_64BIT_START) + 1,
918                 E820_RAM,
919             );
920         }
921     }
922 
923     add_memmap_entry(
924         &mut memmap,
925         layout::PCI_MMCONFIG_START.0,
926         layout::PCI_MMCONFIG_SIZE,
927         E820_RESERVED,
928     );
929 
930     if let Some(sgx_epc_region) = sgx_epc_region {
931         add_memmap_entry(
932             &mut memmap,
933             sgx_epc_region.start().raw_value(),
934             sgx_epc_region.size() as u64,
935             E820_RESERVED,
936         );
937     }
938 
939     start_info.0.memmap_entries = memmap.len() as u32;
940 
941     // Copy the vector with the memmap table to the MEMMAP_START address
942     // which is already saved in the memmap_paddr field of hvm_start_info struct.
943     let mut memmap_start_addr = layout::MEMMAP_START;
944 
945     guest_mem
946         .checked_offset(
947             memmap_start_addr,
948             mem::size_of::<hvm_memmap_table_entry>() * start_info.0.memmap_entries as usize,
949         )
950         .ok_or(super::Error::MemmapTablePastRamEnd)?;
951 
952     // For every entry in the memmap vector, create a MemmapTableEntryWrapper
953     // and write it to guest memory.
954     for memmap_entry in memmap {
955         let map_entry_wrapper: MemmapTableEntryWrapper = MemmapTableEntryWrapper(memmap_entry);
956 
957         guest_mem
958             .write_obj(map_entry_wrapper, memmap_start_addr)
959             .map_err(|_| super::Error::MemmapTableSetup)?;
960         memmap_start_addr =
961             memmap_start_addr.unchecked_add(mem::size_of::<hvm_memmap_table_entry>() as u64);
962     }
963 
964     // The hvm_start_info struct itself must be stored at the PVH_INFO_START
965     // address, and %rbx will be initialized to contain PVH_INFO_START prior to
966     // starting the guest, as required by the PVH ABI.
967     let start_info_addr = layout::PVH_INFO_START;
968 
969     guest_mem
970         .checked_offset(start_info_addr, mem::size_of::<hvm_start_info>())
971         .ok_or(super::Error::StartInfoPastRamEnd)?;
972 
973     // Write the start_info struct to guest memory.
974     guest_mem
975         .write_obj(start_info, start_info_addr)
976         .map_err(|_| super::Error::StartInfoSetup)?;
977 
978     Ok(())
979 }
980 
981 fn add_memmap_entry(memmap: &mut Vec<hvm_memmap_table_entry>, addr: u64, size: u64, mem_type: u32) {
982     // Add the table entry to the vector
983     memmap.push(hvm_memmap_table_entry {
984         addr,
985         size,
986         type_: mem_type,
987         reserved: 0,
988     });
989 }
990 
991 /// Returns the memory address where the initramfs could be loaded.
992 pub fn initramfs_load_addr(
993     guest_mem: &GuestMemoryMmap,
994     initramfs_size: usize,
995 ) -> super::Result<u64> {
996     let first_region = guest_mem
997         .find_region(GuestAddress::new(0))
998         .ok_or(super::Error::InitramfsAddress)?;
999     // It's safe to cast to usize because the size of a region can't be greater than usize.
1000     let lowmem_size = first_region.len() as usize;
1001 
1002     if lowmem_size < initramfs_size {
1003         return Err(super::Error::InitramfsAddress);
1004     }
1005 
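    // Place the initramfs as high as possible in the low-memory region, rounded
    // down to a page boundary by the `& !(pagesize - 1)` mask.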
1006     let aligned_addr: u64 = ((lowmem_size - initramfs_size) & !(crate::pagesize() - 1)) as u64;
1007     Ok(aligned_addr)
1008 }
1009 
1010 pub fn get_host_cpu_phys_bits() -> u8 {
1011     unsafe {
1012         let leaf = x86_64::__cpuid(0x8000_0000);
1013 
1014         // Detect and handle AMD SME (Secure Memory Encryption) properly.
1015         // Some physical address bits may become reserved when the feature is enabled.
1016         // See AMD64 Architecture Programmer's Manual Volume 2, Section 7.10.1
1017         let reduced = if leaf.eax >= 0x8000_001f
1018             && leaf.ebx == 0x6874_7541    // Vendor ID "AuthenticAMD":
1019             && leaf.ecx == 0x444d_4163    // EBX="Auth", EDX="enti",
1020             && leaf.edx == 0x6974_6e65    // ECX="cAMD"
1021             && x86_64::__cpuid(0x8000_001f).eax & 0x1 != 0
1022         {
1023             (x86_64::__cpuid(0x8000_001f).ebx >> 6) & 0x3f
1024         } else {
1025             0
1026         };
1027 
1028         if leaf.eax >= 0x8000_0008 {
1029             let leaf = x86_64::__cpuid(0x8000_0008);
1030             ((leaf.eax & 0xff) - reduced) as u8
1031         } else {
1032             36
1033         }
1034     }
1035 }
1036 
1037 fn update_cpuid_topology(
1038     cpuid: &mut Vec<CpuIdEntry>,
1039     threads_per_core: u8,
1040     cores_per_die: u8,
1041     dies_per_package: u8,
1042 ) {
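    // Each *_width is the number of low APIC-ID bits consumed by that topology
    // level: ceil(log2(n)), computed as 8 - (n - 1).leading_zeros() on a u8,
    // with the widths accumulating so each level shifts past the levels below.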
1043     let thread_width = 8 - (threads_per_core - 1).leading_zeros();
1044     let core_width = (8 - (cores_per_die - 1).leading_zeros()) + thread_width;
1045     let die_width = (8 - (dies_per_package - 1).leading_zeros()) + core_width;
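
    // In leaves 0xb and 0x1f, ECX[15:8] encodes the level type: 1 = SMT,
    // 2 = Core, 5 = Die -- hence the `1 << 8`, `2 << 8` and `5 << 8` below.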
1046 
1047     // CPU Topology leaf 0xb
1048     CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(0), CpuidReg::EAX, thread_width);
1049     CpuidPatch::set_cpuid_reg(
1050         cpuid,
1051         0xb,
1052         Some(0),
1053         CpuidReg::EBX,
1054         u32::from(threads_per_core),
1055     );
1056     CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(0), CpuidReg::ECX, 1 << 8);
1057 
1058     CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(1), CpuidReg::EAX, die_width);
1059     CpuidPatch::set_cpuid_reg(
1060         cpuid,
1061         0xb,
1062         Some(1),
1063         CpuidReg::EBX,
1064         u32::from(dies_per_package * cores_per_die * threads_per_core),
1065     );
1066     CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(1), CpuidReg::ECX, 2 << 8);
1067 
1068     // CPU Topology leaf 0x1f
1069     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(0), CpuidReg::EAX, thread_width);
1070     CpuidPatch::set_cpuid_reg(
1071         cpuid,
1072         0x1f,
1073         Some(0),
1074         CpuidReg::EBX,
1075         u32::from(threads_per_core),
1076     );
1077     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(0), CpuidReg::ECX, 1 << 8);
1078 
1079     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(1), CpuidReg::EAX, core_width);
1080     CpuidPatch::set_cpuid_reg(
1081         cpuid,
1082         0x1f,
1083         Some(1),
1084         CpuidReg::EBX,
1085         u32::from(cores_per_die * threads_per_core),
1086     );
1087     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(1), CpuidReg::ECX, 2 << 8);
1088 
1089     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(2), CpuidReg::EAX, die_width);
1090     CpuidPatch::set_cpuid_reg(
1091         cpuid,
1092         0x1f,
1093         Some(2),
1094         CpuidReg::EBX,
1095         u32::from(dies_per_package * cores_per_die * threads_per_core),
1096     );
1097     CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(2), CpuidReg::ECX, 5 << 8);
1098 }
1099 
1100 // The goal is to update the CPUID sub-leaves to reflect the number of EPC
1101 // sections exposed to the guest.
1102 fn update_cpuid_sgx(
1103     cpuid: &mut Vec<CpuIdEntry>,
1104     epc_sections: Vec<SgxEpcSection>,
1105 ) -> Result<(), Error> {
1106     // Something's wrong if there's no EPC section.
1107     if epc_sections.is_empty() {
1108         return Err(Error::NoSgxEpcSection);
1109     }
1110     // We can't go further if the hypervisor does not support SGX feature.
1111     if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::EBX, 2) {
1112         return Err(Error::MissingSgxFeature);
1113     }
1114     // We can't go further if the hypervisor does not support SGX_LC feature.
1115     if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::ECX, 30) {
1116         return Err(Error::MissingSgxLaunchControlFeature);
1117     }
1118 
1119     // Get host CPUID for leaf 0x12, subleaf 0x2. This is to retrieve EPC
1120     // properties such as confidentiality and integrity.
1121     let leaf = unsafe { std::arch::x86_64::__cpuid_count(0x12, 0x2) };
1122 
1123     for (i, epc_section) in epc_sections.iter().enumerate() {
1124         let subleaf_idx = i + 2;
1125         let start = epc_section.start().raw_value();
1126         let size = epc_section.size() as u64;
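        // Leaf 0x12 sub-leaves >= 2 each describe one EPC section: EAX[3:0] is
        // the sub-leaf type (1 = valid EPC section), EAX[31:12]/EBX hold the
        // base address (low/high), ECX[31:12]/EDX hold the size, and ECX[3:0]
        // carries the section properties taken from the host.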
1127         let eax = (start & 0xffff_f000) as u32 | 0x1;
1128         let ebx = (start >> 32) as u32;
1129         let ecx = (size & 0xffff_f000) as u32 | (leaf.ecx & 0xf);
1130         let edx = (size >> 32) as u32;
1131         // CPU Topology leaf 0x12
1132         CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, eax);
1133         CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, ebx);
1134         CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, ecx);
1135         CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, edx);
1136     }
1137 
1138     // Add one NULL entry to terminate the dynamic list
1139     let subleaf_idx = epc_sections.len() + 2;
1140     // CPU Topology leaf 0x12
1141     CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, 0);
1142     CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, 0);
1143     CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, 0);
1144     CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, 0);
1145 
1146     Ok(())
1147 }
1148 
1149 #[cfg(test)]
1150 mod tests {
1151     use super::*;
1152 
1153     #[test]
1154     fn regions_lt_4gb() {
1155         let regions = arch_memory_regions(1 << 29);
1156         assert_eq!(3, regions.len());
1157         assert_eq!(GuestAddress(0), regions[0].0);
1158         assert_eq!(1usize << 29, regions[0].1);
1159     }
1160 
1161     #[test]
1162     fn regions_gt_4gb() {
1163         let regions = arch_memory_regions((1 << 32) + 0x8000);
1164         assert_eq!(4, regions.len());
1165         assert_eq!(GuestAddress(0), regions[0].0);
1166         assert_eq!(GuestAddress(1 << 32), regions[1].0);
1167     }
1168 
1169     #[test]
1170     fn test_system_configuration() {
1171         let no_vcpus = 4;
1172         let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1173         let config_err = configure_system(
1174             &gm,
1175             GuestAddress(0),
1176             &None,
1177             1,
1178             Some(layout::RSDP_POINTER),
1179             None,
1180             None,
1181         );
1182         assert!(config_err.is_err());
1183 
1184         // Now assigning some memory that falls before the 32bit memory hole.
1185         let mem_size = 128 << 20;
1186         let arch_mem_regions = arch_memory_regions(mem_size);
1187         let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
1188             .iter()
1189             .filter(|r| r.2 == RegionType::Ram)
1190             .map(|r| (r.0, r.1))
1191             .collect();
1192         let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
1193 
1194         configure_system(&gm, GuestAddress(0), &None, no_vcpus, None, None, None).unwrap();
1195 
1196         // Now assigning some memory that is equal to the start of the 32bit memory hole.
1197         let mem_size = 3328 << 20;
1198         let arch_mem_regions = arch_memory_regions(mem_size);
1199         let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
1200             .iter()
1201             .filter(|r| r.2 == RegionType::Ram)
1202             .map(|r| (r.0, r.1))
1203             .collect();
1204         let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
1205         configure_system(&gm, GuestAddress(0), &None, no_vcpus, None, None, None).unwrap();
1206 
1207         configure_system(&gm, GuestAddress(0), &None, no_vcpus, None, None, None).unwrap();
1208 
1209         // Now assigning some memory that falls after the 32bit memory hole.
1210         let mem_size = 3330 << 20;
1211         let arch_mem_regions = arch_memory_regions(mem_size);
1212         let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
1213             .iter()
1214             .filter(|r| r.2 == RegionType::Ram)
1215             .map(|r| (r.0, r.1))
1216             .collect();
1217         let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
1218         configure_system(&gm, GuestAddress(0), &None, no_vcpus, None, None, None).unwrap();
1219 
1220         configure_system(&gm, GuestAddress(0), &None, no_vcpus, None, None, None).unwrap();
1221     }
1222 
1223     #[test]
1224     fn test_add_memmap_entry() {
1225         let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();
1226 
1227         let expected_memmap = vec![
1228             hvm_memmap_table_entry {
1229                 addr: 0x0,
1230                 size: 0x1000,
1231                 type_: E820_RAM,
1232                 ..Default::default()
1233             },
1234             hvm_memmap_table_entry {
1235                 addr: 0x10000,
1236                 size: 0xa000,
1237                 type_: E820_RESERVED,
1238                 ..Default::default()
1239             },
1240         ];
1241 
1242         add_memmap_entry(&mut memmap, 0, 0x1000, E820_RAM);
1243         add_memmap_entry(&mut memmap, 0x10000, 0xa000, E820_RESERVED);
1244 
1245         assert_eq!(format!("{:?}", memmap), format!("{:?}", expected_memmap));
1246     }
1247 }
1248