// Copyright © 2020, Oracle and/or its affiliates.
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::sync::Arc;
pub mod interrupts;
pub mod layout;
mod mpspec;
mod mptable;
pub mod regs;
use crate::GuestMemoryMmap;
use crate::InitramfsConfig;
use crate::RegionType;
use hypervisor::{CpuId, CpuIdEntry, HypervisorError, CPUID_FLAG_VALID_INDEX};
use linux_loader::loader::bootparam::boot_params;
use linux_loader::loader::elf::start_info::{
    hvm_memmap_table_entry, hvm_modlist_entry, hvm_start_info,
};
use std::collections::BTreeMap;
use std::mem;
use vm_memory::{
    Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
    GuestMemoryRegion, GuestUsize,
};
mod smbios;
use std::arch::x86_64;
#[cfg(feature = "tdx")]
pub mod tdx;

// CPUID feature bits
const TSC_DEADLINE_TIMER_ECX_BIT: u8 = 24; // TSC deadline timer ecx bit.
const HYPERVISOR_ECX_BIT: u8 = 31; // Hypervisor ecx bit.
const MTRR_EDX_BIT: u8 = 12; // MTRR edx bit.

// KVM feature bits
const KVM_FEATURE_ASYNC_PF_INT_BIT: u8 = 14;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE_BIT: u8 = 0;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE2_BIT: u8 = 3;
#[cfg(feature = "tdx")]
const KVM_FEATURE_CLOCKSOURCE_STABLE_BIT: u8 = 24;
#[cfg(feature = "tdx")]
const KVM_FEATURE_ASYNC_PF_BIT: u8 = 4;
#[cfg(feature = "tdx")]
const KVM_FEATURE_ASYNC_PF_VMEXIT_BIT: u8 = 10;
#[cfg(feature = "tdx")]
const KVM_FEATURE_STEAL_TIME_BIT: u8 = 5;

#[derive(Debug, Copy, Clone)]
/// Specifies the entry point address where the guest must start
/// executing code, as well as which of the supported boot protocols
/// is to be used to configure the guest initial state.
pub struct EntryPoint {
    /// Address in guest memory where the guest must start execution
    pub entry_addr: Option<GuestAddress>,
}

const E820_RAM: u32 = 1;
const E820_RESERVED: u32 = 2;

#[derive(Clone)]
pub struct SgxEpcSection {
    start: GuestAddress,
    size: GuestUsize,
}

impl SgxEpcSection {
    pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
        SgxEpcSection { start, size }
    }
    pub fn start(&self) -> GuestAddress {
        self.start
    }
    pub fn size(&self) -> GuestUsize {
        self.size
    }
}

#[derive(Clone)]
pub struct SgxEpcRegion {
    start: GuestAddress,
    size: GuestUsize,
    epc_sections: BTreeMap<String, SgxEpcSection>,
}

impl SgxEpcRegion {
    pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
        SgxEpcRegion {
            start,
            size,
            epc_sections: BTreeMap::new(),
        }
    }
    pub fn start(&self) -> GuestAddress {
        self.start
    }
    pub fn size(&self) -> GuestUsize {
        self.size
    }
    pub fn epc_sections(&self) -> &BTreeMap<String, SgxEpcSection> {
        &self.epc_sections
    }
    pub fn insert(&mut self, id: String, epc_section: SgxEpcSection) {
        self.epc_sections.insert(id, epc_section);
    }
}
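
// A short usage sketch (the id string, addresses and sizes below are
// illustrative, not taken from a real platform): an `SgxEpcRegion` covers the
// whole guest EPC range and owns one `SgxEpcSection` per EPC device, keyed by
// section id.
//
//     let mut region = SgxEpcRegion::new(GuestAddress(0x2_0000_0000), 0x800_0000);
//     region.insert(
//         "epc0".to_string(),
//         SgxEpcSection::new(GuestAddress(0x2_0000_0000), 0x400_0000),
//     );
//     assert_eq!(region.epc_sections().len(), 1);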

// This is a workaround to the Rust enforcement specifying that any implementation of a foreign
// trait (in this case `DataInit`) where:
// * the type that is implementing the trait is foreign or
// * all of the parameters being passed to the trait (if there are any) are also foreign
// is prohibited.
#[derive(Copy, Clone, Default)]
struct StartInfoWrapper(hvm_start_info);

#[derive(Copy, Clone, Default)]
struct MemmapTableEntryWrapper(hvm_memmap_table_entry);

#[derive(Copy, Clone, Default)]
struct ModlistEntryWrapper(hvm_modlist_entry);

// SAFETY: These data structures only contain a series of integers
unsafe impl ByteValued for StartInfoWrapper {}
unsafe impl ByteValued for MemmapTableEntryWrapper {}
unsafe impl ByteValued for ModlistEntryWrapper {}

// This is a workaround to the Rust enforcement specifying that any implementation of a foreign
// trait (in this case `DataInit`) where:
// * the type that is implementing the trait is foreign or
// * all of the parameters being passed to the trait (if there are any) are also foreign
// is prohibited.
#[derive(Copy, Clone, Default)]
struct BootParamsWrapper(boot_params);

// SAFETY: BootParamsWrapper is a wrapper over `boot_params` (a series of ints).
unsafe impl ByteValued for BootParamsWrapper {}

#[derive(Debug)]
pub enum Error {
    /// Error writing MP table to memory.
    MpTableSetup(mptable::Error),

    /// Error configuring the general purpose registers
    RegsConfiguration(regs::Error),

    /// Error configuring the special registers
    SregsConfiguration(regs::Error),

    /// Error configuring the floating point related registers
    FpuConfiguration(regs::Error),

    /// Error configuring the MSR registers
    MsrsConfiguration(regs::Error),

    /// Failed to set supported CPUs.
    SetSupportedCpusFailed(anyhow::Error),

    /// Cannot set the local interruption due to bad configuration.
    LocalIntConfiguration(anyhow::Error),

    /// Error setting up SMBIOS table
    SmbiosSetup(smbios::Error),

    /// Could not find any SGX EPC section
    NoSgxEpcSection,

    /// Missing SGX CPU feature
    MissingSgxFeature,

    /// Missing SGX_LC CPU feature
    MissingSgxLaunchControlFeature,

    /// Error getting supported CPUID through the hypervisor (kvm/mshv) API
    CpuidGetSupported(HypervisorError),

    /// Error populating CPUID with KVM HyperV emulation details
    CpuidKvmHyperV(vmm_sys_util::fam::Error),

    /// Error populating CPUID with CPU identification
    CpuidIdentification(vmm_sys_util::fam::Error),

    /// Error checking CPUID compatibility
    CpuidCheckCompatibility,

    /// Error writing EBDA address
    EbdaSetup(vm_memory::GuestMemoryError),

    /// Error retrieving TDX capabilities through the hypervisor (kvm/mshv) API
    #[cfg(feature = "tdx")]
    TdxCapabilities(HypervisorError),
}

impl From<Error> for super::Error {
    fn from(e: Error) -> super::Error {
        super::Error::X86_64Setup(e)
    }
}

#[allow(dead_code, clippy::upper_case_acronyms)]
#[derive(Copy, Clone, Debug)]
pub enum CpuidReg {
    EAX,
    EBX,
    ECX,
    EDX,
}

pub struct CpuidPatch {
    pub function: u32,
    pub index: u32,
    pub flags_bit: Option<u8>,
    pub eax_bit: Option<u8>,
    pub ebx_bit: Option<u8>,
    pub ecx_bit: Option<u8>,
    pub edx_bit: Option<u8>,
}

impl CpuidPatch {
    pub fn set_cpuid_reg(
        cpuid: &mut CpuId,
        function: u32,
        index: Option<u32>,
        reg: CpuidReg,
        value: u32,
    ) {
        let entries = cpuid.as_mut_slice();

        let mut entry_found = false;
        for entry in entries.iter_mut() {
            if entry.function == function && (index.is_none() || index == Some(entry.index)) {
                entry_found = true;
                match reg {
                    CpuidReg::EAX => {
                        entry.eax = value;
                    }
                    CpuidReg::EBX => {
                        entry.ebx = value;
                    }
                    CpuidReg::ECX => {
                        entry.ecx = value;
                    }
                    CpuidReg::EDX => {
                        entry.edx = value;
                    }
                }
            }
        }

        if entry_found {
            return;
        }

        // Entry not found, so let's add it.
        if let Some(index) = index {
            let mut entry = CpuIdEntry {
                function,
                index,
                flags: CPUID_FLAG_VALID_INDEX,
                ..Default::default()
            };
            match reg {
                CpuidReg::EAX => {
                    entry.eax = value;
                }
                CpuidReg::EBX => {
                    entry.ebx = value;
                }
                CpuidReg::ECX => {
                    entry.ecx = value;
                }
                CpuidReg::EDX => {
                    entry.edx = value;
                }
            }

            if let Err(e) = cpuid.push(entry) {
                error!("Failed adding new CPUID entry: {:?}", e);
            }
        }
    }

    pub fn patch_cpuid(cpuid: &mut CpuId, patches: Vec<CpuidPatch>) {
        let entries = cpuid.as_mut_slice();

        for entry in entries.iter_mut() {
            for patch in patches.iter() {
                if entry.function == patch.function && entry.index == patch.index {
                    if let Some(flags_bit) = patch.flags_bit {
                        entry.flags |= 1 << flags_bit;
                    }
                    if let Some(eax_bit) = patch.eax_bit {
                        entry.eax |= 1 << eax_bit;
                    }
                    if let Some(ebx_bit) = patch.ebx_bit {
                        entry.ebx |= 1 << ebx_bit;
                    }
                    if let Some(ecx_bit) = patch.ecx_bit {
                        entry.ecx |= 1 << ecx_bit;
                    }
                    if let Some(edx_bit) = patch.edx_bit {
                        entry.edx |= 1 << edx_bit;
                    }
                }
            }
        }
    }

    pub fn is_feature_enabled(
        cpuid: &CpuId,
        function: u32,
        index: u32,
        reg: CpuidReg,
        feature_bit: usize,
    ) -> bool {
        let entries = cpuid.as_slice();
        let mask = 1 << feature_bit;

        for entry in entries.iter() {
            if entry.function == function && entry.index == index {
                let reg_val = match reg {
                    CpuidReg::EAX => entry.eax,
                    CpuidReg::EBX => entry.ebx,
                    CpuidReg::ECX => entry.ecx,
                    CpuidReg::EDX => entry.edx,
                };

                return (reg_val & mask) == mask;
            }
        }

        false
    }
}
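
// An illustrative sketch of how these helpers compose (both calls below are
// taken from elsewhere in this file): `set_cpuid_reg` with `index: None`
// overwrites the register in every subleaf of the given leaf, while
// `Some(idx)` targets one subleaf and appends a new entry if none matches.
//
//     // Force EDX of every leaf 0xb subleaf to the vCPU APIC id:
//     CpuidPatch::set_cpuid_reg(&mut cpuid, 0xb, None, CpuidReg::EDX, u32::from(id));
//     // Then query a feature bit, e.g. SGX (leaf 0x7, EBX bit 2):
//     let sgx = CpuidPatch::is_feature_enabled(&cpuid, 0x7, 0, CpuidReg::EBX, 2);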

#[derive(Debug)]
enum CpuidCompatibleCheck {
    BitwiseSubset, // bitwise subset
    Equal,         // equal in value
    NumNotGreater, // numerically less than or equal
}

pub struct CpuidFeatureEntry {
    function: u32,
    index: u32,
    feature_reg: CpuidReg,
    compatible_check: CpuidCompatibleCheck,
}

impl CpuidFeatureEntry {
    fn checked_feature_entry_list() -> Vec<CpuidFeatureEntry> {
        vec![
            // The following list includes all hardware feature bits from
            // the CPUID Wiki Page: https://en.wikipedia.org/wiki/CPUID
            // Leaf 0x1, ECX/EDX, feature bits
            CpuidFeatureEntry {
                function: 1,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 1,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            // Leaf 0x7, EAX/EBX/ECX/EDX, extended features
            CpuidFeatureEntry {
                function: 7,
                index: 0,
                feature_reg: CpuidReg::EAX,
                compatible_check: CpuidCompatibleCheck::NumNotGreater,
            },
            CpuidFeatureEntry {
                function: 7,
                index: 0,
                feature_reg: CpuidReg::EBX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 7,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 7,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            // Leaf 0x7 subleaf 0x1, EAX, extended features
            CpuidFeatureEntry {
                function: 7,
                index: 1,
                feature_reg: CpuidReg::EAX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            // Leaf 0x8000_0001, ECX/EDX, CPUID feature bits
            CpuidFeatureEntry {
                function: 0x8000_0001,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 0x8000_0001,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            // KVM CPUID bits: https://www.kernel.org/doc/html/latest/virt/kvm/cpuid.html
            // Leaf 0x4000_0000, EAX/EBX/ECX/EDX, KVM CPUID SIGNATURE
            CpuidFeatureEntry {
                function: 0x4000_0000,
                index: 0,
                feature_reg: CpuidReg::EAX,
                compatible_check: CpuidCompatibleCheck::NumNotGreater,
            },
            CpuidFeatureEntry {
                function: 0x4000_0000,
                index: 0,
                feature_reg: CpuidReg::EBX,
                compatible_check: CpuidCompatibleCheck::Equal,
            },
            CpuidFeatureEntry {
                function: 0x4000_0000,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::Equal,
            },
            CpuidFeatureEntry {
                function: 0x4000_0000,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::Equal,
            },
            // Leaf 0x4000_0001, EAX/EBX/ECX/EDX, KVM CPUID features
            CpuidFeatureEntry {
                function: 0x4000_0001,
                index: 0,
                feature_reg: CpuidReg::EAX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 0x4000_0001,
                index: 0,
                feature_reg: CpuidReg::EBX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 0x4000_0001,
                index: 0,
                feature_reg: CpuidReg::ECX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
            CpuidFeatureEntry {
                function: 0x4000_0001,
                index: 0,
                feature_reg: CpuidReg::EDX,
                compatible_check: CpuidCompatibleCheck::BitwiseSubset,
            },
        ]
    }

    fn get_features_from_cpuid(
        cpuid: &CpuId,
        feature_entry_list: &[CpuidFeatureEntry],
    ) -> Vec<u32> {
        let mut features = vec![0; feature_entry_list.len()];
        for (i, feature_entry) in feature_entry_list.iter().enumerate() {
            for cpuid_entry in cpuid.as_slice().iter() {
                if cpuid_entry.function == feature_entry.function
                    && cpuid_entry.index == feature_entry.index
                {
                    match feature_entry.feature_reg {
                        CpuidReg::EAX => {
                            features[i] = cpuid_entry.eax;
                        }
                        CpuidReg::EBX => {
                            features[i] = cpuid_entry.ebx;
                        }
                        CpuidReg::ECX => {
                            features[i] = cpuid_entry.ecx;
                        }
                        CpuidReg::EDX => {
                            features[i] = cpuid_entry.edx;
                        }
                    }

                    break;
                }
            }
        }

        features
    }

    // The function returns `Error` (a.k.a. "incompatible") when the CPUID
    // features from `src_vm_cpuid` are not a subset of those of `dest_vm_cpuid`.
    pub fn check_cpuid_compatibility(
        src_vm_cpuid: &CpuId,
        dest_vm_cpuid: &CpuId,
    ) -> Result<(), Error> {
        let feature_entry_list = &Self::checked_feature_entry_list();
        let src_vm_features = Self::get_features_from_cpuid(src_vm_cpuid, feature_entry_list);
        let dest_vm_features = Self::get_features_from_cpuid(dest_vm_cpuid, feature_entry_list);

        // Loop on each feature and check that the 'source vm' feature is a
        // subset of the corresponding 'destination vm' feature.
        let mut compatible = true;
        for (i, (src_vm_feature, dest_vm_feature)) in src_vm_features
            .iter()
            .zip(dest_vm_features.iter())
            .enumerate()
        {
            let entry = &feature_entry_list[i];
            let entry_compatible = match entry.compatible_check {
                CpuidCompatibleCheck::BitwiseSubset => {
                    let different_feature_bits = src_vm_feature ^ dest_vm_feature;
                    let src_vm_feature_bits_only = different_feature_bits & src_vm_feature;
                    src_vm_feature_bits_only == 0
                }
                CpuidCompatibleCheck::Equal => src_vm_feature == dest_vm_feature,
                CpuidCompatibleCheck::NumNotGreater => src_vm_feature <= dest_vm_feature,
            };
            if !entry_compatible {
                error!(
                    "Detected incompatible CPUID entry: leaf={:#02x} (subleaf={:#02x}), register='{:?}', \
                    compatible_check='{:?}', source VM feature='{:#04x}', destination VM feature='{:#04x}'.",
                    entry.function, entry.index, entry.feature_reg,
                    entry.compatible_check, src_vm_feature, dest_vm_feature
                );

                compatible = false;
            }
        }

        if compatible {
            info!("No CPU incompatibility detected.");
            Ok(())
        } else {
            Err(Error::CpuidCheckCompatibility)
        }
    }
}
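
// Worked example of the `BitwiseSubset` rule in `check_cpuid_compatibility`
// above, with made-up register values: src = 0b0110 and dest = 0b1110 give
// (src ^ dest) & src == 0, so every source feature bit is also offered by the
// destination and the entry passes; src = 0b0111 against the same dest leaves
// bit 0 set (0b0001), so the entry is flagged incompatible.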

pub fn generate_common_cpuid(
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
    topology: Option<(u8, u8, u8)>,
    sgx_epc_sections: Option<Vec<SgxEpcSection>>,
    phys_bits: u8,
    kvm_hyperv: bool,
    #[cfg(feature = "tdx")] tdx_enabled: bool,
) -> super::Result<CpuId> {
    let cpuid_patches = vec![
        // Patch TSC deadline timer bit
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: Some(TSC_DEADLINE_TIMER_ECX_BIT),
            edx_bit: None,
        },
        // Patch hypervisor bit
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: Some(HYPERVISOR_ECX_BIT),
            edx_bit: None,
        },
        // Enable MTRR feature
        CpuidPatch {
            function: 1,
            index: 0,
            flags_bit: None,
            eax_bit: None,
            ebx_bit: None,
            ecx_bit: None,
            edx_bit: Some(MTRR_EDX_BIT),
        },
    ];

    // Supported CPUID
    let mut cpuid = hypervisor.get_cpuid().map_err(Error::CpuidGetSupported)?;

    CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches);

    if let Some(t) = topology {
        update_cpuid_topology(&mut cpuid, t.0, t.1, t.2);
    }

    if let Some(sgx_epc_sections) = sgx_epc_sections {
        update_cpuid_sgx(&mut cpuid, sgx_epc_sections)?;
    }

    #[cfg(feature = "tdx")]
    let tdx_capabilities = if tdx_enabled {
        let caps = hypervisor
            .tdx_capabilities()
            .map_err(Error::TdxCapabilities)?;
        info!("TDX capabilities {:#?}", caps);
        Some(caps)
    } else {
        None
    };

    // Update some existing CPUID
    for entry in cpuid.as_mut_slice().iter_mut() {
        match entry.function {
            0xd =>
            {
                #[cfg(feature = "tdx")]
                if let Some(caps) = &tdx_capabilities {
                    let xcr0_mask: u64 = 0x82ff;
                    let xss_mask: u64 = !xcr0_mask;
                    if entry.index == 0 {
                        entry.eax &= (caps.xfam_fixed0 as u32) & (xcr0_mask as u32);
                        entry.eax |= (caps.xfam_fixed1 as u32) & (xcr0_mask as u32);
                        entry.edx &= ((caps.xfam_fixed0 & xcr0_mask) >> 32) as u32;
                        entry.edx |= ((caps.xfam_fixed1 & xcr0_mask) >> 32) as u32;
                    } else if entry.index == 1 {
                        entry.ecx &= (caps.xfam_fixed0 as u32) & (xss_mask as u32);
                        entry.ecx |= (caps.xfam_fixed1 as u32) & (xss_mask as u32);
                        entry.edx &= ((caps.xfam_fixed0 & xss_mask) >> 32) as u32;
                        entry.edx |= ((caps.xfam_fixed1 & xss_mask) >> 32) as u32;
                    }
                }
            }
            // Set CPU physical bits
            0x8000_0008 => {
                entry.eax = (entry.eax & 0xffff_ff00) | (phys_bits as u32 & 0xff);
            }
            // Disable KVM_FEATURE_ASYNC_PF_INT
            // This is required until we find out why the asynchronous page
            // fault is generating unexpected behavior when using interrupt
            // mechanism.
            // TODO: Re-enable KVM_FEATURE_ASYNC_PF_INT (#2277)
            0x4000_0001 => {
                entry.eax &= !(1 << KVM_FEATURE_ASYNC_PF_INT_BIT);

                // These features are not supported by TDX
                #[cfg(feature = "tdx")]
                if tdx_enabled {
                    entry.eax &= !(1 << KVM_FEATURE_CLOCKSOURCE_BIT
                        | 1 << KVM_FEATURE_CLOCKSOURCE2_BIT
                        | 1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT
                        | 1 << KVM_FEATURE_ASYNC_PF_BIT
                        | 1 << KVM_FEATURE_ASYNC_PF_VMEXIT_BIT
                        | 1 << KVM_FEATURE_STEAL_TIME_BIT);
                }
            }
            _ => {}
        }
    }

    // Copy CPU identification string
    for i in 0x8000_0002..=0x8000_0004 {
        cpuid.retain(|c| c.function != i);
        let leaf = unsafe { std::arch::x86_64::__cpuid(i) };
        cpuid
            .push(CpuIdEntry {
                function: i,
                eax: leaf.eax,
                ebx: leaf.ebx,
                ecx: leaf.ecx,
                edx: leaf.edx,
                ..Default::default()
            })
            .map_err(Error::CpuidIdentification)?;
    }

    if kvm_hyperv {
        // Remove conflicting entries
        cpuid.retain(|c| c.function != 0x4000_0000);
        cpuid.retain(|c| c.function != 0x4000_0001);
        // See "Hypervisor Top Level Functional Specification" for details
        // Compliance with "Hv#1" requires leaves up to 0x4000_000a
        cpuid
            .push(CpuIdEntry {
                function: 0x4000_0000,
                eax: 0x4000_000a, // Maximum cpuid leaf
                ebx: 0x756e_694c, // "Linu"
                ecx: 0x564b_2078, // "x KV"
                edx: 0x7648_204d, // "M Hv"
                ..Default::default()
            })
            .map_err(Error::CpuidKvmHyperV)?;
        cpuid
            .push(CpuIdEntry {
                function: 0x4000_0001,
                eax: 0x3123_7648, // "Hv#1"
                ..Default::default()
            })
            .map_err(Error::CpuidKvmHyperV)?;
        cpuid
            .push(CpuIdEntry {
                function: 0x4000_0002,
                eax: 0x3839,  // "Build number"
                ebx: 0xa0000, // "Version"
                ..Default::default()
            })
            .map_err(Error::CpuidKvmHyperV)?;
        cpuid
            .push(CpuIdEntry {
                function: 0x4000_0003,
                eax: 1 << 1 // AccessPartitionReferenceCounter
                    | 1 << 2 // AccessSynicRegs
                    | 1 << 3 // AccessSyntheticTimerRegs
                    | 1 << 9, // AccessPartitionReferenceTsc
                edx: 1 << 3, // CPU dynamic partitioning
                ..Default::default()
            })
            .map_err(Error::CpuidKvmHyperV)?;
        cpuid
            .push(CpuIdEntry {
                function: 0x4000_0004,
                eax: 1 << 5, // Recommend relaxed timing
                ..Default::default()
            })
            .map_err(Error::CpuidKvmHyperV)?;
        for i in 0x4000_0005..=0x4000_000a {
            cpuid
                .push(CpuIdEntry {
                    function: i,
                    ..Default::default()
                })
                .map_err(Error::CpuidKvmHyperV)?;
        }
    }

    Ok(cpuid)
}
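
// A minimal call sketch (assuming a non-TDX build; `hypervisor::new()` and
// the `46` physical-bits value are illustrative) of how the two CPUID stages
// fit together: the common table is built once per VM, then cloned and
// specialized per vCPU by `configure_vcpu()` below.
//
//     let hv = hypervisor::new().unwrap();
//     let cpuid = generate_common_cpuid(hv, None, None, 46, false)?;
//     // for each vCPU `id`:
//     //     configure_vcpu(&vcpu, id, Some(entry_point), &guest_memory, cpuid.clone(), false)?;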

pub fn configure_vcpu(
    fd: &Arc<dyn hypervisor::Vcpu>,
    id: u8,
    kernel_entry_point: Option<EntryPoint>,
    vm_memory: &GuestMemoryAtomic<GuestMemoryMmap>,
    cpuid: CpuId,
    kvm_hyperv: bool,
) -> super::Result<()> {
    // Per-vCPU CPUID changes; the common bits are handled via generate_common_cpuid().
    let mut cpuid = cpuid;
    CpuidPatch::set_cpuid_reg(&mut cpuid, 0xb, None, CpuidReg::EDX, u32::from(id));
    CpuidPatch::set_cpuid_reg(&mut cpuid, 0x1f, None, CpuidReg::EDX, u32::from(id));

    fd.set_cpuid2(&cpuid)
        .map_err(|e| Error::SetSupportedCpusFailed(e.into()))?;

    if kvm_hyperv {
        fd.enable_hyperv_synic().unwrap();
    }

    regs::setup_msrs(fd).map_err(Error::MsrsConfiguration)?;
    if let Some(kernel_entry_point) = kernel_entry_point {
        if let Some(entry_addr) = kernel_entry_point.entry_addr {
            // Safe to unwrap because this method is called after the VM is configured
            regs::setup_regs(fd, entry_addr.raw_value()).map_err(Error::RegsConfiguration)?;
            regs::setup_fpu(fd).map_err(Error::FpuConfiguration)?;
            regs::setup_sregs(&vm_memory.memory(), fd).map_err(Error::SregsConfiguration)?;
        }
    }
    interrupts::set_lint(fd).map_err(|e| Error::LocalIntConfiguration(e.into()))?;
    Ok(())
}

/// Returns a Vec of the valid memory addresses.
/// These should be used to configure the GuestMemory structure for the platform.
/// For x86_64, all addresses are valid from the start of the kernel except for
/// a carve-out at the end of the 32-bit address space.
pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, RegionType)> {
    let reserved_memory_gap_start = layout::MEM_32BIT_RESERVED_START
        .checked_add(layout::MEM_32BIT_DEVICES_SIZE)
        .expect("32-bit reserved region is too large");

    let requested_memory_size = GuestAddress(size as u64);
    let mut regions = Vec::new();

    // Case 1: guest memory fits before the gap.
    if size as u64 <= layout::MEM_32BIT_RESERVED_START.raw_value() {
        regions.push((GuestAddress(0), size as usize, RegionType::Ram));
    // Case 2: guest memory extends beyond the gap.
    } else {
        // Push memory before the gap.
        regions.push((
            GuestAddress(0),
            layout::MEM_32BIT_RESERVED_START.raw_value() as usize,
            RegionType::Ram,
        ));
        regions.push((
            layout::RAM_64BIT_START,
            requested_memory_size.unchecked_offset_from(layout::MEM_32BIT_RESERVED_START) as usize,
            RegionType::Ram,
        ));
    }

    // Add the 32-bit device memory hole as a sub region.
    regions.push((
        layout::MEM_32BIT_RESERVED_START,
        layout::MEM_32BIT_DEVICES_SIZE as usize,
        RegionType::SubRegion,
    ));

    // Add the 32-bit reserved memory hole as a sub region.
    regions.push((
        reserved_memory_gap_start,
        (layout::MEM_32BIT_RESERVED_SIZE - layout::MEM_32BIT_DEVICES_SIZE) as usize,
        RegionType::Reserved,
    ));

    regions
}
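
// To make the two cases above concrete, using the values implied by the unit
// tests at the bottom of this file (a 32-bit hole starting at 3328 MiB, i.e.
// 3.25 GiB, and 64-bit RAM starting at 4 GiB): a 4 GiB guest gets RAM at
// [0, 3.25 GiB) plus RAM at [4 GiB, 4.75 GiB), followed by the device
// sub-region and the reserved sub-region that make up the hole itself.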

/// Configures the system and should be called once per VM before starting vCPU threads.
///
/// # Arguments
///
/// * `guest_mem` - The memory to be used by the guest.
/// * `cmdline_addr` - Address in `guest_mem` where the kernel command line was loaded.
/// * `initramfs` - Optional initramfs description (address and size in guest memory).
/// * `_num_cpus` - Number of virtual CPUs the guest will have.
/// * `rsdp_addr` - Optional address of the ACPI RSDP table.
/// * `sgx_epc_region` - Optional SGX EPC region exposed to the guest.
#[allow(clippy::too_many_arguments)]
pub fn configure_system(
    guest_mem: &GuestMemoryMmap,
    cmdline_addr: GuestAddress,
    initramfs: &Option<InitramfsConfig>,
    _num_cpus: u8,
    rsdp_addr: Option<GuestAddress>,
    sgx_epc_region: Option<SgxEpcRegion>,
) -> super::Result<()> {
    // Write the EBDA address to the location where ACPICA expects to find it.
    guest_mem
        .write_obj((layout::EBDA_START.0 >> 4) as u16, layout::EBDA_POINTER)
        .map_err(Error::EbdaSetup)?;

    let size = smbios::setup_smbios(guest_mem).map_err(Error::SmbiosSetup)?;

    // Place the MP table after the SMBIOS table, aligned to 16 bytes.
    let offset = GuestAddress(layout::SMBIOS_START).unchecked_add(size);
    let offset = GuestAddress((offset.0 + 16) & !0xf);
    mptable::setup_mptable(offset, guest_mem, _num_cpus).map_err(Error::MpTableSetup)?;

    // Check that the RAM is not smaller than the RSDP start address.
    if let Some(rsdp_addr) = rsdp_addr {
        if rsdp_addr.0 > guest_mem.last_addr().0 {
            return Err(super::Error::RsdpPastRamEnd);
        }
    }

    configure_pvh(
        guest_mem,
        cmdline_addr,
        initramfs,
        rsdp_addr,
        sgx_epc_region,
    )
}
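
// Sketch of the guest-physical layout that configure_pvh() below produces
// (symbolic addresses come from the `layout` module; per the PVH ABI, %rbx
// points at the start_info struct when the guest starts):
//
//   layout::PVH_INFO_START -> hvm_start_info
//   layout::MEMMAP_START   -> [hvm_memmap_table_entry; memmap_entries]
//   layout::MODLIST_START  -> hvm_modlist_entry (initramfs, when present)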

fn configure_pvh(
    guest_mem: &GuestMemoryMmap,
    cmdline_addr: GuestAddress,
    initramfs: &Option<InitramfsConfig>,
    rsdp_addr: Option<GuestAddress>,
    sgx_epc_region: Option<SgxEpcRegion>,
) -> super::Result<()> {
    const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336e_c578;

    let mut start_info: StartInfoWrapper = StartInfoWrapper(hvm_start_info::default());

    start_info.0.magic = XEN_HVM_START_MAGIC_VALUE;
    start_info.0.version = 1; // pvh has version 1
    start_info.0.nr_modules = 0;
    start_info.0.cmdline_paddr = cmdline_addr.raw_value();
    start_info.0.memmap_paddr = layout::MEMMAP_START.raw_value();

    if let Some(rsdp_addr) = rsdp_addr {
        start_info.0.rsdp_paddr = rsdp_addr.0;
    }

    if let Some(initramfs_config) = initramfs {
        // The initramfs has been written to guest memory already, here we just need to
        // create the module structure that describes it.
        let ramdisk_mod: ModlistEntryWrapper = ModlistEntryWrapper(hvm_modlist_entry {
            paddr: initramfs_config.address.raw_value(),
            size: initramfs_config.size as u64,
            ..Default::default()
        });

        start_info.0.nr_modules += 1;
        start_info.0.modlist_paddr = layout::MODLIST_START.raw_value();

        // Write the modlist struct to guest memory.
        guest_mem
            .write_obj(ramdisk_mod, layout::MODLIST_START)
            .map_err(super::Error::ModlistSetup)?;
    }

    // Vector to hold the memory maps which need to be written to guest memory
    // at MEMMAP_START after all of the mappings are recorded.
    let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();

    // Create the memory map entries.
    add_memmap_entry(&mut memmap, 0, layout::EBDA_START.raw_value(), E820_RAM);

    let mem_end = guest_mem.last_addr();

    if mem_end < layout::MEM_32BIT_RESERVED_START {
        add_memmap_entry(
            &mut memmap,
            layout::HIGH_RAM_START.raw_value(),
            mem_end.unchecked_offset_from(layout::HIGH_RAM_START) + 1,
            E820_RAM,
        );
    } else {
        add_memmap_entry(
            &mut memmap,
            layout::HIGH_RAM_START.raw_value(),
            layout::MEM_32BIT_RESERVED_START.unchecked_offset_from(layout::HIGH_RAM_START),
            E820_RAM,
        );
        if mem_end > layout::RAM_64BIT_START {
            add_memmap_entry(
                &mut memmap,
                layout::RAM_64BIT_START.raw_value(),
                mem_end.unchecked_offset_from(layout::RAM_64BIT_START) + 1,
                E820_RAM,
            );
        }
    }

    add_memmap_entry(
        &mut memmap,
        layout::PCI_MMCONFIG_START.0,
        layout::PCI_MMCONFIG_SIZE,
        E820_RESERVED,
    );

    if let Some(sgx_epc_region) = sgx_epc_region {
        add_memmap_entry(
            &mut memmap,
            sgx_epc_region.start().raw_value(),
            sgx_epc_region.size() as u64,
            E820_RESERVED,
        );
    }

    start_info.0.memmap_entries = memmap.len() as u32;

    // Copy the vector with the memmap table to the MEMMAP_START address,
    // which is already saved in the memmap_paddr field of the hvm_start_info struct.
    let mut memmap_start_addr = layout::MEMMAP_START;

    guest_mem
        .checked_offset(
            memmap_start_addr,
            mem::size_of::<hvm_memmap_table_entry>() * start_info.0.memmap_entries as usize,
        )
        .ok_or(super::Error::MemmapTablePastRamEnd)?;

    // For every entry in the memmap vector, create a MemmapTableEntryWrapper
    // and write it to guest memory.
    for memmap_entry in memmap {
        let map_entry_wrapper: MemmapTableEntryWrapper = MemmapTableEntryWrapper(memmap_entry);

        guest_mem
            .write_obj(map_entry_wrapper, memmap_start_addr)
            .map_err(|_| super::Error::MemmapTableSetup)?;
        memmap_start_addr =
            memmap_start_addr.unchecked_add(mem::size_of::<hvm_memmap_table_entry>() as u64);
    }

    // The hvm_start_info struct itself must be stored at the PVH_INFO_START
    // address, and %rbx will be initialized to contain PVH_INFO_START prior to
    // starting the guest, as required by the PVH ABI.
    let start_info_addr = layout::PVH_INFO_START;

    guest_mem
        .checked_offset(start_info_addr, mem::size_of::<hvm_start_info>())
        .ok_or(super::Error::StartInfoPastRamEnd)?;

    // Write the start_info struct to guest memory.
    guest_mem
        .write_obj(start_info, start_info_addr)
        .map_err(|_| super::Error::StartInfoSetup)?;

    Ok(())
}

fn add_memmap_entry(memmap: &mut Vec<hvm_memmap_table_entry>, addr: u64, size: u64, mem_type: u32) {
    // Add the table entry to the vector.
    memmap.push(hvm_memmap_table_entry {
        addr,
        size,
        type_: mem_type,
        reserved: 0,
    });
}

/// Returns the memory address where the initramfs could be loaded.
pub fn initramfs_load_addr(
    guest_mem: &GuestMemoryMmap,
    initramfs_size: usize,
) -> super::Result<u64> {
    let first_region = guest_mem
        .find_region(GuestAddress::new(0))
        .ok_or(super::Error::InitramfsAddress)?;
    // It's safe to cast to usize because the size of a region can't be greater than usize.
    let lowmem_size = first_region.len() as usize;

    if lowmem_size < initramfs_size {
        return Err(super::Error::InitramfsAddress);
    }

    let aligned_addr: u64 = ((lowmem_size - initramfs_size) & !(crate::pagesize() - 1)) as u64;
    Ok(aligned_addr)
}

pub fn get_host_cpu_phys_bits() -> u8 {
    unsafe {
        let leaf = x86_64::__cpuid(0x8000_0000);

        // Detect and handle AMD SME (Secure Memory Encryption) properly.
        // Some physical address bits may become reserved when the feature is enabled.
        // See AMD64 Architecture Programmer's Manual Volume 2, Section 7.10.1
        let reduced = if leaf.eax >= 0x8000_001f
            && leaf.ebx == 0x6874_7541 // Vendor ID: AuthenticAMD
            && leaf.ecx == 0x444d_4163
            && leaf.edx == 0x6974_6e65
            && x86_64::__cpuid(0x8000_001f).eax & 0x1 != 0
        {
            (x86_64::__cpuid(0x8000_001f).ebx >> 6) & 0x3f
        } else {
            0
        };

        if leaf.eax >= 0x8000_0008 {
            let leaf = x86_64::__cpuid(0x8000_0008);
            ((leaf.eax & 0xff) - reduced) as u8
        } else {
            36
        }
    }
}

fn update_cpuid_topology(
    cpuid: &mut CpuId,
    threads_per_core: u8,
    cores_per_die: u8,
    dies_per_package: u8,
) {
    // Each width is the number of APIC id bits needed to enumerate that
    // level, i.e. ceil(log2(count)) for a u8 count.
    let thread_width = 8 - (threads_per_core - 1).leading_zeros();
    let core_width = (8 - (cores_per_die - 1).leading_zeros()) + thread_width;
    let die_width = (8 - (dies_per_package - 1).leading_zeros()) + core_width;

    // CPU Topology leaf 0xb
    CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(0), CpuidReg::EAX, thread_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0xb,
        Some(0),
        CpuidReg::EBX,
        u32::from(threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(0), CpuidReg::ECX, 1 << 8);

    CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(1), CpuidReg::EAX, die_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0xb,
        Some(1),
        CpuidReg::EBX,
        u32::from(dies_per_package * cores_per_die * threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0xb, Some(1), CpuidReg::ECX, 2 << 8);

    // CPU Topology leaf 0x1f
    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(0), CpuidReg::EAX, thread_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0x1f,
        Some(0),
        CpuidReg::EBX,
        u32::from(threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(0), CpuidReg::ECX, 1 << 8);

    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(1), CpuidReg::EAX, core_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0x1f,
        Some(1),
        CpuidReg::EBX,
        u32::from(cores_per_die * threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(1), CpuidReg::ECX, 2 << 8);

    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(2), CpuidReg::EAX, die_width);
    CpuidPatch::set_cpuid_reg(
        cpuid,
        0x1f,
        Some(2),
        CpuidReg::EBX,
        u32::from(dies_per_package * cores_per_die * threads_per_core),
    );
    CpuidPatch::set_cpuid_reg(cpuid, 0x1f, Some(2), CpuidReg::ECX, 5 << 8);
}
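
// Worked example for the width arithmetic above (values are illustrative):
// threads_per_core = 2, cores_per_die = 4 and dies_per_package = 1 give
// thread_width = 1, core_width = 2 + 1 = 3 and die_width = 0 + 3 = 3, so an
// APIC id allocates bit 0 to the thread, bits 1-2 to the core, and no extra
// bits for the single die.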

// The goal is to update the CPUID sub-leaves to reflect the number of EPC
// sections exposed to the guest.
fn update_cpuid_sgx(cpuid: &mut CpuId, epc_sections: Vec<SgxEpcSection>) -> Result<(), Error> {
    // Something's wrong if there's no EPC section.
    if epc_sections.is_empty() {
        return Err(Error::NoSgxEpcSection);
    }
    // We can't go further if the hypervisor does not support the SGX feature.
    if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::EBX, 2) {
        return Err(Error::MissingSgxFeature);
    }
    // We can't go further if the hypervisor does not support the SGX_LC feature.
    if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::ECX, 30) {
        return Err(Error::MissingSgxLaunchControlFeature);
    }

    // Get host CPUID for leaf 0x12, subleaf 0x2. This is to retrieve EPC
    // properties such as confidentiality and integrity.
    let leaf = unsafe { std::arch::x86_64::__cpuid_count(0x12, 0x2) };

    for (i, epc_section) in epc_sections.iter().enumerate() {
        let subleaf_idx = i + 2;
        let start = epc_section.start().raw_value();
        let size = epc_section.size() as u64;
        let eax = (start & 0xffff_f000) as u32 | 0x1;
        let ebx = (start >> 32) as u32;
        let ecx = (size & 0xffff_f000) as u32 | (leaf.ecx & 0xf);
        let edx = (size >> 32) as u32;
        // SGX EPC leaf 0x12
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, eax);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, ebx);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, ecx);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, edx);
    }

    // Add one NULL entry to terminate the dynamic list.
    let subleaf_idx = epc_sections.len() + 2;
    // SGX EPC leaf 0x12
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, 0);

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn regions_lt_4gb() {
        let regions = arch_memory_regions(1 << 29);
        assert_eq!(3, regions.len());
        assert_eq!(GuestAddress(0), regions[0].0);
        assert_eq!(1usize << 29, regions[0].1);
    }

    #[test]
    fn regions_gt_4gb() {
        let regions = arch_memory_regions((1 << 32) + 0x8000);
        assert_eq!(4, regions.len());
        assert_eq!(GuestAddress(0), regions[0].0);
        assert_eq!(GuestAddress(1 << 32), regions[1].0);
    }

    #[test]
    fn test_system_configuration() {
        let no_vcpus = 4;
        let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let config_err = configure_system(
            &gm,
            GuestAddress(0),
            &None,
            1,
            Some(layout::RSDP_POINTER),
            None,
        );
        assert!(config_err.is_err());

        // Now assigning some memory that falls before the 32-bit memory hole.
        let mem_size = 128 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();

        configure_system(&gm, GuestAddress(0), &None, no_vcpus, None, None).unwrap();

        // Now assigning some memory that is equal to the start of the 32-bit memory hole.
        let mem_size = 3328 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
        configure_system(&gm, GuestAddress(0), &None, no_vcpus, None, None).unwrap();

        // Now assigning some memory that falls after the 32-bit memory hole.
        let mem_size = 3330 << 20;
        let arch_mem_regions = arch_memory_regions(mem_size);
        let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions
            .iter()
            .filter(|r| r.2 == RegionType::Ram)
            .map(|r| (r.0, r.1))
            .collect();
        let gm = GuestMemoryMmap::from_ranges(&ram_regions).unwrap();
        configure_system(&gm, GuestAddress(0), &None, no_vcpus, None, None).unwrap();
    }
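
    // Sketch of a sanity check for `initramfs_load_addr` (it assumes only the
    // host page size reported by `crate::pagesize()`): the load address is the
    // top of the first RAM region aligned down to a page boundary, and
    // oversized requests are rejected.
    #[test]
    fn test_initramfs_load_addr() {
        let lowmem_size: usize = 0x1000_0000;
        let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), lowmem_size)]).unwrap();

        // Asking for more than the first region holds must fail.
        assert!(initramfs_load_addr(&gm, lowmem_size + 1).is_err());

        let initramfs_size = 0x1000;
        let addr = initramfs_load_addr(&gm, initramfs_size).unwrap();
        assert_eq!(
            addr,
            ((lowmem_size - initramfs_size) & !(crate::pagesize() - 1)) as u64
        );
    }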

    #[test]
    fn test_add_memmap_entry() {
        let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();

        let expected_memmap = vec![
            hvm_memmap_table_entry {
                addr: 0x0,
                size: 0x1000,
                type_: E820_RAM,
                ..Default::default()
            },
            hvm_memmap_table_entry {
                addr: 0x10000,
                size: 0xa000,
                type_: E820_RESERVED,
                ..Default::default()
            },
        ];

        add_memmap_entry(&mut memmap, 0, 0x1000, E820_RAM);
        add_memmap_entry(&mut memmap, 0x10000, 0xa000, E820_RESERVED);

        assert_eq!(format!("{:?}", memmap), format!("{:?}", expected_memmap));
    }
}