// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//

use crate::arch::emulator::{PlatformEmulator, PlatformError};

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::emulator::{Emulator, EmulatorCpuState};
use crate::cpu;
use crate::cpu::Vcpu;
use crate::hypervisor;
use crate::vec_with_array_field;
use crate::vm::{self, InterruptSourceConfig, VmOps};
use crate::HypervisorType;
pub use mshv_bindings::*;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd, VmType};
use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use vfio_ioctls::VfioDeviceFd;
use vm::DataMatch;

#[cfg(feature = "sev_snp")]
mod snp_constants;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
#[cfg(feature = "sev_snp")]
use snp_constants::*;

use crate::{
    ClockData, CpuState, IoEventAddress, IrqRoutingEntry, MpState, UserMemoryRegion,
    USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
};
#[cfg(feature = "sev_snp")]
use igvm_defs::IGVM_VHS_SNP_ID_BLOCK;
use vmm_sys_util::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
pub use x86_64::VcpuMshvState;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;

#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd;

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};

const DIRTY_BITMAP_CLEAR_DIRTY: u64 = 0x4;
const DIRTY_BITMAP_SET_DIRTY: u64 = 0x8;

///
/// Export generically-named wrappers of mshv-bindings for Unix-based platforms
///
pub use {
    mshv_bindings::mshv_create_device as CreateDevice,
    mshv_bindings::mshv_device_attr as DeviceAttr, mshv_ioctls::DeviceFd,
};

pub const PAGE_SHIFT: usize = 12;

impl From<mshv_user_mem_region> for UserMemoryRegion {
    fn from(region: mshv_user_mem_region) -> Self {
        let mut flags: u32 = 0;
        if region.flags & HV_MAP_GPA_READABLE != 0 {
            flags |= USER_MEMORY_REGION_READ;
        }
        if region.flags & HV_MAP_GPA_WRITABLE != 0 {
            flags |= USER_MEMORY_REGION_WRITE;
        }
        if region.flags & HV_MAP_GPA_EXECUTABLE != 0 {
            flags |= USER_MEMORY_REGION_EXECUTE;
        }

        UserMemoryRegion {
            guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
                + (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
            memory_size: region.size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}

impl From<UserMemoryRegion> for mshv_user_mem_region {
    fn from(region: UserMemoryRegion) -> Self {
        let mut flags: u32 = 0;
        if region.flags & USER_MEMORY_REGION_READ != 0 {
            flags |= HV_MAP_GPA_READABLE;
        }
        if region.flags & USER_MEMORY_REGION_WRITE != 0 {
            flags |= HV_MAP_GPA_WRITABLE;
        }
        if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
            flags |= HV_MAP_GPA_EXECUTABLE;
        }

        mshv_user_mem_region {
            guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
            size: region.memory_size,
            userspace_addr: region.userspace_addr,
            flags,
        }
    }
}

impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
    fn from(a: mshv_ioctls::IoEventAddress) -> Self {
        match a {
            mshv_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
            mshv_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<IoEventAddress> for mshv_ioctls::IoEventAddress {
    fn from(a: IoEventAddress) -> Self {
        match a {
            IoEventAddress::Pio(x) => Self::Pio(x),
            IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<VcpuMshvState> for CpuState {
    fn from(s: VcpuMshvState) -> Self {
        CpuState::Mshv(s)
    }
}

impl From<CpuState> for VcpuMshvState {
    fn from(s: CpuState) -> Self {
        match s {
            CpuState::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("CpuState is not valid"),
        }
    }
}

impl From<mshv_msi_routing_entry> for IrqRoutingEntry {
    fn from(s: mshv_msi_routing_entry) -> Self {
        IrqRoutingEntry::Mshv(s)
    }
}

impl From<IrqRoutingEntry> for mshv_msi_routing_entry {
    fn from(e: IrqRoutingEntry) -> Self {
        match e {
            IrqRoutingEntry::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("IrqRoutingEntry is not valid"),
        }
    }
}

struct MshvDirtyLogSlot {
    guest_pfn: u64,
    memory_size: u64,
}

/// Wrapper over mshv system ioctls.
pub struct MshvHypervisor {
    mshv: Mshv,
}

impl MshvHypervisor {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by MSHV.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
        self.mshv
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }
}

impl MshvHypervisor {
    /// Create a hypervisor based on Mshv
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
        let mshv_obj =
            Mshv::new().map_err(|e| hypervisor::HypervisorError::HypervisorCreate(e.into()))?;
        Ok(Arc::new(MshvHypervisor { mshv: mshv_obj }))
    }
    /// Check if the hypervisor is available
    pub fn is_available() -> hypervisor::Result<bool> {
        match std::fs::metadata("/dev/mshv") {
            Ok(_) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
                err.into(),
            )),
        }
    }
}
/// Implementation of Hypervisor trait for Mshv
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl hypervisor::Hypervisor for MshvHypervisor {
    ///
    /// Returns the type of the hypervisor
    ///
    fn hypervisor_type(&self) -> HypervisorType {
        HypervisorType::Mshv
    }

    fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        let mshv_vm_type: VmType = match VmType::try_from(vm_type) {
            Ok(vm_type) => vm_type,
            Err(_) => return Err(hypervisor::HypervisorError::UnsupportedVmType()),
        };
        let fd: VmFd;
        loop {
            match self.mshv.create_vm_with_type(mshv_vm_type) {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // If the error returned is EINTR, which means the
                        // ioctl has been interrupted, we have to retry as
                        // this can't be considered a regular error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        // Set additional partition properties for a SEV-SNP partition.
        if mshv_vm_type == VmType::Snp {
            let snp_policy = snp::get_default_snp_guest_policy();
            let vmgexit_offloads = snp::get_default_vmgexit_offload_features();
            // SAFETY: access union fields
            unsafe {
                debug!(
                    "Setting the partition isolation policy as: 0x{:x}",
                    snp_policy.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_POLICY,
                    snp_policy.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
                debug!(
                    "Setting the partition property to enable VMGEXIT offloads as: 0x{:x}",
                    vmgexit_offloads.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_SEV_VMGEXIT_OFFLOADS,
                    vmgexit_offloads.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
            }
        }

        // The default Microsoft Hypervisor behavior for an unimplemented MSR is to
        // send a fault to the guest when it tries to access it. It is possible
        // to override this behavior with a more suitable option, i.e. ignore
        // writes from the guest and return zero on attempts to read an
        // unimplemented MSR.
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

        let msr_list = self.get_msr_list()?;
        let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
        let mut msrs: Vec<MsrEntry> = vec![
            MsrEntry {
                ..Default::default()
            };
            num_msrs
        ];
        let indices = msr_list.as_slice();
        for (pos, index) in indices.iter().enumerate() {
            msrs[pos].index = *index;
        }
        let vm_fd = Arc::new(fd);

        Ok(Arc::new(MshvVm {
            fd: vm_fd,
            msrs,
            dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
        }))
    }

    /// Create a mshv vm object and return the object as Vm trait object
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// use hypervisor::mshv::MshvVm;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// ```
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let vm_type = 0;
        self.create_vm_with_type(vm_type)
    }
    ///
    /// Get the supported CpuID
    ///
    fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
        Ok(Vec::new())
    }

    /// Get maximum number of vCPUs
    fn get_max_vcpus(&self) -> u32 {
        // TODO: Using HV_MAXIMUM_PROCESSORS would be better
        // but the ioctl API is limited to u8
        256
    }
}

/// Vcpu struct for Microsoft Hypervisor
pub struct MshvVcpu {
    fd: VcpuFd,
    vp_index: u8,
    cpuid: Vec<CpuIdEntry>,
    msrs: Vec<MsrEntry>,
    vm_ops: Option<Arc<dyn vm::VmOps>>,
    #[cfg(feature = "sev_snp")]
    vm_fd: Arc<VmFd>,
}

/// Implementation of Vcpu trait for Microsoft Hypervisor
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0, None).unwrap();
/// ```
impl cpu::Vcpu for MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<crate::arch::x86::StandardRegisters> {
        Ok(self
            .fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU general purpose registers.
    ///
    fn set_regs(&self, regs: &crate::arch::x86::StandardRegisters) -> cpu::Result<()> {
        let regs = (*regs).into();
        self.fd
            .set_regs(&regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<crate::arch::x86::SpecialRegisters> {
        Ok(self
            .fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers.
    ///
    fn set_sregs(&self, sregs: &crate::arch::x86::SpecialRegisters) -> cpu::Result<()> {
        let sregs = (*sregs).into();
        self.fd
            .set_sregs(&sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        Ok(self
            .fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Set the floating point state (FPU) of a vCPU.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        let fpu: mshv_bindings::FloatingPointUnit = (*fpu).clone().into();
        self.fd
            .set_fpu(&fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mut mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        let succ = self
            .fd
            .get_msrs(&mut mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;

        msrs[..succ].copy_from_slice(
            &mshv_msrs.as_slice()[..succ]
                .iter()
                .map(|e| (*e).into())
                .collect::<Vec<MsrEntry>>(),
        );

        Ok(succ)
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Setup the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        self.fd
            .set_msrs(&mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to enable HyperV SynIC
    ///
    fn enable_hyperv_synic(&self) -> cpu::Result<()> {
        /* We always have SynIC enabled on MSHV */
        Ok(())
    }
    #[allow(non_upper_case_globals)]
    fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        let hv_message: hv_message = hv_message::default();
        match self.fd.run(hv_message) {
            Ok(x) => match x.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    debug!("HALT");
                    Ok(cpu::VmExit::Reset)
                }
                hv_message_type_HVMSG_UNRECOVERABLE_EXCEPTION => {
                    warn!("TRIPLE FAULT");
                    Ok(cpu::VmExit::Shutdown)
                }
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let info = x.to_ioport_info().unwrap();
                    let access_info = info.access_info;
                    // SAFETY: access_info is valid, otherwise we won't be here
                    let len = unsafe { access_info.__bindgen_anon_1.access_size() } as usize;
                    let is_write = info.header.intercept_access_type == 1;
                    let port = info.port_number;
                    let mut data: [u8; 4] = [0; 4];
                    let mut ret_rax = info.rax;

                    /*
                     * XXX: Ignore QEMU fw_cfg (0x5xx) and debug console (0x402) ports.
                     *
                     * Cloud Hypervisor doesn't support fw_cfg at the moment. It does support
                     * 0x402 under the "fwdebug" feature flag, but that feature is not enabled
                     * by default and is considered legacy.
                     *
                     * OVMF unconditionally pokes these IO ports with string IO.
                     *
                     * Instead of trying to implement string IO support, which would not be of
                     * much use yet, skip those ports explicitly to avoid panicking.
                     *
                     * Proper string IO support can be added once we gain the ability to translate
                     * guest virtual addresses to guest physical addresses on MSHV.
                     */
                    match port {
                        0x402 | 0x510 | 0x511 | 0x514 => {
                            let insn_len = info.header.instruction_length() as u64;

                            /* Advance RIP and update RAX */
                            let arr_reg_name_value = [
                                (
                                    hv_register_name_HV_X64_REGISTER_RIP,
                                    info.header.rip + insn_len,
                                ),
                                (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                            ];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                            return Ok(cpu::VmExit::Ignore);
                        }
                        _ => {}
                    }

                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.string_op() } != 1),
                        "String IN/OUT not supported"
                    );
                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.rep_prefix() } != 1),
                        "Rep IN/OUT not supported"
                    );

                    if is_write {
                        let data = (info.rax as u32).to_le_bytes();
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_write(port.into(), &data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }
                    } else {
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_read(port.into(), &mut data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }

                        let v = u32::from_le_bytes(data);
                        /* Preserve high bits in EAX but clear out high bits in RAX */
                        let mask = 0xffffffff >> (32 - len * 8);
                        let eax = (info.rax as u32 & !mask) | (v & mask);
                        ret_rax = eax as u64;
                    }

                    let insn_len = info.header.instruction_length() as u64;

                    /* Advance RIP and update RAX */
                    let arr_reg_name_value = [
                        (
                            hv_register_name_HV_X64_REGISTER_RIP,
                            info.header.rip + insn_len,
                        ),
                        (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                    ];
                    set_registers_64!(self.fd, arr_reg_name_value)
                        .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNMAPPED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let insn_len = info.instruction_byte_count as usize;
                    assert!(insn_len > 0 && insn_len <= 16);

                    let mut context = MshvEmulatorContext {
                        vcpu: self,
                        map: (info.guest_virtual_address, info.guest_physical_address),
                    };

                    // Create a new emulator.
                    let mut emul = Emulator::new(&mut context);

                    // Emulate the trapped instruction, and only the first one.
                    let new_state = emul
                        .emulate_first_insn(self.vp_index as usize, &info.instruction_bytes)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    // Set CPU state back.
                    context
                        .set_cpu_state(self.vp_index as usize, new_state)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_CPUID_INTERCEPT => {
                    let info = x.to_cpuid_info().unwrap();
                    debug!("cpuid eax: {:x}", { info.rax });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_MSR_INTERCEPT => {
                    let info = x.to_msr_info().unwrap();
                    if info.header.intercept_access_type == 0 {
                        debug!("msr read: {:x}", { info.msr_number });
                    } else {
                        debug!("msr write: {:x}", { info.msr_number });
                    }
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT => {
                    // TODO: Handler for VMCALL here.
                    let info = x.to_exception_info().unwrap();
                    debug!("Exception Info {:?}", { info.exception_vector });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_APIC_EOI => {
                    let info = x.to_apic_eoi_info().unwrap();
                    // The kernel should dispatch the EOI to the correct thread.
                    // Check the VP index is the same as the one we have.
                    assert!(info.vp_index == self.vp_index as u32);
                    // The interrupt vector in info is u32, but x86 only supports 256 vectors.
                    // There is no good way to recover from this if the hypervisor messes around.
                    // Just unwrap.
                    Ok(cpu::VmExit::IoapicEoi(
                        info.interrupt_vector.try_into().unwrap(),
                    ))
                }
                #[cfg(feature = "sev_snp")]
                hv_message_type_HVMSG_X64_SEV_VMGEXIT_INTERCEPT => {
                    let info = x.to_vmg_intercept_info().unwrap();
                    let ghcb_data = info.ghcb_msr >> GHCB_INFO_BIT_WIDTH;
                    let ghcb_msr = svm_ghcb_msr {
                        as_uint64: info.ghcb_msr,
                    };
                    // SAFETY: Accessing a union element from bindgen generated bindings.
                    let ghcb_op = unsafe { ghcb_msr.__bindgen_anon_2.ghcb_info() as u32 };
                    // Sanity check on the header fields before handling other operations.
                    assert!(info.header.intercept_access_type == HV_INTERCEPT_ACCESS_EXECUTE as u8);

                    match ghcb_op {
                        GHCB_INFO_HYP_FEATURE_REQUEST => {
                            // Pre-condition: GHCB data must be zero
                            assert!(ghcb_data == 0);
                            let mut ghcb_response = GHCB_INFO_HYP_FEATURE_RESPONSE as u64;
                            // Indicate support for basic SEV-SNP features
                            ghcb_response |=
                                (GHCB_HYP_FEATURE_SEV_SNP << GHCB_INFO_BIT_WIDTH) as u64;
                            // Indicate support for SEV-SNP AP creation
                            ghcb_response |= (GHCB_HYP_FEATURE_SEV_SNP_AP_CREATION
                                << GHCB_INFO_BIT_WIDTH)
                                as u64;
                            debug!(
                                "GHCB_INFO_HYP_FEATURE_REQUEST: Supported features: {:0x}",
                                ghcb_response
                            );
                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_REGISTER_REQUEST => {
                            let mut ghcb_gpa = hv_x64_register_sev_ghcb::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                ghcb_gpa.__bindgen_anon_1.set_enabled(1);
                                ghcb_gpa
                                    .__bindgen_anon_1
                                    .set_page_number(ghcb_msr.__bindgen_anon_2.gpa_page_number());
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_SEV_GHCB_GPA,
                                    ghcb_gpa.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

                            let mut resp_ghcb_msr = svm_ghcb_msr::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                resp_ghcb_msr
                                    .__bindgen_anon_2
                                    .set_ghcb_info(GHCB_INFO_REGISTER_RESPONSE as u64);
                                resp_ghcb_msr.__bindgen_anon_2.set_gpa_page_number(
                                    ghcb_msr.__bindgen_anon_2.gpa_page_number(),
                                );
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_GHCB,
                                    resp_ghcb_msr.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_SEV_INFO_REQUEST => {
                            let sev_cpuid_function = 0x8000_001F;
                            let cpu_leaf = self
                                .fd
                                .get_cpuid_values(sev_cpuid_function, 0, 0, 0)
                                .unwrap();
                            let ebx = cpu_leaf[1];
                            // The lowest 6 bits of EBX hold the page table encryption bit number
                            let pbit_encryption = (ebx & 0x3f) as u8;
                            let mut ghcb_response = GHCB_INFO_SEV_INFO_RESPONSE as u64;

                            // GHCBData[63:48] specifies the maximum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MAX as u64) << 48;
                            // GHCBData[47:32] specifies the minimum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MIN as u64) << 32;
                            // GHCBData[31:24] specifies the SEV page table encryption bit number.
                            ghcb_response |= (pbit_encryption as u64) << 24;

                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_NORMAL => {
                            let exit_code =
                                info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_code as u32;
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let pfn = unsafe { ghcb_msr.__bindgen_anon_2.gpa_page_number() };
                            let ghcb_gpa = pfn << GHCB_INFO_BIT_WIDTH;
                            match exit_code {
                                SVM_EXITCODE_HV_DOORBELL_PAGE => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    match exit_info1 {
                                        SVM_NAE_HV_DOORBELL_PAGE_GET_PREFERRED => {
                                            // Hypervisor does not have any preference for doorbell GPA.
                                            let preferred_doorbell_gpa: u64 = 0xFFFFFFFFFFFFFFFF;
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data.copy_from_slice(
                                                &preferred_doorbell_gpa.to_le_bytes(),
                                            );
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_SET => {
                                            let exit_info2 = info
                                                .__bindgen_anon_2
                                                .__bindgen_anon_1
                                                .sw_exit_info2;
                                            let mut ghcb_doorbell_gpa =
                                                hv_x64_register_sev_hv_doorbell::default();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            unsafe {
                                                ghcb_doorbell_gpa.__bindgen_anon_1.set_enabled(1);
                                                ghcb_doorbell_gpa
                                                    .__bindgen_anon_1
                                                    .set_page_number(exit_info2 >> PAGE_SHIFT);
                                            }
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let reg_names = unsafe {
                                                [(
                                                    hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                    ghcb_doorbell_gpa.as_uint64,
                                                )]
                                            };
                                            set_registers_64!(self.fd, reg_names).map_err(|e| {
                                                cpu::HypervisorCpuError::SetRegister(e.into())
                                            })?;

                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data[0..8]
                                                .copy_from_slice(&exit_info2.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;

                                            // Clear the SW_EXIT_INFO1 register to indicate no error
                                            let mut swei1_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei1_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_QUERY => {
                                            let mut reg_assocs = [hv_register_assoc {
                                                name: hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                ..Default::default()
                                            }];
                                            self.fd.get_reg(&mut reg_assocs).unwrap();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let doorbell_gpa = unsafe { reg_assocs[0].value.reg64 };
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg
                                                .data
                                                .copy_from_slice(&doorbell_gpa.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_CLEAR => {
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        _ => {
                                            panic!(
                                                "SVM_EXITCODE_HV_DOORBELL_PAGE: Unhandled exit code: {:0x}",
                                                exit_info1
                                            );
                                        }
                                    }
                                }
                                SVM_EXITCODE_SNP_EXTENDED_GUEST_REQUEST => {
                                    warn!("Fetching extended guest request is not supported");
                                    // Extended guest request is not supported by the Hypervisor
                                    // Returning the error to the guest
                                    // 0x6 means `The NAE event was not valid`
                                    // Reference: GHCB Spec, page 42
                                    let value: u64 = 0x6;
                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    swei2_rw_gpa_arg.data.copy_from_slice(&value.to_le_bytes());
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_IOIO_PROT => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    let port_info = hv_sev_vmgexit_port_info {
                                        as_uint32: exit_info1,
                                    };

                                    let port =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.intercepted_port() };
                                    let mut len = 4;
                                    // SAFETY: Accessing a union element from bindgen generated bindings.
                                    unsafe {
                                        if port_info.__bindgen_anon_1.operand_size_16bit() == 1 {
                                            len = 2;
                                        } else if port_info.__bindgen_anon_1.operand_size_8bit()
                                            == 1
                                        {
                                            len = 1;
                                        }
                                    }
                                    let is_write =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.access_type() == 0 };
                                    let mut rax_rw_gpa_arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: ghcb_gpa + GHCB_RAX_OFFSET,
                                            byte_count: std::mem::size_of::<u64>() as u32,
                                            ..Default::default()
                                        };
                                    self.fd
                                        .gpa_read(&mut rax_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if is_write {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_write(
                                                    port.into(),
                                                    &rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }
                                    } else {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_read(
                                                    port.into(),
                                                    &mut rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }

                                        self.fd.gpa_write(&mut rax_rw_gpa_arg).map_err(|e| {
                                            cpu::HypervisorCpuError::GpaWrite(e.into())
                                        })?;
                                    }

                                    // Clear the SW_EXIT_INFO1 register to indicate no error
                                    let mut swei1_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei1_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_READ => {
                                    let src_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let dst_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);

                                    let mut data: Vec<u8> = vec![0; data_len];
                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops.mmio_read(src_gpa, &mut data[0..data_len]).map_err(
                                            |e| cpu::HypervisorCpuError::RunVcpu(e.into()),
                                        )?;
                                    }
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: dst_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };
                                    arg.data[0..data_len].copy_from_slice(&data);

                                    self.fd
                                        .gpa_write(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_WRITE => {
                                    let dst_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let src_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: src_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };

                                    self.fd
                                        .gpa_read(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops
                                            .mmio_write(dst_gpa, &arg.data[0..data_len])
                                            .map_err(|e| {
                                                cpu::HypervisorCpuError::RunVcpu(e.into())
                                            })?;
                                    }
                                }
                                SVM_EXITCODE_SNP_GUEST_REQUEST => {
                                    let req_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let rsp_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;

                                    let mshv_psp_req =
                                        mshv_issue_psp_guest_request { req_gpa, rsp_gpa };
                                    self.vm_fd
                                        .psp_issue_guest_request(&mshv_psp_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    debug!(
                                        "SNP guest request: req_gpa {:0x} rsp_gpa {:0x}",
                                        req_gpa, rsp_gpa
                                    );

                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_SNP_AP_CREATION => {
                                    let vmsa_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;
                                    let apic_id =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 >> 32;
                                    debug!(
                                        "SNP AP CREATE REQUEST with VMSA GPA {:0x}, and APIC ID {:?}",
                                        vmsa_gpa, apic_id
                                    );

                                    let mshv_ap_create_req = mshv_sev_snp_ap_create {
                                        vp_id: apic_id,
                                        vmsa_gpa,
                                    };
                                    self.vm_fd
                                        .sev_snp_ap_create(&mshv_ap_create_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                _ => panic!(
                                    "GHCB_INFO_NORMAL: Unhandled exit code: {:0x}",
                                    exit_code
                                ),
                            }
                        }
                        _ => panic!("Unsupported VMGEXIT operation: {:0x}", ghcb_op),
                    }

                    Ok(cpu::VmExit::Ignore)
                }
                exit => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "Unhandled VCPU exit {:?}",
                    exit
                ))),
            },

            Err(e) => match e.errno() {
                libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
                _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "VCPU error {:?}",
                    e
                ))),
            },
        }
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to setup the CPUID registers.
    ///
    fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
        let cpuid: Vec<mshv_bindings::hv_cpuid_entry> = cpuid.iter().map(|e| (*e).into()).collect();
        let mshv_cpuid = <CpuId>::from_entries(&cpuid)
            .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;

        self.fd
            .register_intercept_result_cpuid(&mshv_cpuid)
            .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to retrieve the CPUID registers.
    ///
    fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
        Ok(self.cpuid.clone())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn get_lapic(&self) -> cpu::Result<crate::arch::x86::LapicState> {
        Ok(self
            .fd
            .get_lapic()
            .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn set_lapic(&self, lapic: &crate::arch::x86::LapicState) -> cpu::Result<()> {
        let lapic: mshv_bindings::LapicState = (*lapic).clone().into();
        self.fd
            .set_lapic(&lapic)
            .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
    }
    ///
    /// Returns the vcpu's current "multiprocessing state".
    ///
    fn get_mp_state(&self) -> cpu::Result<MpState> {
        Ok(MpState::Mshv)
    }
    ///
    /// Sets the vcpu's current "multiprocessing state".
    ///
    fn set_mp_state(&self, _mp_state: MpState) -> cpu::Result<()> {
        Ok(())
    }
    ///
    /// Set CPU state
    ///
    fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
        let state: VcpuMshvState = state.clone().into();
        self.set_msrs(&state.msrs)?;
        self.set_vcpu_events(&state.vcpu_events)?;
        self.set_regs(&state.regs.into())?;
        self.set_sregs(&state.sregs.into())?;
        self.set_fpu(&state.fpu)?;
        self.set_xcrs(&state.xcrs)?;
        self.set_lapic(&state.lapic)?;
        self.set_xsave(&state.xsave)?;
        // These registers are global and need to be set only for the first vCPU,
        // as the Microsoft Hypervisor allows setting this register for only one vCPU.
        if self.vp_index == 0 {
            self.fd
                .set_misc_regs(&state.misc)
                .map_err(|e| cpu::HypervisorCpuError::SetMiscRegs(e.into()))?
        }
        self.fd
            .set_debug_regs(&state.dbg)
            .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))?;
        Ok(())
    }
    ///
    /// Get CPU State
    ///
    fn state(&self) -> cpu::Result<CpuState> {
        let regs = self.get_regs()?;
        let sregs = self.get_sregs()?;
        let xcrs = self.get_xcrs()?;
        let fpu = self.get_fpu()?;
        let vcpu_events = self.get_vcpu_events()?;
        let mut msrs = self.msrs.clone();
        self.get_msrs(&mut msrs)?;
        let lapic = self.get_lapic()?;
        let xsave = self.get_xsave()?;
        let misc = self
            .fd
            .get_misc_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetMiscRegs(e.into()))?;
        let dbg = self
            .fd
            .get_debug_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetDebugRegs(e.into()))?;

        Ok(VcpuMshvState {
            msrs,
            vcpu_events,
            regs: regs.into(),
            sregs: sregs.into(),
            fpu,
            xcrs,
            lapic,
            dbg,
            xsave,
            misc,
        }
        .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Translate guest virtual address to guest physical address
    ///
    fn translate_gva(&self, gva: u64, flags: u64) -> cpu::Result<(u64, u32)> {
        let r = self
            .fd
            .translate_gva(gva, flags)
            .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;

        let gpa = r.0;
        // SAFETY: r is valid, otherwise this function will have returned
        let result_code = unsafe { r.1.__bindgen_anon_1.result_code };

        Ok((gpa, result_code))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Return the list of initial MSR entries for a VCPU
    ///
    fn boot_msr_entries(&self) -> Vec<MsrEntry> {
        use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};

        [
            msr!(msr_index::MSR_IA32_SYSENTER_CS),
            msr!(msr_index::MSR_IA32_SYSENTER_ESP),
            msr!(msr_index::MSR_IA32_SYSENTER_EIP),
            msr!(msr_index::MSR_STAR),
            msr!(msr_index::MSR_CSTAR),
            msr!(msr_index::MSR_LSTAR),
            msr!(msr_index::MSR_KERNEL_GS_BASE),
            msr!(msr_index::MSR_SYSCALL_MASK),
            msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
        ]
        .to_vec()
    }
}

impl MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xsave struct".
    ///
    fn get_xsave(&self) -> cpu::Result<Xsave> {
        self.fd
            .get_xsave()
            .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xsave struct".
    ///
    fn set_xsave(&self, xsave: &Xsave) -> cpu::Result<()> {
        self.fd
            .set_xsave(xsave)
            .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xcrs".
    ///
    fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
        self.fd
            .get_xcrs()
            .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xcrs".
    ///
    fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
        self.fd
            .set_xcrs(xcrs)
            .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns currently pending exceptions, interrupts, and NMIs as well as related
    /// states of the vcpu.
    ///
    fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
        self.fd
            .get_vcpu_events()
            .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets pending exceptions, interrupts, and NMIs as well as related states
    /// of the vcpu.
    ///
    fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
        self.fd
            .set_vcpu_events(events)
            .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
    }
}

struct MshvEmulatorContext<'a> {
    vcpu: &'a MshvVcpu,
    map: (u64, u64), // Initial GVA to GPA mapping provided by the hypervisor
}

impl<'a> MshvEmulatorContext<'a> {
    // Do the actual gva -> gpa translation
    #[allow(non_upper_case_globals)]
    fn translate(&self, gva: u64) -> Result<u64, PlatformError> {
        if self.map.0 == gva {
            return Ok(self.map.1);
        }

        // TODO: More fine-grained control for the flags
        let flags = HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE;

        let (gpa, result_code) = self
            .vcpu
            .translate_gva(gva, flags.into())
            .map_err(|e| PlatformError::TranslateVirtualAddress(anyhow!(e)))?;

        match result_code {
            hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS => Ok(gpa),
            _ => Err(PlatformError::TranslateVirtualAddress(anyhow!(result_code))),
        }
    }
}

/// Platform emulation for Hyper-V
impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
    type CpuState = EmulatorCpuState;

    fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva)?;
        debug!(
            "mshv emulator: memory read {} bytes from [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_read(gpa, data).is_err() {
                vm_ops
                    .mmio_read(gpa, data)
                    .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
            }
        }

        Ok(())
    }

    fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva)?;
        debug!(
            "mshv emulator: memory write {} bytes at [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_write(gpa, data).is_err() {
                vm_ops
                    .mmio_write(gpa, data)
                    .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
            }
        }

        Ok(())
    }

    fn cpu_state(&self, cpu_id: usize) -> Result<Self::CpuState, PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::GetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        let regs = self
            .vcpu
            .get_regs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
        let sregs = self
            .vcpu
            .get_sregs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;

        debug!("mshv emulator: Getting new CPU state");
        debug!("mshv emulator: {:#x?}", regs);

        Ok(EmulatorCpuState { regs, sregs })
    }

    fn set_cpu_state(&self, cpu_id: usize, state: Self::CpuState) -> Result<(), PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::SetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        debug!("mshv emulator: Setting new CPU state");
        debug!("mshv emulator: {:#x?}", state.regs);

        self.vcpu
            .set_regs(&state.regs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))?;
        self.vcpu
            .set_sregs(&state.sregs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))
    }

    fn gva_to_gpa(&self, gva: u64) -> Result<u64, PlatformError> {
        self.translate(gva)
    }

    fn fetch(&self, _ip: u64, _instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
        Err(PlatformError::MemoryReadFailure(anyhow!("unimplemented")))
    }
}

/// Wrapper over Mshv VM ioctls.
pub struct MshvVm {
    fd: Arc<VmFd>,
    msrs: Vec<MsrEntry>,
    dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
}

impl MshvVm {
    ///
    /// Creates an in-kernel device.
    ///
    /// See the documentation for `MSHV_CREATE_DEVICE`.
    fn create_device(&self, device: &mut CreateDevice) -> vm::Result<VfioDeviceFd> {
        let device_fd = self
            .fd
            .create_device(device)
            .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
        Ok(VfioDeviceFd::new_from_mshv(device_fd))
    }
}

///
/// Implementation of Vm trait for Mshv
///
/// # Examples
///
/// ```
/// # extern crate hypervisor;
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl vm::Vm for MshvVm {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the one-page region in the VM's address space.
    ///
    fn set_identity_map_address(&self, _address: u64) -> vm::Result<()> {
        Ok(())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the three-page region in the VM's address space.
    ///
    fn set_tss_address(&self, _offset: usize) -> vm::Result<()> {
        Ok(())
    }
    ///
    /// Creates an in-kernel interrupt controller.
    ///
    fn create_irq_chip(&self) -> vm::Result<()> {
        Ok(())
    }
    ///
    /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("register_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .register_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))?;

        Ok(())
    }
    ///
    /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("unregister_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .unregister_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))?;

        Ok(())
    }
    ///
    /// Creates a VcpuFd object from a vcpu RawFd.
    ///
    fn create_vcpu(
        &self,
        id: u8,
        vm_ops: Option<Arc<dyn VmOps>>,
    ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
        let vcpu_fd = self
            .fd
            .create_vcpu(id)
            .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
        let vcpu = MshvVcpu {
            fd: vcpu_fd,
            vp_index: id,
            cpuid: Vec::new(),
            msrs: self.msrs.clone(),
            vm_ops,
            #[cfg(feature = "sev_snp")]
            vm_fd: self.fd.clone(),
        };
        Ok(Arc::new(vcpu))
    }
    #[cfg(target_arch = "x86_64")]
    fn enable_split_irq(&self) -> vm::Result<()> {
        Ok(())
    }
    #[cfg(target_arch = "x86_64")]
    fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
        Ok(())
    }
    fn register_ioevent(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: Option<DataMatch>,
    ) -> vm::Result<()> {
        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!(
            "register_ioevent fd {} addr {:x?} datamatch {:?}",
            fd.as_raw_fd(),
            addr,
            datamatch
        );
        if let Some(dm) = datamatch {
            match dm {
                vm::DataMatch::DataMatch32(mshv_dm32) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm32)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
                vm::DataMatch::DataMatch64(mshv_dm64) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm64)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
            }
        } else {
            self.fd
                .register_ioevent(fd, addr, NoDatamatch)
                .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
        }
    }
    /// Unregister an event from a certain address it has been previously registered to.
    fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!("unregister_ioevent fd {} addr {:x?}", fd.as_raw_fd(), addr);

        self.fd
            .unregister_ioevent(fd, addr, NoDatamatch)
            .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
    }

    /// Creates a guest physical memory region.
    fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Keep track of the slots regardless of whether the region is read-only.
        // For a read-only region the hypervisor can still enable dirty-bit tracking,
        // but a VM exit happens before the dirty bits are set.
        self.dirty_log_slots.write().unwrap().insert(
            user_memory_region.guest_pfn,
            MshvDirtyLogSlot {
                guest_pfn: user_memory_region.guest_pfn,
                memory_size: user_memory_region.size,
            },
        );

        self.fd
            .map_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))?;
        Ok(())
    }

    /// Removes a guest physical memory region.
    fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Remove the corresponding entry from "self.dirty_log_slots" if needed
        self.dirty_log_slots
            .write()
            .unwrap()
            .remove(&user_memory_region.guest_pfn);

        self.fd
            .unmap_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))?;
        Ok(())
    }

    fn make_user_memory_region(
        &self,
        _slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        readonly: bool,
        _log_dirty_pages: bool,
    ) -> UserMemoryRegion {
        let mut flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE;
        if !readonly {
            flags |= HV_MAP_GPA_WRITABLE;
        }

        mshv_user_mem_region {
            flags,
            guest_pfn: guest_phys_addr >> PAGE_SHIFT,
            size: memory_size,
            userspace_addr,
        }
        .into()
    }

    fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
        let mut vfio_dev = mshv_create_device {
            type_: mshv_device_type_MSHV_DEV_TYPE_VFIO,
            fd: 0,
            flags: 0,
        };

        self.create_device(&mut vfio_dev)
            .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
    }

    ///
    /// Constructs a routing entry
    ///
    fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
        match config {
            InterruptSourceConfig::MsiIrq(cfg) => mshv_msi_routing_entry {
                gsi,
                address_lo: cfg.low_addr,
                address_hi: cfg.high_addr,
                data: cfg.data,
            }
            .into(),
            _ => {
                unreachable!()
            }
        }
    }

    fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
        let mut msi_routing =
            vec_with_array_field::<mshv_msi_routing, mshv_msi_routing_entry>(entries.len());
        msi_routing[0].nr = entries.len() as u32;

        let entries: Vec<mshv_msi_routing_entry> = entries
            .iter()
            .map(|entry| match entry {
                IrqRoutingEntry::Mshv(e) => *e,
                #[allow(unreachable_patterns)]
                _ => panic!("IrqRoutingEntry type is wrong"),
            })
            .collect();

        // SAFETY: msi_routing initialized with entries.len() and now it is being turned into
        // entries_slice with entries.len() again. It is guaranteed to be large enough to hold
        // everything from entries.
        unsafe {
            let entries_slice: &mut [mshv_msi_routing_entry] =
                msi_routing[0].entries.as_mut_slice(entries.len());
            entries_slice.copy_from_slice(&entries);
        }

        self.fd
            .set_msi_routing(&msi_routing[0])
            .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
    }
    ///
    /// Start logging dirty pages
    ///
    fn start_dirty_log(&self) -> vm::Result<()> {
        self.fd
            .enable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
    }
    ///
    /// Stop logging dirty pages
    ///
    fn stop_dirty_log(&self) -> vm::Result<()> {
        let dirty_log_slots = self.dirty_log_slots.read().unwrap();
        // Before disabling dirty page tracking we need to set the dirty bits
        // in the hypervisor. This is a requirement of the Microsoft Hypervisor.
        for (_, s) in dirty_log_slots.iter() {
            self.fd
                .get_dirty_log(s.guest_pfn, s.memory_size as usize, DIRTY_BITMAP_SET_DIRTY)
                .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))?;
        }
        self.fd
            .disable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))?;
        Ok(())
    }
    ///
    /// Get dirty pages bitmap (one bit per page)
    ///
    fn get_dirty_log(&self, _slot: u32, base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
        self.fd
            .get_dirty_log(
                base_gpa >> PAGE_SHIFT,
                memory_size as usize,
                DIRTY_BITMAP_CLEAR_DIRTY,
            )
            .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
    }
    /// Retrieve guest clock.
    #[cfg(target_arch = "x86_64")]
    fn get_clock(&self) -> vm::Result<ClockData> {
        Ok(ClockData::Mshv)
    }
    /// Set guest clock.
    #[cfg(target_arch = "x86_64")]
    fn set_clock(&self, _data: &ClockData) -> vm::Result<()> {
        Ok(())
    }
    /// Downcast to the underlying MshvVm type
    fn as_any(&self) -> &dyn Any {
        self
    }
    /// Initialize the SEV-SNP VM
    #[cfg(feature = "sev_snp")]
    fn sev_snp_init(&self) -> vm::Result<()> {
        self.fd
            .set_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_STATE,
                hv_partition_isolation_state_HV_PARTITION_ISOLATION_SECURE as u64,
            )
            .map_err(|e| vm::HypervisorVmError::InitializeSevSnp(e.into()))
    }

    #[cfg(feature = "sev_snp")]
    fn import_isolated_pages(
        &self,
        page_type: u32,
        page_size: u32,
        pages: &[u64],
    ) -> vm::Result<()> {
        if pages.is_empty() {
            return Ok(());
        }

        let mut isolated_pages =
            vec_with_array_field::<mshv_import_isolated_pages, u64>(pages.len());
        isolated_pages[0].num_pages = pages.len() as u64;
        isolated_pages[0].page_type = page_type;
        isolated_pages[0].page_size = page_size;
        // SAFETY: isolated_pages initialized with pages.len() and now it is being turned into
        // pages_slice with pages.len() again. It is guaranteed to be large enough to hold
        // everything from pages.
        unsafe {
            let pages_slice: &mut [u64] = isolated_pages[0].page_number.as_mut_slice(pages.len());
            pages_slice.copy_from_slice(pages);
        }
        self.fd
            .import_isolated_pages(&isolated_pages[0])
            .map_err(|e| vm::HypervisorVmError::ImportIsolatedPages(e.into()))
    }
    #[cfg(feature = "sev_snp")]
    fn complete_isolated_import(
        &self,
        snp_id_block: IGVM_VHS_SNP_ID_BLOCK,
        host_data: &[u8],
        id_block_enabled: u8,
    ) -> vm::Result<()> {
        let mut auth_info = hv_snp_id_auth_info {
            id_key_algorithm: snp_id_block.id_key_algorithm,
            auth_key_algorithm: snp_id_block.author_key_algorithm,
            ..Default::default()
        };
        // Each of the r and s components is 576 bits long
        auth_info.id_block_signature[..SIG_R_COMPONENT_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_key_signature.r_comp.as_ref());
        auth_info.id_block_signature
            [SIG_R_COMPONENT_SIZE_IN_BYTES..SIG_R_AND_S_COMPONENT_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_key_signature.s_comp.as_ref());
        auth_info.id_key[..ECDSA_CURVE_ID_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_public_key.curve.to_le_bytes().as_ref());
        auth_info.id_key[ECDSA_SIG_X_COMPONENT_START..ECDSA_SIG_X_COMPONENT_END]
            .copy_from_slice(snp_id_block.id_public_key.qx.as_ref());
        auth_info.id_key[ECDSA_SIG_Y_COMPONENT_START..ECDSA_SIG_Y_COMPONENT_END]
            .copy_from_slice(snp_id_block.id_public_key.qy.as_ref());

        let data = mshv_complete_isolated_import {
            import_data: hv_partition_complete_isolated_import_data {
                psp_parameters: hv_psp_launch_finish_data {
                    id_block: hv_snp_id_block {
                        launch_digest: snp_id_block.ld,
                        family_id: snp_id_block.family_id,
                        image_id: snp_id_block.image_id,
                        version: snp_id_block.version,
                        guest_svn: snp_id_block.guest_svn,
                        policy: get_default_snp_guest_policy(),
                    },
                    id_auth_info: auth_info,
                    host_data: host_data[0..32].try_into().unwrap(),
                    id_block_enabled,
                    author_key_enabled: 0,
                },
            },
        };
        self.fd
            .complete_isolated_import(&data)
            .map_err(|e| vm::HypervisorVmError::CompleteIsolatedImport(e.into()))
    }
}
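
// The `From` conversions near the top of this module encode the GPA <-> guest PFN
// relationship (gpa = pfn << PAGE_SHIFT) and the flag translation between the generic
// `UserMemoryRegion` and `mshv_user_mem_region`. The test below is an illustrative
// sketch added by the editor (not from the upstream file): it only exercises that
// conversion logic, uses no hypervisor ioctls, and the test name and constant values
// are arbitrary examples.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_user_memory_region_conversion() {
        let region = UserMemoryRegion {
            guest_phys_addr: 0x3000,
            memory_size: 0x2000,
            userspace_addr: 0x7f00_0000_0000,
            flags: USER_MEMORY_REGION_READ | USER_MEMORY_REGION_WRITE,
            ..Default::default()
        };

        // UserMemoryRegion -> mshv_user_mem_region: the GPA becomes a guest PFN.
        let mshv_region: mshv_user_mem_region = region.into();
        assert_eq!(mshv_region.guest_pfn, 0x3000 >> PAGE_SHIFT);
        assert_ne!(mshv_region.flags & HV_MAP_GPA_READABLE, 0);
        assert_ne!(mshv_region.flags & HV_MAP_GPA_WRITABLE, 0);
        assert_eq!(mshv_region.flags & HV_MAP_GPA_EXECUTABLE, 0);

        // And back: the PFN is shifted up again and the flags are restored.
        let round_trip: UserMemoryRegion = mshv_region.into();
        assert_eq!(round_trip.guest_phys_addr, 0x3000);
        assert_eq!(
            round_trip.flags,
            USER_MEMORY_REGION_READ | USER_MEMORY_REGION_WRITE
        );
    }
}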