// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2024, Microsoft Corporation
//

use iced_x86::Register;
use mshv_bindings::*;

use crate::arch::emulator::{PlatformEmulator, PlatformError};
use crate::arch::x86::emulator::{CpuStateManager, EmulatorCpuState};
use crate::cpu::Vcpu;
use crate::mshv::MshvVcpu;

pub struct MshvEmulatorContext<'a> {
    pub vcpu: &'a MshvVcpu,
    pub map: (u64, u64), // Initial GVA to GPA mapping provided by the hypervisor
}

impl MshvEmulatorContext<'_> {
    // Do the actual gva -> gpa translation
    #[allow(non_upper_case_globals)]
    fn translate(&self, gva: u64, flags: u32) -> Result<u64, PlatformError> {
        if self.map.0 == gva {
            return Ok(self.map.1);
        }

        let (gpa, result_code) = self
            .vcpu
            .translate_gva(gva, flags.into())
            .map_err(|e| PlatformError::TranslateVirtualAddress(anyhow!(e)))?;

        match result_code {
            hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS => Ok(gpa),
            _ => Err(PlatformError::TranslateVirtualAddress(anyhow!(result_code))),
        }
    }

    /// Read `data.len()` bytes from a single guest page, falling back to MMIO
    /// if the GPA is not backed by guest memory.
    fn r(&self, gva: u64, data: &mut [u8], flags: u32) -> Result<(), PlatformError> {
        let gpa = self.translate(gva, flags)?;
        debug!(
            "mshv emulator: memory read {} bytes from [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_read(gpa, data).is_err() {
                vm_ops
                    .mmio_read(gpa, data)
                    .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
            }
        }

        Ok(())
    }

    /// Read guest memory, splitting the access in two when it crosses a page
    /// boundary so that each page is translated separately.
    fn read_memory_flags(
        &self,
        gva: u64,
        data: &mut [u8],
        flags: u32,
    ) -> Result<(), PlatformError> {
        let mut len = data.len() as u64;

        // Compare the page number of the first and last byte. If they are different, this is a
        // cross-page access.
        let pg1 = gva >> HV_HYP_PAGE_SHIFT;
        let pg2 = (gva + len - 1) >> HV_HYP_PAGE_SHIFT;
        let cross_page = pg1 != pg2;

        if cross_page {
            // We only handle one page cross-page access
            assert!(pg1 + 1 == pg2);
            let n = (gva + len) & HV_HYP_PAGE_MASK as u64;
            len -= n;
        }

        self.r(gva, &mut data[..len as usize], flags)?;

        if cross_page {
            self.r(gva + len, &mut data[len as usize..], flags)?;
        }

        Ok(())
    }

    /// Write `data` to a single guest page, falling back to MMIO if the GPA is
    /// not backed by guest memory.
    fn w(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva, HV_TRANSLATE_GVA_VALIDATE_WRITE)?;
        debug!(
            "mshv emulator: memory write {} bytes at [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_write(gpa, data).is_err() {
                vm_ops
                    .mmio_write(gpa, data)
                    .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
            }
        }

        Ok(())
    }
}

/// Platform emulation for Hyper-V
impl PlatformEmulator for MshvEmulatorContext<'_> {
    type CpuState = EmulatorCpuState;

    fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
        self.read_memory_flags(gva, data, HV_TRANSLATE_GVA_VALIDATE_READ)
    }

    fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
        let mut len = data.len() as u64;

        // Compare the page number of the first and last byte. If they are different, this is a
        // cross-page access.
        let pg1 = gva >> HV_HYP_PAGE_SHIFT;
        let pg2 = (gva + len - 1) >> HV_HYP_PAGE_SHIFT;
        let cross_page = pg1 != pg2;

        if cross_page {
            // We only handle one page cross-page access
            assert!(pg1 + 1 == pg2);
            let n = (gva + len) & HV_HYP_PAGE_MASK as u64;
            len -= n;
        }

        self.w(gva, &data[..len as usize])?;

        if cross_page {
            self.w(gva + len, &data[len as usize..])?;
        }

        Ok(())
    }

    fn cpu_state(&self, cpu_id: usize) -> Result<Self::CpuState, PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::GetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        let regs = self
            .vcpu
            .get_regs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
        let sregs = self
            .vcpu
            .get_sregs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;

        debug!("mshv emulator: Getting new CPU state");
        debug!("mshv emulator: {:#x?}", regs);

        Ok(EmulatorCpuState { regs, sregs })
    }

    fn set_cpu_state(&self, cpu_id: usize, state: Self::CpuState) -> Result<(), PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::SetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        debug!("mshv emulator: Setting new CPU state");
        debug!("mshv emulator: {:#x?}", state.regs);

        self.vcpu
            .set_regs(&state.regs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))?;
        self.vcpu
            .set_sregs(&state.sregs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))
    }

    fn fetch(&self, ip: u64, instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
        let rip =
            self.cpu_state(self.vcpu.vp_index as usize)?
                .linearize(Register::CS, ip, false)?;
        self.read_memory_flags(
            rip,
            instruction_bytes,
            HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_EXECUTE,
        )
    }
}
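
// The sketch below is illustrative only and not part of the original module.
// It exercises the cross-page split arithmetic used by `read_memory_flags`
// and `write_memory` with locally defined page constants, assumed to match
// mshv_bindings' HV_HYP_PAGE_SHIFT (12) and HV_HYP_PAGE_MASK (0xfff), so it
// runs without any hypervisor dependencies.
#[cfg(test)]
mod page_split_sketch {
    const PAGE_SHIFT: u64 = 12;
    const PAGE_MASK: u64 = 0xfff;

    // Mirror of the split logic above: returns how many bytes of an access
    // starting at `gva` with length `len` fit on the first guest page.
    fn first_chunk_len(gva: u64, len: u64) -> u64 {
        let pg1 = gva >> PAGE_SHIFT;
        let pg2 = (gva + len - 1) >> PAGE_SHIFT;
        if pg1 == pg2 {
            len
        } else {
            // Only a single page boundary may be crossed.
            assert!(pg1 + 1 == pg2);
            len - ((gva + len) & PAGE_MASK)
        }
    }

    #[test]
    fn access_within_one_page_is_not_split() {
        // 8 bytes entirely inside one page: the whole access stays in one chunk.
        assert_eq!(first_chunk_len(0x1000, 8), 8);
    }

    #[test]
    fn access_across_two_pages_is_split() {
        // 8 bytes starting 3 bytes before a page boundary: 3 bytes land on the
        // first page, the remaining 5 on the second.
        assert_eq!(first_chunk_len(0x1ffd, 8), 3);
    }
}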