// Copyright © 2024 Institute of Software, CAS. All rights reserved.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//
// Copyright 2018-2019 CrowdStrike, Inc.
//
//

//! A generic abstraction around hypervisor functionality
//!
//! This crate offers a trait abstraction for underlying hypervisors.
//!
//! # Platform support
//!
//! - x86_64
//! - arm64
//! - riscv64 (experimental)
//!

#[macro_use]
extern crate anyhow;
#[allow(unused_imports)]
#[macro_use]
extern crate log;

/// Architecture specific definitions
#[macro_use]
pub mod arch;

/// KVM implementation module
#[cfg(feature = "kvm")]
pub mod kvm;

/// Microsoft Hypervisor implementation module
#[cfg(all(feature = "mshv", target_arch = "x86_64"))]
pub mod mshv;

/// Hypervisor related module
mod hypervisor;

/// Vm related module
mod vm;

/// CPU related module
mod cpu;

/// Device related module
mod device;

use std::mem::size_of;
use std::sync::Arc;

use concat_idents::concat_idents;
#[cfg(target_arch = "x86_64")]
pub use cpu::CpuVendor;
pub use cpu::{HypervisorCpuError, Vcpu, VmExit};
pub use device::HypervisorDeviceError;
#[cfg(all(feature = "kvm", target_arch = "aarch64"))]
pub use kvm::{aarch64, GicState};
#[cfg(all(feature = "kvm", target_arch = "riscv64"))]
pub use kvm::{riscv64, AiaState};
pub use vm::{
    DataMatch, HypervisorVmError, InterruptSourceConfig, LegacyIrqSourceConfig, MsiIrqSourceConfig,
    Vm, VmOps,
};

pub use crate::hypervisor::{Hypervisor, HypervisorError};

#[derive(Debug, Copy, Clone)]
pub enum HypervisorType {
    #[cfg(feature = "kvm")]
    Kvm,
    #[cfg(feature = "mshv")]
    Mshv,
}

/// Create the default hypervisor backend: probe KVM first, then MSHV, and
/// return the first one that reports itself as available.
pub fn new() -> std::result::Result<Arc<dyn Hypervisor>, HypervisorError> {
    #[cfg(feature = "kvm")]
    if kvm::KvmHypervisor::is_available()? {
        return kvm::KvmHypervisor::new();
    }

    #[cfg(feature = "mshv")]
    if mshv::MshvHypervisor::is_available()? {
        return mshv::MshvHypervisor::new();
    }

    Err(HypervisorError::HypervisorCreate(anyhow!(
        "no supported hypervisor"
    )))
}

// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
    let rounded_size = size_in_bytes.div_ceil(size_of::<T>());
    let mut v = Vec::with_capacity(rounded_size);
    v.resize_with(rounded_size, T::default);
    v
}

// The KVM API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
//    some_data: u32,
//    entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would
// not include any space for `entries`. To make the allocation large enough while still being
// aligned for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually
// be used as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be
// contiguous with `Foo`. This function is used to make the `Vec<Foo>` with enough space for
// `count` entries.
pub fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
    let element_space = count * size_of::<F>();
    let vec_size_bytes = size_of::<T>() + element_space;
    vec_with_size_in_bytes(vec_size_bytes)
}
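// Illustrative sketch (not part of the upstream sources): how a caller could use
// `vec_with_array_field` to allocate a KVM structure that ends in an
// `__IncompleteArrayField`, here a `kvm_msrs` header followed by `count` trailing
// `kvm_msr_entry` elements. It assumes the `kvm` feature on x86_64 and that
// `kvm_msrs` implements `Default`, as the `kvm-bindings` types do.
#[cfg(all(test, feature = "kvm", target_arch = "x86_64"))]
mod vec_with_array_field_example {
    use std::mem::size_of;

    use kvm_bindings::{kvm_msr_entry, kvm_msrs};

    use super::*;

    #[test]
    fn allocate_kvm_msrs_with_trailing_entries() {
        let count = 4usize;
        // One `kvm_msrs` header plus enough trailing, `kvm_msrs`-aligned space for
        // `count` entries. Only `msrs[0]` is actually used as a `kvm_msrs`.
        let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(count);
        msrs[0].nmsrs = count as u32;

        let needed = size_of::<kvm_msrs>() + count * size_of::<kvm_msr_entry>();
        assert!(msrs.len() * size_of::<kvm_msrs>() >= needed);
    }
}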
///
/// User memory region structure
///
#[derive(Debug, Default, Eq, PartialEq)]
pub struct UserMemoryRegion {
    pub slot: u32,
    pub guest_phys_addr: u64,
    pub memory_size: u64,
    pub userspace_addr: u64,
    pub flags: u32,
}

///
/// Flags for user memory region
///
pub const USER_MEMORY_REGION_READ: u32 = 1;
pub const USER_MEMORY_REGION_WRITE: u32 = 1 << 1;
pub const USER_MEMORY_REGION_EXECUTE: u32 = 1 << 2;
pub const USER_MEMORY_REGION_LOG_DIRTY: u32 = 1 << 3;
pub const USER_MEMORY_REGION_ADJUSTABLE: u32 = 1 << 4;

#[derive(Debug)]
pub enum MpState {
    #[cfg(feature = "kvm")]
    Kvm(kvm_bindings::kvm_mp_state),
    #[cfg(all(feature = "mshv", target_arch = "x86_64"))]
    Mshv, /* MSHV does not support MpState yet */
}

#[derive(Debug, Clone, Copy)]
pub enum IoEventAddress {
    Pio(u64),
    Mmio(u64),
}

#[derive(Clone, serde::Serialize, serde::Deserialize)]
#[allow(clippy::large_enum_variant)]
pub enum CpuState {
    #[cfg(feature = "kvm")]
    Kvm(kvm::VcpuKvmState),
    #[cfg(all(feature = "mshv", target_arch = "x86_64"))]
    Mshv(mshv::VcpuMshvState),
}

#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
#[cfg(target_arch = "x86_64")]
pub enum ClockData {
    #[cfg(feature = "kvm")]
    Kvm(kvm_bindings::kvm_clock_data),
    #[cfg(feature = "mshv")]
    Mshv(mshv::MshvClockData),
}

#[cfg(target_arch = "x86_64")]
impl ClockData {
    pub fn reset_flags(&mut self) {
        match self {
            #[cfg(feature = "kvm")]
            ClockData::Kvm(s) => s.flags = 0,
            #[allow(unreachable_patterns)]
            _ => {}
        }
    }
}

#[derive(Copy, Clone)]
pub enum IrqRoutingEntry {
    #[cfg(feature = "kvm")]
    Kvm(kvm_bindings::kvm_irq_routing_entry),
    #[cfg(feature = "mshv")]
    Mshv(mshv_bindings::mshv_user_irq_entry),
}

#[derive(Debug, Clone, Copy, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum StandardRegisters {
    #[cfg(all(feature = "kvm", not(target_arch = "riscv64")))]
    Kvm(kvm_bindings::kvm_regs),
    #[cfg(all(feature = "kvm", target_arch = "riscv64"))]
    Kvm(kvm_bindings::kvm_riscv_core),
    #[cfg(all(feature = "mshv", target_arch = "x86_64"))]
    Mshv(mshv_bindings::StandardRegisters),
}

macro_rules! set_x86_64_reg {
    ($reg_name:ident) => {
        concat_idents!(method_name = "set_", $reg_name {
            #[cfg(target_arch = "x86_64")]
            impl StandardRegisters {
                pub fn method_name(&mut self, val: u64) {
                    match self {
                        #[cfg(feature = "kvm")]
                        StandardRegisters::Kvm(s) => s.$reg_name = val,
                        #[cfg(feature = "mshv")]
                        StandardRegisters::Mshv(s) => s.$reg_name = val,
                    }
                }
            }
        });
    }
}

macro_rules! get_x86_64_reg {
    ($reg_name:ident) => {
        concat_idents!(method_name = "get_", $reg_name {
            #[cfg(target_arch = "x86_64")]
            impl StandardRegisters {
                pub fn method_name(&self) -> u64 {
                    match self {
                        #[cfg(feature = "kvm")]
                        StandardRegisters::Kvm(s) => s.$reg_name,
                        #[cfg(feature = "mshv")]
                        StandardRegisters::Mshv(s) => s.$reg_name,
                    }
                }
            }
        });
    }
}

set_x86_64_reg!(rax);
set_x86_64_reg!(rbx);
set_x86_64_reg!(rcx);
set_x86_64_reg!(rdx);
set_x86_64_reg!(rsi);
set_x86_64_reg!(rdi);
set_x86_64_reg!(rsp);
set_x86_64_reg!(rbp);
set_x86_64_reg!(r8);
set_x86_64_reg!(r9);
set_x86_64_reg!(r10);
set_x86_64_reg!(r11);
set_x86_64_reg!(r12);
set_x86_64_reg!(r13);
set_x86_64_reg!(r14);
set_x86_64_reg!(r15);
set_x86_64_reg!(rip);
set_x86_64_reg!(rflags);

get_x86_64_reg!(rax);
get_x86_64_reg!(rbx);
get_x86_64_reg!(rcx);
get_x86_64_reg!(rdx);
get_x86_64_reg!(rsi);
get_x86_64_reg!(rdi);
get_x86_64_reg!(rsp);
get_x86_64_reg!(rbp);
get_x86_64_reg!(r8);
get_x86_64_reg!(r9);
get_x86_64_reg!(r10);
get_x86_64_reg!(r11);
get_x86_64_reg!(r12);
get_x86_64_reg!(r13);
get_x86_64_reg!(r14);
get_x86_64_reg!(r15);
get_x86_64_reg!(rip);
get_x86_64_reg!(rflags);
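// Illustrative sketch (not part of the upstream sources): the `set_*`/`get_*`
// methods generated above let callers manipulate a `StandardRegisters` value
// without caring which hypervisor produced it. This assumes the `kvm` feature
// on x86_64 and that `kvm_bindings::kvm_regs` implements `Default`.
#[cfg(all(test, feature = "kvm", target_arch = "x86_64"))]
mod x86_64_reg_accessor_example {
    use super::*;

    #[test]
    fn set_and_get_x86_64_registers() {
        let mut regs = StandardRegisters::Kvm(kvm_bindings::kvm_regs::default());
        // The generated accessors dispatch on the enum variant internally.
        regs.set_rip(0x1000);
        regs.set_rsp(0x8000);
        regs.set_rflags(0x2);
        assert_eq!(regs.get_rip(), 0x1000);
        assert_eq!(regs.get_rsp(), 0x8000);
        assert_eq!(regs.get_rflags(), 0x2);
    }
}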
get_x86_64_reg { 228 ($reg_name:ident) => { 229 concat_idents!(method_name = "get_", $reg_name { 230 #[cfg(target_arch = "x86_64")] 231 impl StandardRegisters { 232 pub fn method_name(&self) -> u64 { 233 match self { 234 #[cfg(feature = "kvm")] 235 StandardRegisters::Kvm(s) => s.$reg_name, 236 #[cfg(feature = "mshv")] 237 StandardRegisters::Mshv(s) => s.$reg_name, 238 } 239 } 240 } 241 }); 242 } 243 } 244 245 set_x86_64_reg!(rax); 246 set_x86_64_reg!(rbx); 247 set_x86_64_reg!(rcx); 248 set_x86_64_reg!(rdx); 249 set_x86_64_reg!(rsi); 250 set_x86_64_reg!(rdi); 251 set_x86_64_reg!(rsp); 252 set_x86_64_reg!(rbp); 253 set_x86_64_reg!(r8); 254 set_x86_64_reg!(r9); 255 set_x86_64_reg!(r10); 256 set_x86_64_reg!(r11); 257 set_x86_64_reg!(r12); 258 set_x86_64_reg!(r13); 259 set_x86_64_reg!(r14); 260 set_x86_64_reg!(r15); 261 set_x86_64_reg!(rip); 262 set_x86_64_reg!(rflags); 263 264 get_x86_64_reg!(rax); 265 get_x86_64_reg!(rbx); 266 get_x86_64_reg!(rcx); 267 get_x86_64_reg!(rdx); 268 get_x86_64_reg!(rsi); 269 get_x86_64_reg!(rdi); 270 get_x86_64_reg!(rsp); 271 get_x86_64_reg!(rbp); 272 get_x86_64_reg!(r8); 273 get_x86_64_reg!(r9); 274 get_x86_64_reg!(r10); 275 get_x86_64_reg!(r11); 276 get_x86_64_reg!(r12); 277 get_x86_64_reg!(r13); 278 get_x86_64_reg!(r14); 279 get_x86_64_reg!(r15); 280 get_x86_64_reg!(rip); 281 get_x86_64_reg!(rflags); 282 283 macro_rules! set_aarch64_reg { 284 ($reg_name:ident, $type:ty) => { 285 concat_idents!(method_name = "set_", $reg_name { 286 #[cfg(target_arch = "aarch64")] 287 impl StandardRegisters { 288 pub fn method_name(&mut self, val: $type) { 289 match self { 290 #[cfg(feature = "kvm")] 291 StandardRegisters::Kvm(s) => s.regs.$reg_name = val, 292 } 293 } 294 } 295 }); 296 } 297 } 298 299 macro_rules! get_aarch64_reg { 300 ($reg_name:ident, $type:ty) => { 301 concat_idents!(method_name = "get_", $reg_name { 302 #[cfg(target_arch = "aarch64")] 303 impl StandardRegisters { 304 pub fn method_name(&self) -> $type { 305 match self { 306 #[cfg(feature = "kvm")] 307 StandardRegisters::Kvm(s) => s.regs.$reg_name, 308 } 309 } 310 } 311 }); 312 } 313 } 314 315 set_aarch64_reg!(regs, [u64; 31usize]); 316 set_aarch64_reg!(sp, u64); 317 set_aarch64_reg!(pc, u64); 318 set_aarch64_reg!(pstate, u64); 319 320 get_aarch64_reg!(regs, [u64; 31usize]); 321 get_aarch64_reg!(sp, u64); 322 get_aarch64_reg!(pc, u64); 323 get_aarch64_reg!(pstate, u64); 324 325 macro_rules! set_riscv64_reg { 326 (mode) => { 327 #[cfg(target_arch = "riscv64")] 328 impl StandardRegisters { 329 pub fn set_mode(&mut self, val: u64) { 330 match self { 331 #[cfg(feature = "kvm")] 332 StandardRegisters::Kvm(s) => s.mode = val, 333 } 334 } 335 } 336 }; 337 ($reg_name:ident) => { 338 concat_idents!(method_name = "set_", $reg_name { 339 #[cfg(target_arch = "riscv64")] 340 impl StandardRegisters { 341 pub fn method_name(&mut self, val: u64) { 342 match self { 343 #[cfg(feature = "kvm")] 344 StandardRegisters::Kvm(s) => s.regs.$reg_name = val, 345 } 346 } 347 } 348 }); 349 } 350 } 351 352 macro_rules! 
get_riscv64_reg { 353 (mode) => { 354 #[cfg(target_arch = "riscv64")] 355 impl StandardRegisters { 356 pub fn get_mode(&self) -> u64 { 357 match self { 358 #[cfg(feature = "kvm")] 359 StandardRegisters::Kvm(s) => s.mode, 360 } 361 } 362 } 363 }; 364 ($reg_name:ident) => { 365 concat_idents!(method_name = "get_", $reg_name { 366 #[cfg(target_arch = "riscv64")] 367 impl StandardRegisters { 368 pub fn method_name(&self) -> u64 { 369 match self { 370 #[cfg(feature = "kvm")] 371 StandardRegisters::Kvm(s) => s.regs.$reg_name, 372 } 373 } 374 } 375 }); 376 } 377 } 378 379 set_riscv64_reg!(pc); 380 set_riscv64_reg!(ra); 381 set_riscv64_reg!(sp); 382 set_riscv64_reg!(gp); 383 set_riscv64_reg!(tp); 384 set_riscv64_reg!(t0); 385 set_riscv64_reg!(t1); 386 set_riscv64_reg!(t2); 387 set_riscv64_reg!(s0); 388 set_riscv64_reg!(s1); 389 set_riscv64_reg!(a0); 390 set_riscv64_reg!(a1); 391 set_riscv64_reg!(a2); 392 set_riscv64_reg!(a3); 393 set_riscv64_reg!(a4); 394 set_riscv64_reg!(a5); 395 set_riscv64_reg!(a6); 396 set_riscv64_reg!(a7); 397 set_riscv64_reg!(s2); 398 set_riscv64_reg!(s3); 399 set_riscv64_reg!(s4); 400 set_riscv64_reg!(s5); 401 set_riscv64_reg!(s6); 402 set_riscv64_reg!(s7); 403 set_riscv64_reg!(s8); 404 set_riscv64_reg!(s9); 405 set_riscv64_reg!(s10); 406 set_riscv64_reg!(s11); 407 set_riscv64_reg!(t3); 408 set_riscv64_reg!(t4); 409 set_riscv64_reg!(t5); 410 set_riscv64_reg!(t6); 411 set_riscv64_reg!(mode); 412 413 get_riscv64_reg!(pc); 414 get_riscv64_reg!(ra); 415 get_riscv64_reg!(sp); 416 get_riscv64_reg!(gp); 417 get_riscv64_reg!(tp); 418 get_riscv64_reg!(t0); 419 get_riscv64_reg!(t1); 420 get_riscv64_reg!(t2); 421 get_riscv64_reg!(s0); 422 get_riscv64_reg!(s1); 423 get_riscv64_reg!(a0); 424 get_riscv64_reg!(a1); 425 get_riscv64_reg!(a2); 426 get_riscv64_reg!(a3); 427 get_riscv64_reg!(a4); 428 get_riscv64_reg!(a5); 429 get_riscv64_reg!(a6); 430 get_riscv64_reg!(a7); 431 get_riscv64_reg!(s2); 432 get_riscv64_reg!(s3); 433 get_riscv64_reg!(s4); 434 get_riscv64_reg!(s5); 435 get_riscv64_reg!(s6); 436 get_riscv64_reg!(s7); 437 get_riscv64_reg!(s8); 438 get_riscv64_reg!(s9); 439 get_riscv64_reg!(s10); 440 get_riscv64_reg!(s11); 441 get_riscv64_reg!(t3); 442 get_riscv64_reg!(t4); 443 get_riscv64_reg!(t5); 444 get_riscv64_reg!(t6); 445 get_riscv64_reg!(mode); 446