// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//

use devices::interrupt_controller::InterruptController;
use hypervisor::IrqRoutingEntry;
use std::collections::HashMap;
use std::io;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use vm_allocator::SystemAllocator;
use vm_device::interrupt::{
    InterruptIndex, InterruptManager, InterruptSourceConfig, InterruptSourceGroup,
    LegacyIrqGroupConfig, MsiIrqGroupConfig,
};
use vmm_sys_util::eventfd::EventFd;

/// Reuse std::io::Result to simplify interoperability among crates.
pub type Result<T> = std::io::Result<T>;

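/// A single interrupt route: the GSI allocated from the `SystemAllocator`,
/// the `EventFd` backing the irqfd, and a flag tracking whether the irqfd is
/// currently registered with the hypervisor.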
struct InterruptRoute {
    gsi: u32,
    irq_fd: EventFd,
    registered: AtomicBool,
}

impl InterruptRoute {
    pub fn new(allocator: &mut SystemAllocator) -> Result<Self> {
        let irq_fd = EventFd::new(libc::EFD_NONBLOCK)?;
        let gsi = allocator
            .allocate_gsi()
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed allocating new GSI"))?;

        Ok(InterruptRoute {
            gsi,
            irq_fd,
            registered: AtomicBool::new(false),
        })
    }

    pub fn enable(&self, vm: &Arc<dyn hypervisor::Vm>) -> Result<()> {
        if !self.registered.load(Ordering::Acquire) {
            vm.register_irqfd(&self.irq_fd, self.gsi).map_err(|e| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!("Failed registering irq_fd: {}", e),
                )
            })?;

            // Update internals to track the irq_fd as "registered".
            self.registered.store(true, Ordering::Release);
        }

        Ok(())
    }

    pub fn disable(&self, vm: &Arc<dyn hypervisor::Vm>) -> Result<()> {
        if self.registered.load(Ordering::Acquire) {
            vm.unregister_irqfd(&self.irq_fd, self.gsi).map_err(|e| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!("Failed unregistering irq_fd: {}", e),
                )
            })?;

            // Update internals to track the irq_fd as "unregistered".
            self.registered.store(false, Ordering::Release);
        }

        Ok(())
    }

    pub fn trigger(&self) -> Result<()> {
        self.irq_fd.write(1)
    }

    pub fn notifier(&self) -> Option<EventFd> {
        Some(
            self.irq_fd
                .try_clone()
                .expect("Failed cloning interrupt's EventFd"),
        )
    }
}

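/// A hypervisor routing entry paired with its mask state. Masked entries stay
/// in the shared routing table but are skipped when the table is committed to
/// the hypervisor (see `set_gsi_routes`).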
pub struct RoutingEntry<IrqRoutingEntry> {
    route: IrqRoutingEntry,
    masked: bool,
}

pub struct MsiInterruptGroup<IrqRoutingEntry> {
    vm: Arc<dyn hypervisor::Vm>,
    gsi_msi_routes: Arc<Mutex<HashMap<u32, RoutingEntry<IrqRoutingEntry>>>>,
    irq_routes: HashMap<InterruptIndex, InterruptRoute>,
}

impl MsiInterruptGroup<IrqRoutingEntry> {
    fn set_gsi_routes(&self, routes: &HashMap<u32, RoutingEntry<IrqRoutingEntry>>) -> Result<()> {
        let mut entry_vec: Vec<IrqRoutingEntry> = Vec::new();
        for (_, entry) in routes.iter() {
            if entry.masked {
                continue;
            }

            entry_vec.push(entry.route);
        }

        self.vm.set_gsi_routing(&entry_vec).map_err(|e| {
            io::Error::new(
                io::ErrorKind::Other,
                format!("Failed setting GSI routing: {}", e),
            )
        })
    }
}

impl MsiInterruptGroup<IrqRoutingEntry> {
    fn new(
        vm: Arc<dyn hypervisor::Vm>,
        gsi_msi_routes: Arc<Mutex<HashMap<u32, RoutingEntry<IrqRoutingEntry>>>>,
        irq_routes: HashMap<InterruptIndex, InterruptRoute>,
    ) -> Self {
        MsiInterruptGroup {
            vm,
            gsi_msi_routes,
            irq_routes,
        }
    }
}

impl InterruptSourceGroup for MsiInterruptGroup<IrqRoutingEntry> {
    fn enable(&self) -> Result<()> {
        for (_, route) in self.irq_routes.iter() {
            route.enable(&self.vm)?;
        }

        Ok(())
    }

    fn disable(&self) -> Result<()> {
        for (_, route) in self.irq_routes.iter() {
            route.disable(&self.vm)?;
        }

        Ok(())
    }

    fn trigger(&self, index: InterruptIndex) -> Result<()> {
        if let Some(route) = self.irq_routes.get(&index) {
            return route.trigger();
        }

        Err(io::Error::new(
            io::ErrorKind::Other,
            format!("trigger: Invalid interrupt index {}", index),
        ))
    }

    fn notifier(&self, index: InterruptIndex) -> Option<EventFd> {
        if let Some(route) = self.irq_routes.get(&index) {
            return route.notifier();
        }

        None
    }

    fn update(&self, index: InterruptIndex, config: InterruptSourceConfig) -> Result<()> {
        if let Some(route) = self.irq_routes.get(&index) {
            let entry = RoutingEntry::<_>::make_entry(&self.vm, route.gsi, &config)?;
            let mut routes = self.gsi_msi_routes.lock().unwrap();
            routes.insert(route.gsi, *entry);
            return self.set_gsi_routes(&routes);
        }

        Err(io::Error::new(
            io::ErrorKind::Other,
            format!("update: Invalid interrupt index {}", index),
        ))
    }

    fn mask(&self, index: InterruptIndex) -> Result<()> {
        if let Some(route) = self.irq_routes.get(&index) {
            let mut routes = self.gsi_msi_routes.lock().unwrap();
            if let Some(entry) = routes.get_mut(&route.gsi) {
                entry.masked = true;
            } else {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!("mask: No existing route for interrupt index {}", index),
                ));
            }
            route.disable(&self.vm)?;
            return self.set_gsi_routes(&routes);
        }

        Err(io::Error::new(
            io::ErrorKind::Other,
            format!("mask: Invalid interrupt index {}", index),
        ))
    }

    fn unmask(&self, index: InterruptIndex) -> Result<()> {
        if let Some(route) = self.irq_routes.get(&index) {
            let mut routes = self.gsi_msi_routes.lock().unwrap();
            if let Some(entry) = routes.get_mut(&route.gsi) {
                entry.masked = false;
            } else {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!("unmask: No existing route for interrupt index {}", index),
                ));
            }
            route.enable(&self.vm)?;
            return self.set_gsi_routes(&routes);
        }

        Err(io::Error::new(
            io::ErrorKind::Other,
            format!("unmask: Invalid interrupt index {}", index),
        ))
    }
}

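/// Interrupt source group for legacy (pin-based) interrupts that are serviced
/// in userspace by the emulated interrupt controller (e.g. the IOAPIC).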
pub struct LegacyUserspaceInterruptGroup {
    ioapic: Arc<Mutex<dyn InterruptController>>,
    irq: u32,
}

impl LegacyUserspaceInterruptGroup {
    fn new(ioapic: Arc<Mutex<dyn InterruptController>>, irq: u32) -> Self {
        LegacyUserspaceInterruptGroup { ioapic, irq }
    }
}

impl InterruptSourceGroup for LegacyUserspaceInterruptGroup {
    fn trigger(&self, _index: InterruptIndex) -> Result<()> {
        self.ioapic
            .lock()
            .unwrap()
            .service_irq(self.irq as usize)
            .map_err(|e| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!("failed to inject IRQ #{}: {:?}", self.irq, e),
                )
            })
    }

    fn update(&self, _index: InterruptIndex, _config: InterruptSourceConfig) -> Result<()> {
        Ok(())
    }

    fn notifier(&self, _index: InterruptIndex) -> Option<EventFd> {
        self.ioapic.lock().unwrap().notifier(self.irq as usize)
    }
}

pub struct LegacyUserspaceInterruptManager {
    ioapic: Arc<Mutex<dyn InterruptController>>,
}

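/// MSI/MSI-X interrupt manager. It creates `MsiInterruptGroup`s whose routes
/// are backed by GSIs taken from the shared `SystemAllocator`, and it keeps a
/// single routing table (`gsi_msi_routes`) shared by every group it creates.
///
/// Illustrative sketch of how a device might consume this API; `msi_manager`,
/// `num_vectors` and `msi_cfg` are hypothetical names standing for values
/// provided by the caller:
///
/// ```ignore
/// let group = msi_manager.create_group(MsiIrqGroupConfig {
///     base: 0,
///     count: num_vectors,
/// })?;
/// group.update(0, InterruptSourceConfig::MsiIrq(msi_cfg))?;
/// group.enable()?;
/// group.trigger(0)?;
/// ```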
pub struct MsiInterruptManager<IrqRoutingEntry> {
    allocator: Arc<Mutex<SystemAllocator>>,
    vm: Arc<dyn hypervisor::Vm>,
    gsi_msi_routes: Arc<Mutex<HashMap<u32, RoutingEntry<IrqRoutingEntry>>>>,
}

impl LegacyUserspaceInterruptManager {
    pub fn new(ioapic: Arc<Mutex<dyn InterruptController>>) -> Self {
        LegacyUserspaceInterruptManager { ioapic }
    }
}

impl MsiInterruptManager<IrqRoutingEntry> {
    pub fn new(allocator: Arc<Mutex<SystemAllocator>>, vm: Arc<dyn hypervisor::Vm>) -> Self {
        // Create a GSI routing table shared by all PCI devices. This way, we
        // maintain the full list of GSIs in use and prevent one device from
        // overriding another device's interrupt settings.
        let gsi_msi_routes = Arc::new(Mutex::new(HashMap::new()));

        MsiInterruptManager {
            allocator,
            vm,
            gsi_msi_routes,
        }
    }
}

impl InterruptManager for LegacyUserspaceInterruptManager {
    type GroupConfig = LegacyIrqGroupConfig;

    fn create_group(&self, config: Self::GroupConfig) -> Result<Arc<dyn InterruptSourceGroup>> {
        Ok(Arc::new(LegacyUserspaceInterruptGroup::new(
            self.ioapic.clone(),
            config.irq as u32,
        )))
    }

    fn destroy_group(&self, _group: Arc<dyn InterruptSourceGroup>) -> Result<()> {
        Ok(())
    }
}

impl InterruptManager for MsiInterruptManager<IrqRoutingEntry> {
    type GroupConfig = MsiIrqGroupConfig;

    fn create_group(&self, config: Self::GroupConfig) -> Result<Arc<dyn InterruptSourceGroup>> {
        let mut allocator = self.allocator.lock().unwrap();
        let mut irq_routes: HashMap<InterruptIndex, InterruptRoute> =
            HashMap::with_capacity(config.count as usize);
        for i in config.base..config.base + config.count {
            irq_routes.insert(i, InterruptRoute::new(&mut allocator)?);
        }

        Ok(Arc::new(MsiInterruptGroup::new(
            self.vm.clone(),
            self.gsi_msi_routes.clone(),
            irq_routes,
        )))
    }

    fn destroy_group(&self, _group: Arc<dyn InterruptSourceGroup>) -> Result<()> {
        Ok(())
    }
}

#[cfg(feature = "kvm")]
pub mod kvm {
    use super::*;
    use hypervisor::kvm::KVM_MSI_VALID_DEVID;
    use hypervisor::kvm::{kvm_irq_routing_entry, KVM_IRQ_ROUTING_IRQCHIP, KVM_IRQ_ROUTING_MSI};
    use pci::PciBdf;

    type KvmRoutingEntry = RoutingEntry<kvm_irq_routing_entry>;
    pub type KvmMsiInterruptManager = MsiInterruptManager<kvm_irq_routing_entry>;

    impl KvmRoutingEntry {
        pub fn make_entry(
            vm: &Arc<dyn hypervisor::Vm>,
            gsi: u32,
            config: &InterruptSourceConfig,
        ) -> Result<Box<Self>> {
            if let InterruptSourceConfig::MsiIrq(cfg) = &config {
                let mut kvm_route = kvm_irq_routing_entry {
                    gsi,
                    type_: KVM_IRQ_ROUTING_MSI,
                    ..Default::default()
                };

                kvm_route.u.msi.address_lo = cfg.low_addr;
                kvm_route.u.msi.address_hi = cfg.high_addr;
                kvm_route.u.msi.data = cfg.data;

                if vm.check_extension(hypervisor::Cap::MsiDevid) {
                    // On AArch64, there is a limitation on the range of the
                    // 'devid': it cannot exceed 65535 (the maximum of a u16).
                    //
                    // The BDF cannot be used directly, because the 'segment'
                    // sits in the high 16 bits. The layout of the u32 BDF is:
                    // |---- 16 bits ----|-- 8 bits --|-- 5 bits --|-- 3 bits --|
                    // |     segment     |     bus    |   device   |  function  |
                    //
                    // Since we only support one bus per segment, we can build
                    // a 'devid' by replacing the 'bus' bits with the low 8
                    // bits of the 'segment' data.
                    // This resolves the range-checking problem and still gives
                    // every device a distinct 'devid', with the limitation
                    // that at most 256 segments can be supported.
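                    //
                    // Illustrative example (hypothetical values): segment
                    // 0x0002, bus 0x00, device 0x03, function 0x1 encode as
                    // BDF 0x0002_0019. The rewrite clears the segment field,
                    // moves its low byte into the bus field, and keeps the
                    // device and function bits, producing devid 0x0219.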
                    //
                    let bdf: PciBdf = PciBdf::from(cfg.devid);
                    let modified_bdf: PciBdf =
                        PciBdf::new(0, bdf.segment() as u8, bdf.device(), bdf.function());
                    kvm_route.flags = KVM_MSI_VALID_DEVID;
                    kvm_route.u.msi.__bindgen_anon_1.devid = modified_bdf.into();
                }

                let kvm_entry = KvmRoutingEntry {
                    route: kvm_route,
                    masked: false,
                };

                return Ok(Box::new(kvm_entry));
            } else if let InterruptSourceConfig::LegacyIrq(cfg) = &config {
                let mut kvm_route = kvm_irq_routing_entry {
                    gsi,
                    type_: KVM_IRQ_ROUTING_IRQCHIP,
                    ..Default::default()
                };
                kvm_route.u.irqchip.irqchip = cfg.irqchip;
                kvm_route.u.irqchip.pin = cfg.pin;
                let kvm_entry = KvmRoutingEntry {
                    route: kvm_route,
                    masked: false,
                };

                return Ok(Box::new(kvm_entry));
            }

            Err(io::Error::new(
                io::ErrorKind::Other,
                "Interrupt config type not supported",
            ))
        }
    }
}

#[cfg(feature = "mshv")]
pub mod mshv {
    use super::*;
    use hypervisor::mshv::*;

    type MshvRoutingEntry = RoutingEntry<mshv_msi_routing_entry>;
    pub type MshvMsiInterruptManager = MsiInterruptManager<mshv_msi_routing_entry>;

    impl MshvRoutingEntry {
        pub fn make_entry(
            _vm: &Arc<dyn hypervisor::Vm>,
            gsi: u32,
            config: &InterruptSourceConfig,
        ) -> Result<Box<Self>> {
            if let InterruptSourceConfig::MsiIrq(cfg) = &config {
                let route = mshv_msi_routing_entry {
                    gsi,
                    address_lo: cfg.low_addr,
                    address_hi: cfg.high_addr,
                    data: cfg.data,
                };
                let entry = MshvRoutingEntry {
                    route,
                    masked: false,
                };

                return Ok(Box::new(entry));
            }

            Err(io::Error::new(
                io::ErrorKind::Other,
                "Interrupt config type not supported",
            ))
        }
    }
}

#[cfg(target_arch = "aarch64")]
#[cfg(test)]
mod tests {
    use arch::aarch64::gic::kvm::{create_gic, save_pending_tables};
    use arch::aarch64::gic::{
        get_dist_regs, get_icc_regs, get_redist_regs, set_dist_regs, set_icc_regs, set_redist_regs,
    };

    #[test]
    fn test_create_gic() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();

        assert!(create_gic(&vm, 1).is_ok());
    }

    #[test]
    fn test_get_set_dist_regs() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let _ = vm.create_vcpu(0, None).unwrap();
        let gic = create_gic(&vm, 1).expect("Cannot create gic");

        let res = get_dist_regs(gic.device());
        assert!(res.is_ok());
        let state = res.unwrap();
        assert_eq!(state.len(), 568);

        let res = set_dist_regs(gic.device(), &state);
        assert!(res.is_ok());
    }

    #[test]
    fn test_get_set_redist_regs() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let _ = vm.create_vcpu(0, None).unwrap();
        let gic = create_gic(&vm, 1).expect("Cannot create gic");

        let gicr_typer = vec![123];
        let res = get_redist_regs(gic.device(), &gicr_typer);
        assert!(res.is_ok());
        let state = res.unwrap();
        println!("{}", state.len());
        assert!(state.len() == 24);

        assert!(set_redist_regs(gic.device(), &gicr_typer, &state).is_ok());
    }

    #[test]
    fn test_get_set_icc_regs() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let _ = vm.create_vcpu(0, None).unwrap();
        let gic = create_gic(&vm, 1).expect("Cannot create gic");

        let gicr_typer = vec![123];
        let res = get_icc_regs(gic.device(), &gicr_typer);
        assert!(res.is_ok());
        let state = res.unwrap();
        println!("{}", state.len());
        assert!(state.len() == 9);

        assert!(set_icc_regs(gic.device(), &gicr_typer, &state).is_ok());
    }

    #[test]
    fn test_save_pending_tables() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let _ = vm.create_vcpu(0, None).unwrap();
        let gic = create_gic(&vm, 1).expect("Cannot create gic");

        assert!(save_pending_tables(gic.device()).is_ok());
    }
}