// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//

use crate::{
    msi_num_enabled_vectors, BarReprogrammingParams, MsiConfig, MsixCap, MsixConfig,
    PciBarConfiguration, PciBarRegionType, PciBdf, PciCapabilityId, PciClassCode, PciConfiguration,
    PciDevice, PciDeviceError, PciHeaderType, PciSubclass, MSIX_TABLE_ENTRY_SIZE,
};
use byteorder::{ByteOrder, LittleEndian};
use hypervisor::HypervisorVmError;
use std::any::Any;
use std::collections::BTreeMap;
use std::io;
use std::os::unix::io::AsRawFd;
use std::ptr::null_mut;
use std::sync::{Arc, Barrier, Mutex};
use thiserror::Error;
use vfio_bindings::bindings::vfio::*;
use vfio_ioctls::{VfioContainer, VfioDevice, VfioIrq, VfioRegionInfoCap};
use vm_allocator::{AddressAllocator, SystemAllocator};
use vm_device::interrupt::{
    InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig,
};
use vm_device::BusDevice;
use vm_memory::{Address, GuestAddress, GuestUsize};
use vmm_sys_util::eventfd::EventFd;

#[derive(Debug, Error)]
pub enum VfioPciError {
    #[error("Failed to DMA map: {0}")]
    DmaMap(#[source] vfio_ioctls::VfioError),
    #[error("Failed to DMA unmap: {0}")]
    DmaUnmap(#[source] vfio_ioctls::VfioError),
    #[error("Failed to enable INTx: {0}")]
    EnableIntx(#[source] VfioError),
    #[error("Failed to enable MSI: {0}")]
    EnableMsi(#[source] VfioError),
    #[error("Failed to enable MSI-X: {0}")]
    EnableMsix(#[source] VfioError),
    #[error("Failed to map VFIO PCI region into guest: {0}")]
    MapRegionGuest(#[source] HypervisorVmError),
    #[error("Missing interrupt notifier eventfd")]
    MissingNotifier,
}

#[derive(Copy, Clone)]
enum PciVfioSubclass {
    VfioSubclass = 0xff,
}

impl PciSubclass for PciVfioSubclass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}

enum InterruptUpdateAction {
    EnableMsi,
    DisableMsi,
    EnableMsix,
    DisableMsix,
}

pub(crate) struct VfioIntx {
    interrupt_source_group: Arc<dyn InterruptSourceGroup>,
    enabled: bool,
}

pub(crate) struct VfioMsi {
    pub(crate) cfg: MsiConfig,
    cap_offset: u32,
    interrupt_source_group: Arc<dyn InterruptSourceGroup>,
}

impl VfioMsi {
    fn update(&mut self, offset: u64, data: &[u8]) -> Option<InterruptUpdateAction> {
        let old_enabled = self.cfg.enabled();

        self.cfg.update(offset, data);

        let new_enabled = self.cfg.enabled();

        if !old_enabled && new_enabled {
            return Some(InterruptUpdateAction::EnableMsi);
        }

        if old_enabled && !new_enabled {
            return Some(InterruptUpdateAction::DisableMsi);
        }

        None
    }
}

pub(crate) struct VfioMsix {
    pub(crate) bar: MsixConfig,
    cap: MsixCap,
    cap_offset: u32,
    interrupt_source_group: Arc<dyn InterruptSourceGroup>,
}

impl VfioMsix {
    fn update(&mut self, offset: u64, data: &[u8]) -> Option<InterruptUpdateAction> {
        let old_enabled = self.bar.enabled();

        // Update the "Message Control" word of the MSI-X capability.
        if offset == 2 && data.len() == 2 {
            self.bar.set_msg_ctl(LittleEndian::read_u16(data));
        }

        let new_enabled = self.bar.enabled();

        if !old_enabled && new_enabled {
            return Some(InterruptUpdateAction::EnableMsix);
        }

        if old_enabled && !new_enabled {
            return Some(InterruptUpdateAction::DisableMsix);
        }

        None
    }
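
    // Checks whether a BAR access falls within the MSI-X table. As an
    // illustrative example (values assumed): with the table located in BAR 2
    // at offset 0x1000 and holding 8 entries (MSIX_TABLE_ENTRY_SIZE is 16
    // bytes per the PCI spec, so 128 bytes total), only accesses to BAR 2
    // with an offset in [0x1000, 0x1080) count as table accesses.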
    fn table_accessed(&self, bar_index: u32, offset: u64) -> bool {
        let table_offset: u64 = u64::from(self.cap.table_offset());
        let table_size: u64 = u64::from(self.cap.table_size()) * (MSIX_TABLE_ENTRY_SIZE as u64);
        let table_bir: u32 = self.cap.table_bir();

        bar_index == table_bir && offset >= table_offset && offset < table_offset + table_size
    }
}

pub(crate) struct Interrupt {
    pub(crate) intx: Option<VfioIntx>,
    pub(crate) msi: Option<VfioMsi>,
    pub(crate) msix: Option<VfioMsix>,
}

impl Interrupt {
    fn update_msi(&mut self, offset: u64, data: &[u8]) -> Option<InterruptUpdateAction> {
        if let Some(ref mut msi) = &mut self.msi {
            let action = msi.update(offset, data);
            return action;
        }

        None
    }

    fn update_msix(&mut self, offset: u64, data: &[u8]) -> Option<InterruptUpdateAction> {
        if let Some(ref mut msix) = &mut self.msix {
            let action = msix.update(offset, data);
            return action;
        }

        None
    }

    fn accessed(&self, offset: u64) -> Option<(PciCapabilityId, u64)> {
        if let Some(msi) = &self.msi {
            if offset >= u64::from(msi.cap_offset)
                && offset < u64::from(msi.cap_offset) + msi.cfg.size()
            {
                return Some((
                    PciCapabilityId::MessageSignalledInterrupts,
                    u64::from(msi.cap_offset),
                ));
            }
        }

        if let Some(msix) = &self.msix {
            if offset == u64::from(msix.cap_offset) {
                return Some((PciCapabilityId::MsiX, u64::from(msix.cap_offset)));
            }
        }

        None
    }

    fn msix_table_accessed(&self, bar_index: u32, offset: u64) -> bool {
        if let Some(msix) = &self.msix {
            return msix.table_accessed(bar_index, offset);
        }

        false
    }

    fn msix_write_table(&mut self, offset: u64, data: &[u8]) {
        if let Some(ref mut msix) = &mut self.msix {
            let offset = offset - u64::from(msix.cap.table_offset());
            msix.bar.write_table(offset, data)
        }
    }

    fn msix_read_table(&self, offset: u64, data: &mut [u8]) {
        if let Some(msix) = &self.msix {
            let offset = offset - u64::from(msix.cap.table_offset());
            msix.bar.read_table(offset, data)
        }
    }

    pub(crate) fn intx_in_use(&self) -> bool {
        if let Some(intx) = &self.intx {
            return intx.enabled;
        }

        false
    }
}

#[derive(Copy, Clone)]
pub struct UserMemoryRegion {
    slot: u32,
    start: u64,
    size: u64,
    host_addr: u64,
}

#[derive(Clone)]
pub struct MmioRegion {
    pub start: GuestAddress,
    pub length: GuestUsize,
    pub(crate) type_: PciBarRegionType,
    pub(crate) index: u32,
    pub(crate) mem_slot: Option<u32>,
    pub(crate) host_addr: Option<u64>,
    pub(crate) mmap_size: Option<usize>,
    pub(crate) user_memory_regions: Vec<UserMemoryRegion>,
}

#[derive(Debug, Error)]
pub enum VfioError {
    #[error("Kernel VFIO error: {0}")]
    KernelVfio(#[source] vfio_ioctls::VfioError),
    #[error("VFIO user error: {0}")]
    VfioUser(#[source] vfio_user::Error),
}
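
// The Vfio trait abstracts the transport used to reach the device: the
// kernel VFIO driver (wrapped below by VfioDeviceWrapper) or a vfio-user
// backend (see the VfioError::VfioUser variant). The default methods build
// common PCI config space accessors on top of region_read()/region_write().
// For instance, reading the device's vendor ID boils down to a 16-bit read
// from offset 0 of the config region:
//
//     let vendor_id = vfio_wrapper.read_config_word(0x00);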
pub(crate) trait Vfio {
    fn read_config_byte(&self, offset: u32) -> u8 {
        let mut data: [u8; 1] = [0];
        self.read_config(offset, &mut data);
        data[0]
    }

    fn read_config_word(&self, offset: u32) -> u16 {
        let mut data: [u8; 2] = [0, 0];
        self.read_config(offset, &mut data);
        u16::from_le_bytes(data)
    }

    fn read_config_dword(&self, offset: u32) -> u32 {
        let mut data: [u8; 4] = [0, 0, 0, 0];
        self.read_config(offset, &mut data);
        u32::from_le_bytes(data)
    }

    fn write_config_dword(&self, offset: u32, buf: u32) {
        let data: [u8; 4] = buf.to_le_bytes();
        self.write_config(offset, &data)
    }

    fn read_config(&self, offset: u32, data: &mut [u8]) {
        self.region_read(VFIO_PCI_CONFIG_REGION_INDEX, offset.into(), data.as_mut());
    }

    fn write_config(&self, offset: u32, data: &[u8]) {
        self.region_write(VFIO_PCI_CONFIG_REGION_INDEX, offset.into(), data)
    }

    fn enable_msi(&self, fds: Vec<&EventFd>) -> Result<(), VfioError> {
        self.enable_irq(VFIO_PCI_MSI_IRQ_INDEX, fds)
    }

    fn disable_msi(&self) -> Result<(), VfioError> {
        self.disable_irq(VFIO_PCI_MSI_IRQ_INDEX)
    }

    fn enable_msix(&self, fds: Vec<&EventFd>) -> Result<(), VfioError> {
        self.enable_irq(VFIO_PCI_MSIX_IRQ_INDEX, fds)
    }

    fn disable_msix(&self) -> Result<(), VfioError> {
        self.disable_irq(VFIO_PCI_MSIX_IRQ_INDEX)
    }

    fn region_read(&self, _index: u32, _offset: u64, _data: &mut [u8]) {
        unimplemented!()
    }

    fn region_write(&self, _index: u32, _offset: u64, _data: &[u8]) {
        unimplemented!()
    }

    fn get_irq_info(&self, _irq_index: u32) -> Option<VfioIrq> {
        unimplemented!()
    }

    fn enable_irq(&self, _irq_index: u32, _event_fds: Vec<&EventFd>) -> Result<(), VfioError> {
        unimplemented!()
    }

    fn disable_irq(&self, _irq_index: u32) -> Result<(), VfioError> {
        unimplemented!()
    }

    fn unmask_irq(&self, _irq_index: u32) -> Result<(), VfioError> {
        unimplemented!()
    }
}

struct VfioDeviceWrapper {
    device: Arc<VfioDevice>,
}

impl VfioDeviceWrapper {
    fn new(device: Arc<VfioDevice>) -> Self {
        Self { device }
    }
}

impl Vfio for VfioDeviceWrapper {
    fn region_read(&self, index: u32, offset: u64, data: &mut [u8]) {
        self.device.region_read(index, data, offset)
    }

    fn region_write(&self, index: u32, offset: u64, data: &[u8]) {
        self.device.region_write(index, data, offset)
    }

    fn get_irq_info(&self, irq_index: u32) -> Option<VfioIrq> {
        self.device.get_irq_info(irq_index).copied()
    }

    fn enable_irq(&self, irq_index: u32, event_fds: Vec<&EventFd>) -> Result<(), VfioError> {
        self.device
            .enable_irq(irq_index, event_fds)
            .map_err(VfioError::KernelVfio)
    }

    fn disable_irq(&self, irq_index: u32) -> Result<(), VfioError> {
        self.device
            .disable_irq(irq_index)
            .map_err(VfioError::KernelVfio)
    }

    fn unmask_irq(&self, irq_index: u32) -> Result<(), VfioError> {
        self.device
            .unmask_irq(irq_index)
            .map_err(VfioError::KernelVfio)
    }
}

pub(crate) struct VfioCommon {
    pub(crate) configuration: PciConfiguration,
    pub(crate) mmio_regions: Vec<MmioRegion>,
    pub(crate) interrupt: Interrupt,
}

impl VfioCommon {
    pub(crate) fn allocate_bars(
        &mut self,
        allocator: &Arc<Mutex<SystemAllocator>>,
        mmio_allocator: &mut AddressAllocator,
        vfio_wrapper: &dyn Vfio,
    ) -> Result<Vec<(GuestAddress, GuestUsize, PciBarRegionType)>, PciDeviceError> {
        let mut ranges = Vec::new();
        let mut bar_id = VFIO_PCI_BAR0_REGION_INDEX as u32;

        // Go through all regular regions to compute the BAR size.
        // We're not saving the BAR address to restore it, because we
        // are going to allocate a guest address for each BAR and write
        // that new address back.
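        //
        // BAR sizing follows the standard PCI probing sequence: write all 1s
        // to the BAR register, read it back, mask out the flag bits, then
        // invert and add 1. As a worked example, if a 32-bit memory BAR reads
        // back 0xffff_f000 after masking, its size is
        // !0xffff_f000 + 1 = 0x1000, i.e. 4 KiB.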
        while bar_id < VFIO_PCI_CONFIG_REGION_INDEX {
            let region_size: u64;
            let bar_addr: GuestAddress;

            let bar_offset = if bar_id == VFIO_PCI_ROM_REGION_INDEX {
                (PCI_ROM_EXP_BAR_INDEX * 4) as u32
            } else {
                PCI_CONFIG_BAR_OFFSET + bar_id * 4
            };

            // First read the flags.
            let flags = vfio_wrapper.read_config_dword(bar_offset);

            // Is this an I/O BAR?
            let io_bar = if bar_id != VFIO_PCI_ROM_REGION_INDEX {
                matches!(flags & PCI_CONFIG_IO_BAR, PCI_CONFIG_IO_BAR)
            } else {
                false
            };

            // Is this a 64-bit BAR?
            let is_64bit_bar = if bar_id != VFIO_PCI_ROM_REGION_INDEX {
                matches!(
                    flags & PCI_CONFIG_MEMORY_BAR_64BIT,
                    PCI_CONFIG_MEMORY_BAR_64BIT
                )
            } else {
                false
            };

            // By default, the region type is a 32-bit memory BAR.
            let mut region_type = PciBarRegionType::Memory32BitRegion;

            // To probe the size, write all 1s to the BAR register.
            vfio_wrapper.write_config_dword(bar_offset, 0xffff_ffff);

            // And read back the BAR value. The device will write zeros for
            // bits it doesn't care about.
            let mut lower = vfio_wrapper.read_config_dword(bar_offset);

            if io_bar {
                // Mask flag bits (lowest 2 for I/O BARs).
                lower &= !0b11;

                // BAR is not enabled.
                if lower == 0 {
                    bar_id += 1;
                    continue;
                }

                #[cfg(target_arch = "x86_64")]
                {
                    // I/O BAR
                    region_type = PciBarRegionType::IoRegion;

                    // Invert the bits and add 1 to calculate the size.
                    region_size = (!lower + 1) as u64;

                    // The address needs to be 4-byte aligned.
                    bar_addr = allocator
                        .lock()
                        .unwrap()
                        .allocate_io_addresses(None, region_size, Some(0x4))
                        .ok_or(PciDeviceError::IoAllocationFailed(region_size))?;
                }
                #[cfg(target_arch = "aarch64")]
                unimplemented!()
            } else if is_64bit_bar {
                // 64-bit memory BAR
                region_type = PciBarRegionType::Memory64BitRegion;

                // Query the size of the upper half of the 64-bit BAR.
                let upper_offset: u32 = PCI_CONFIG_BAR_OFFSET + (bar_id + 1) * 4;
                vfio_wrapper.write_config_dword(upper_offset, 0xffff_ffff);
                let upper = vfio_wrapper.read_config_dword(upper_offset);

                let mut combined_size = u64::from(upper) << 32 | u64::from(lower);

                // Mask out flag bits (lowest 4 for memory BARs).
                combined_size &= !0b1111;

                // BAR is not enabled.
                if combined_size == 0 {
                    bar_id += 1;
                    continue;
                }

                // Invert and add 1 to find the size.
                region_size = (!combined_size + 1) as u64;

                // BAR allocation must be naturally aligned.
                bar_addr = mmio_allocator
                    .allocate(None, region_size, Some(region_size))
                    .ok_or(PciDeviceError::IoAllocationFailed(region_size))?;
            } else {
                // Mask out flag bits (lowest 4 for memory BARs).
                lower &= !0b1111;

                if lower == 0 {
                    bar_id += 1;
                    continue;
                }

                // Invert and add 1 to find the size.
                region_size = (!lower + 1) as u64;

                // BAR allocation must be naturally aligned.
                bar_addr = allocator
                    .lock()
                    .unwrap()
                    .allocate_mmio_hole_addresses(None, region_size, Some(region_size))
                    .ok_or(PciDeviceError::IoAllocationFailed(region_size))?;
            }

            let reg_idx = if bar_id == VFIO_PCI_ROM_REGION_INDEX {
                PCI_ROM_EXP_BAR_INDEX
            } else {
                bar_id as usize
            };

            // We can now build our BAR configuration block.
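            // For the expansion ROM BAR, bit 0 of the probed flags is the
            // ROM enable bit; it is passed through to the local configuration
            // below so the guest sees the same enable state as the device.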
            let config = PciBarConfiguration::default()
                .set_register_index(reg_idx)
                .set_address(bar_addr.raw_value())
                .set_size(region_size)
                .set_region_type(region_type);

            if bar_id == VFIO_PCI_ROM_REGION_INDEX {
                self.configuration
                    .add_pci_rom_bar(&config, flags & 0x1)
                    .map_err(|e| PciDeviceError::IoRegistrationFailed(bar_addr.raw_value(), e))?;
            } else {
                self.configuration
                    .add_pci_bar(&config)
                    .map_err(|e| PciDeviceError::IoRegistrationFailed(bar_addr.raw_value(), e))?;
            }

            ranges.push((bar_addr, region_size, region_type));
            self.mmio_regions.push(MmioRegion {
                start: bar_addr,
                length: region_size,
                type_: region_type,
                index: bar_id as u32,
                mem_slot: None,
                host_addr: None,
                mmap_size: None,
                user_memory_regions: Vec::new(),
            });

            bar_id += 1;
            if is_64bit_bar {
                bar_id += 1;
            }
        }

        Ok(ranges)
    }

    pub(crate) fn free_bars(
        &mut self,
        allocator: &mut SystemAllocator,
        mmio_allocator: &mut AddressAllocator,
    ) -> Result<(), PciDeviceError> {
        for region in self.mmio_regions.iter() {
            match region.type_ {
                PciBarRegionType::IoRegion => {
                    #[cfg(target_arch = "x86_64")]
                    allocator.free_io_addresses(region.start, region.length);
                    #[cfg(target_arch = "aarch64")]
                    error!("I/O region is not supported");
                }
                PciBarRegionType::Memory32BitRegion => {
                    allocator.free_mmio_hole_addresses(region.start, region.length);
                }
                PciBarRegionType::Memory64BitRegion => {
                    mmio_allocator.free(region.start, region.length);
                }
            }
        }
        Ok(())
    }

    pub(crate) fn parse_msix_capabilities(
        &mut self,
        cap: u8,
        interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
        vfio_wrapper: &dyn Vfio,
        bdf: PciBdf,
    ) {
        let msg_ctl = vfio_wrapper.read_config_word((cap + 2).into());

        let table = vfio_wrapper.read_config_dword((cap + 4).into());

        let pba = vfio_wrapper.read_config_dword((cap + 8).into());

        let msix_cap = MsixCap {
            msg_ctl,
            table,
            pba,
        };

        let interrupt_source_group = interrupt_manager
            .create_group(MsiIrqGroupConfig {
                base: 0,
                count: msix_cap.table_size() as InterruptIndex,
            })
            .unwrap();

        let msix_config = MsixConfig::new(
            msix_cap.table_size(),
            interrupt_source_group.clone(),
            bdf.into(),
        );

        self.interrupt.msix = Some(VfioMsix {
            bar: msix_config,
            cap: msix_cap,
            cap_offset: cap.into(),
            interrupt_source_group,
        });
    }

    pub(crate) fn parse_msi_capabilities(
        &mut self,
        cap: u8,
        interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
        vfio_wrapper: &dyn Vfio,
    ) {
        let msg_ctl = vfio_wrapper.read_config_word((cap + 2).into());

        let interrupt_source_group = interrupt_manager
            .create_group(MsiIrqGroupConfig {
                base: 0,
                count: msi_num_enabled_vectors(msg_ctl) as InterruptIndex,
            })
            .unwrap();

        let msi_config = MsiConfig::new(msg_ctl, interrupt_source_group.clone());

        self.interrupt.msi = Some(VfioMsi {
            cfg: msi_config,
            cap_offset: cap.into(),
            interrupt_source_group,
        });
    }

    pub(crate) fn parse_capabilities(
        &mut self,
        interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
        vfio_wrapper: &dyn Vfio,
        bdf: PciBdf,
    ) {
        let mut cap_next = vfio_wrapper.read_config_byte(PCI_CONFIG_CAPABILITY_OFFSET);
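
        // Walk the standard PCI capability list. Each capability starts with
        // a one-byte capability ID followed by a one-byte pointer to the next
        // capability, which is why the loop below reads the ID at `cap_next`
        // and the next pointer at `cap_next + 1`. A next pointer of 0
        // terminates the list.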
        while cap_next != 0 {
            let cap_id = vfio_wrapper.read_config_byte(cap_next.into());

            match PciCapabilityId::from(cap_id) {
                PciCapabilityId::MessageSignalledInterrupts => {
                    if let Some(irq_info) = vfio_wrapper.get_irq_info(VFIO_PCI_MSI_IRQ_INDEX) {
                        if irq_info.count > 0 {
                            // Parse the capability only if the VFIO device
                            // supports MSI.
                            self.parse_msi_capabilities(cap_next, interrupt_manager, vfio_wrapper);
                        }
                    }
                }
                PciCapabilityId::MsiX => {
                    if let Some(irq_info) = vfio_wrapper.get_irq_info(VFIO_PCI_MSIX_IRQ_INDEX) {
                        if irq_info.count > 0 {
                            // Parse the capability only if the VFIO device
                            // supports MSI-X.
                            self.parse_msix_capabilities(
                                cap_next,
                                interrupt_manager,
                                vfio_wrapper,
                                bdf,
                            );
                        }
                    }
                }
                _ => {}
            };

            cap_next = vfio_wrapper.read_config_byte((cap_next + 1).into());
        }
    }
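
    // INTx, MSI and MSI-X are mutually exclusive: the enable/disable helpers
    // below are driven by update_msi_capabilities() and
    // update_msix_capabilities(), which disable INTx before enabling MSI or
    // MSI-X, and fall back onto INTx when MSI or MSI-X gets disabled.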
    pub(crate) fn enable_intx(&mut self, wrapper: &dyn Vfio) -> Result<(), VfioPciError> {
        if let Some(intx) = &mut self.interrupt.intx {
            if !intx.enabled {
                if let Some(eventfd) = intx.interrupt_source_group.notifier(0) {
                    wrapper
                        .enable_irq(VFIO_PCI_INTX_IRQ_INDEX, vec![&eventfd])
                        .map_err(VfioPciError::EnableIntx)?;

                    intx.enabled = true;
                } else {
                    return Err(VfioPciError::MissingNotifier);
                }
            }
        }

        Ok(())
    }

    pub(crate) fn disable_intx(&mut self, wrapper: &dyn Vfio) {
        if let Some(intx) = &mut self.interrupt.intx {
            if intx.enabled {
                if let Err(e) = wrapper.disable_irq(VFIO_PCI_INTX_IRQ_INDEX) {
                    error!("Could not disable INTx: {}", e);
                } else {
                    intx.enabled = false;
                }
            }
        }
    }

    pub(crate) fn enable_msi(&self, wrapper: &dyn Vfio) -> Result<(), VfioPciError> {
        if let Some(msi) = &self.interrupt.msi {
            let mut irq_fds: Vec<EventFd> = Vec::new();
            for i in 0..msi.cfg.num_enabled_vectors() {
                if let Some(eventfd) = msi.interrupt_source_group.notifier(i as InterruptIndex) {
                    irq_fds.push(eventfd);
                } else {
                    return Err(VfioPciError::MissingNotifier);
                }
            }

            wrapper
                .enable_msi(irq_fds.iter().collect())
                .map_err(VfioPciError::EnableMsi)?;
        }

        Ok(())
    }

    pub(crate) fn disable_msi(&self, wrapper: &dyn Vfio) {
        if let Err(e) = wrapper.disable_msi() {
            error!("Could not disable MSI: {}", e);
        }
    }

    pub(crate) fn enable_msix(&self, wrapper: &dyn Vfio) -> Result<(), VfioPciError> {
        if let Some(msix) = &self.interrupt.msix {
            let mut irq_fds: Vec<EventFd> = Vec::new();
            for i in 0..msix.bar.table_entries.len() {
                if let Some(eventfd) = msix.interrupt_source_group.notifier(i as InterruptIndex) {
                    irq_fds.push(eventfd);
                } else {
                    return Err(VfioPciError::MissingNotifier);
                }
            }

            wrapper
                .enable_msix(irq_fds.iter().collect())
                .map_err(VfioPciError::EnableMsix)?;
        }

        Ok(())
    }

    pub(crate) fn disable_msix(&self, wrapper: &dyn Vfio) {
        if let Err(e) = wrapper.disable_msix() {
            error!("Could not disable MSI-X: {}", e);
        }
    }

    pub(crate) fn initialize_legacy_interrupt(
        &mut self,
        legacy_interrupt_group: Option<Arc<dyn InterruptSourceGroup>>,
        wrapper: &dyn Vfio,
    ) -> Result<(), VfioPciError> {
        if let Some(irq_info) = wrapper.get_irq_info(VFIO_PCI_INTX_IRQ_INDEX) {
            if irq_info.count == 0 {
                // A count of 0 means the INTx IRQ is not supported, therefore
                // it shouldn't be initialized.
                return Ok(());
            }
        }

        if let Some(interrupt_source_group) = legacy_interrupt_group {
            self.interrupt.intx = Some(VfioIntx {
                interrupt_source_group,
                enabled: false,
            });

            self.enable_intx(wrapper)?;
        }

        Ok(())
    }

    pub(crate) fn update_msi_capabilities(
        &mut self,
        offset: u64,
        data: &[u8],
        wrapper: &dyn Vfio,
    ) -> Result<(), VfioPciError> {
        match self.interrupt.update_msi(offset, data) {
            Some(InterruptUpdateAction::EnableMsi) => {
                // Disable INTx before we can enable MSI.
                self.disable_intx(wrapper);
                self.enable_msi(wrapper)?;
            }
            Some(InterruptUpdateAction::DisableMsi) => {
                // Fall back onto INTx when disabling MSI.
                self.disable_msi(wrapper);
                self.enable_intx(wrapper)?;
            }
            _ => {}
        }

        Ok(())
    }

    pub(crate) fn update_msix_capabilities(
        &mut self,
        offset: u64,
        data: &[u8],
        wrapper: &dyn Vfio,
    ) -> Result<(), VfioPciError> {
        match self.interrupt.update_msix(offset, data) {
            Some(InterruptUpdateAction::EnableMsix) => {
                // Disable INTx before we can enable MSI-X.
                self.disable_intx(wrapper);
                self.enable_msix(wrapper)?;
            }
            Some(InterruptUpdateAction::DisableMsix) => {
                // Fall back onto INTx when disabling MSI-X.
                self.disable_msix(wrapper);
                self.enable_intx(wrapper)?;
            }
            _ => {}
        }

        Ok(())
    }

    pub(crate) fn find_region(&self, addr: u64) -> Option<MmioRegion> {
        for region in self.mmio_regions.iter() {
            if addr >= region.start.raw_value()
                && addr < region.start.unchecked_add(region.length).raw_value()
            {
                return Some(region.clone());
            }
        }
        None
    }
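
    // BAR accesses are dispatched in two ways: accesses falling within the
    // MSI-X table are served from (or update) the local MsixConfig cache,
    // while everything else is forwarded to the VFIO region. While INTx is
    // in use, every BAR access also unmasks the IRQ, since the guest
    // touching the BAR likely means the interrupt has been handled (EOI).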
    pub(crate) fn read_bar(&mut self, base: u64, offset: u64, data: &mut [u8], wrapper: &dyn Vfio) {
        let addr = base + offset;
        if let Some(region) = self.find_region(addr) {
            let offset = addr - region.start.raw_value();

            if self.interrupt.msix_table_accessed(region.index, offset) {
                self.interrupt.msix_read_table(offset, data);
            } else {
                wrapper.region_read(region.index, offset, data);
            }
        }

        // INTx EOI
        // The guest reading from the BAR potentially means the interrupt has
        // been received and can be acknowledged.
        if self.interrupt.intx_in_use() {
            if let Err(e) = wrapper.unmask_irq(VFIO_PCI_INTX_IRQ_INDEX) {
                error!("Failed unmasking INTx IRQ: {}", e);
            }
        }
    }

    pub(crate) fn write_bar(
        &mut self,
        base: u64,
        offset: u64,
        data: &[u8],
        wrapper: &dyn Vfio,
    ) -> Option<Arc<Barrier>> {
        let addr = base + offset;
        if let Some(region) = self.find_region(addr) {
            let offset = addr - region.start.raw_value();

            // If the MSI-X table is written to, we need to update our cache.
            if self.interrupt.msix_table_accessed(region.index, offset) {
                self.interrupt.msix_write_table(offset, data);
            } else {
                wrapper.region_write(region.index, offset, data);
            }
        }

        // INTx EOI
        // The guest writing to the BAR potentially means the interrupt has
        // been received and can be acknowledged.
        if self.interrupt.intx_in_use() {
            if let Err(e) = wrapper.unmask_irq(VFIO_PCI_INTX_IRQ_INDEX) {
                error!("Failed unmasking INTx IRQ: {}", e);
            }
        }

        None
    }

    pub(crate) fn write_config_register(
        &mut self,
        reg_idx: usize,
        offset: u64,
        data: &[u8],
        wrapper: &dyn Vfio,
    ) -> Option<Arc<Barrier>> {
        // When the guest wants to write to a BAR, we trap it into
        // our local configuration space. We're not reprogramming the
        // VFIO device.
        if (PCI_CONFIG_BAR0_INDEX..PCI_CONFIG_BAR0_INDEX + BAR_NUMS).contains(&reg_idx)
            || reg_idx == PCI_ROM_EXP_BAR_INDEX
        {
            // We keep our local cache updated with the BARs.
            // We'll read it back from there when the guest is asking
            // for BARs (see read_config_register()).
            self.configuration
                .write_config_register(reg_idx, offset, data);
            return None;
        }

        let reg = (reg_idx * PCI_CONFIG_REGISTER_SIZE) as u64;

        // If the MSI or MSI-X capabilities are accessed, we need to
        // update our local cache accordingly.
        // Depending on how the capabilities are modified, this could
        // trigger a VFIO MSI or MSI-X toggle.
        if let Some((cap_id, cap_base)) = self.interrupt.accessed(reg) {
            let cap_offset: u64 = reg - cap_base + offset;
            match cap_id {
                PciCapabilityId::MessageSignalledInterrupts => {
                    if let Err(e) = self.update_msi_capabilities(cap_offset, data, wrapper) {
                        error!("Could not update MSI capabilities: {}", e);
                    }
                }
                PciCapabilityId::MsiX => {
                    if let Err(e) = self.update_msix_capabilities(cap_offset, data, wrapper) {
                        error!("Could not update MSI-X capabilities: {}", e);
                    }
                }
                _ => {}
            }
        }

        // Make sure to write to the device's PCI config space after MSI/MSI-X
        // interrupts have been enabled/disabled. In case of MSI, when the
        // interrupts are enabled through VFIO (using VFIO_DEVICE_SET_IRQS),
        // the MSI Enable bit in the MSI capability structure found in the PCI
        // config space is disabled by default. That's why when the guest is
        // enabling this bit, we first need to enable the MSI interrupts with
        // VFIO through the VFIO_DEVICE_SET_IRQS ioctl, and only then can we
        // write to the device region to update the MSI Enable bit.
        wrapper.write_config((reg + offset) as u32, data);

        None
    }

    pub(crate) fn read_config_register(&mut self, reg_idx: usize, wrapper: &dyn Vfio) -> u32 {
        // When reading the BARs, we trap it and return what comes
        // from our local configuration space. We want the guest to
        // use that and not the VFIO device BARs, as those do not map
        // into the guest address space.
        if (PCI_CONFIG_BAR0_INDEX..PCI_CONFIG_BAR0_INDEX + BAR_NUMS).contains(&reg_idx)
            || reg_idx == PCI_ROM_EXP_BAR_INDEX
        {
            return self.configuration.read_reg(reg_idx);
        }

        // Since we don't support passing multi-function devices, we should
        // mask the multi-function bit, bit 7 of the Header Type byte in
        // register 3.
        let mask = if reg_idx == PCI_HEADER_TYPE_REG_INDEX {
            0xff7f_ffff
        } else {
            0xffff_ffff
        };

        // The config register read comes from the VFIO device itself.
        wrapper.read_config_dword((reg_idx * 4) as u32) & mask
    }
}

/// VfioPciDevice represents a VFIO PCI device.
/// This structure implements the BusDevice and PciDevice traits.
///
/// A VfioPciDevice is bound to a VfioDevice and is also a PCI device.
/// The VMM creates a VfioDevice, then assigns it to a VfioPciDevice,
/// which then gets added to the PCI bus.
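///
/// A rough usage sketch (assuming a `vm`, a VFIO `device` with its
/// `container`, an MSI `interrupt_manager` and a `bdf` are already at hand;
/// error handling elided):
///
/// ```ignore
/// let vfio_pci_device = VfioPciDevice::new(
///     &vm,
///     device,
///     container,
///     &interrupt_manager,
///     None,  // no legacy (INTx) interrupt group
///     false, // not attached to a virtual IOMMU
///     bdf,
/// )?;
/// ```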
pub struct VfioPciDevice {
    vm: Arc<dyn hypervisor::Vm>,
    device: Arc<VfioDevice>,
    container: Arc<VfioContainer>,
    vfio_wrapper: VfioDeviceWrapper,
    common: VfioCommon,
    iommu_attached: bool,
}

impl VfioPciDevice {
    /// Constructs a new VFIO PCI device for the given VFIO device.
    pub fn new(
        vm: &Arc<dyn hypervisor::Vm>,
        device: VfioDevice,
        container: Arc<VfioContainer>,
        msi_interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
        legacy_interrupt_group: Option<Arc<dyn InterruptSourceGroup>>,
        iommu_attached: bool,
        bdf: PciBdf,
    ) -> Result<Self, VfioPciError> {
        let device = Arc::new(device);
        device.reset();

        let configuration = PciConfiguration::new(
            0,
            0,
            0,
            PciClassCode::Other,
            &PciVfioSubclass::VfioSubclass,
            None,
            PciHeaderType::Device,
            0,
            0,
            None,
        );

        let vfio_wrapper = VfioDeviceWrapper::new(Arc::clone(&device));

        let mut common = VfioCommon {
            mmio_regions: Vec::new(),
            configuration,
            interrupt: Interrupt {
                intx: None,
                msi: None,
                msix: None,
            },
        };

        common.parse_capabilities(msi_interrupt_manager, &vfio_wrapper, bdf);
        common.initialize_legacy_interrupt(legacy_interrupt_group, &vfio_wrapper)?;

        let vfio_pci_device = VfioPciDevice {
            vm: vm.clone(),
            device,
            container,
            vfio_wrapper,
            common,
            iommu_attached,
        };

        Ok(vfio_pci_device)
    }

    pub fn iommu_attached(&self) -> bool {
        self.iommu_attached
    }

    // Round an address up to the next 4 KiB boundary, e.g.
    // align_4k(0x1001) == 0x2000, while align_4k(0x1000) == 0x1000.
    fn align_4k(address: u64) -> u64 {
        (address + 0xfff) & 0xffff_ffff_ffff_f000
    }

    fn is_4k_aligned(address: u64) -> bool {
        (address & 0xfff) == 0
    }

    fn is_4k_multiple(size: u64) -> bool {
        (size & 0xfff) == 0
    }

    fn generate_user_memory_regions<F>(
        region_index: u32,
        region_start: u64,
        region_size: u64,
        host_addr: u64,
        mem_slot: F,
        vfio_msix: Option<&VfioMsix>,
    ) -> Vec<UserMemoryRegion>
    where
        F: Fn() -> u32,
    {
        if !Self::is_4k_aligned(region_start) {
            error!(
                "Region start address 0x{:x} must be 4 KiB aligned",
                region_start
            );
        }
        if !Self::is_4k_multiple(region_size) {
            error!(
                "Region size 0x{:x} must be a multiple of 4 KiB",
                region_size
            );
        }

        // Use a BTreeMap so that iterating over the ranges is sorted by key,
        // which ensures the whole region is split properly.
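        //
        // As an illustrative example (values assumed, not taken from real
        // hardware): for a 64 KiB BAR at guest address 0x1_0000 whose MSI-X
        // table lives at offset 0x8000 with a size of 0x100 bytes, the split
        // below produces two mappable regions, [0x1_0000, 0x1_8000) and
        // [0x1_9000, 0x2_0000), leaving the 4 KiB page holding the table
        // unmapped so accesses to it still trap.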
        let mut inter_ranges = BTreeMap::new();
        if let Some(msix) = vfio_msix {
            if region_index == msix.cap.table_bir() {
                let (offset, size) = msix.cap.table_range();
                let base = region_start + offset;
                inter_ranges.insert(base, size);
            }
            if region_index == msix.cap.pba_bir() {
                let (offset, size) = msix.cap.pba_range();
                let base = region_start + offset;
                inter_ranges.insert(base, size);
            }
        }

        let mut user_memory_regions = Vec::new();
        let mut new_start = region_start;
        for (range_start, range_size) in inter_ranges {
            if range_start > new_start {
                user_memory_regions.push(UserMemoryRegion {
                    slot: mem_slot(),
                    start: new_start,
                    size: range_start - new_start,
                    host_addr: host_addr + new_start - region_start,
                });
            }

            new_start = Self::align_4k(range_start + range_size);
        }

        if region_start + region_size > new_start {
            user_memory_regions.push(UserMemoryRegion {
                slot: mem_slot(),
                start: new_start,
                size: region_start + region_size - new_start,
                host_addr: host_addr + new_start - region_start,
            });
        }

        user_memory_regions
    }

    /// Map MMIO regions into the guest, and avoid VM exits when the guest
    /// tries to reach those regions.
    ///
    /// # Arguments
    ///
    /// * `vm` - The VM object. It is used to set the VFIO MMIO regions
    ///   as user memory regions.
    /// * `mem_slot` - The closure to return a memory slot.
    pub fn map_mmio_regions<F>(
        &mut self,
        vm: &Arc<dyn hypervisor::Vm>,
        mem_slot: F,
    ) -> Result<(), VfioPciError>
    where
        F: Fn() -> u32,
    {
        let fd = self.device.as_raw_fd();

        for region in self.common.mmio_regions.iter_mut() {
            let region_flags = self.device.get_region_flags(region.index);
            if region_flags & VFIO_REGION_INFO_FLAG_MMAP != 0 {
                let mut prot = 0;
                if region_flags & VFIO_REGION_INFO_FLAG_READ != 0 {
                    prot |= libc::PROT_READ;
                }
                if region_flags & VFIO_REGION_INFO_FLAG_WRITE != 0 {
                    prot |= libc::PROT_WRITE;
                }

                // Retrieve the list of capabilities found on the region.
                let caps = if region_flags & VFIO_REGION_INFO_FLAG_CAPS != 0 {
                    self.device.get_region_caps(region.index)
                } else {
                    Vec::new()
                };

                // Don't try to mmap the region if it contains the MSI-X table
                // or the MSI-X PBA subregion, and MSIX_MAPPABLE couldn't be
                // found in the list of supported capabilities.
                if let Some(msix) = self.common.interrupt.msix.as_ref() {
                    if (region.index == msix.cap.table_bir() || region.index == msix.cap.pba_bir())
                        && !caps.contains(&VfioRegionInfoCap::MsixMappable)
                    {
                        continue;
                    }
                }

                let mmap_size = self.device.get_region_size(region.index);
                let offset = self.device.get_region_offset(region.index);

                let host_addr = unsafe {
                    libc::mmap(
                        null_mut(),
                        mmap_size as usize,
                        prot,
                        libc::MAP_SHARED,
                        fd,
                        offset as libc::off_t,
                    )
                };

                if host_addr == libc::MAP_FAILED {
                    error!(
                        "Could not mmap region index {}: {}",
                        region.index,
                        io::Error::last_os_error()
                    );
                    continue;
                }

                // In case the region that is being mapped contains the MSI-X
                // vectors table or the MSI-X PBA table, we must adjust what
                // is being declared through the hypervisor. We want to make
                // sure we will still trap MMIO accesses to these MSI-X
                // specific ranges.
                let user_memory_regions = Self::generate_user_memory_regions(
                    region.index,
                    region.start.raw_value(),
                    mmap_size,
                    host_addr as u64,
                    &mem_slot,
                    self.common.interrupt.msix.as_ref(),
                );
                for user_memory_region in user_memory_regions.iter() {
                    let mem_region = vm.make_user_memory_region(
                        user_memory_region.slot,
                        user_memory_region.start,
                        user_memory_region.size,
                        user_memory_region.host_addr,
                        false,
                        false,
                    );

                    vm.create_user_memory_region(mem_region)
                        .map_err(VfioPciError::MapRegionGuest)?;
                }

                // Update the region with the memory mapped info.
                region.host_addr = Some(host_addr as u64);
                region.mmap_size = Some(mmap_size as usize);
                region.user_memory_regions = user_memory_regions;
            }
        }

        Ok(())
    }

    pub fn unmap_mmio_regions(&mut self) {
        for region in self.common.mmio_regions.iter() {
            for user_memory_region in region.user_memory_regions.iter() {
                // Remove the region.
                let r = self.vm.make_user_memory_region(
                    user_memory_region.slot,
                    user_memory_region.start,
                    user_memory_region.size,
                    user_memory_region.host_addr,
                    false,
                    false,
                );

                if let Err(e) = self.vm.remove_user_memory_region(r) {
                    error!("Could not remove the userspace memory region: {}", e);
                }
            }

            if let (Some(host_addr), Some(mmap_size)) = (region.host_addr, region.mmap_size) {
                let ret = unsafe { libc::munmap(host_addr as *mut libc::c_void, mmap_size) };
                if ret != 0 {
                    error!(
                        "Could not unmap region {}, error: {}",
                        region.index,
                        io::Error::last_os_error()
                    );
                }
            }
        }
    }

    pub fn dma_map(&self, iova: u64, size: u64, user_addr: u64) -> Result<(), VfioPciError> {
        if !self.iommu_attached {
            self.container
                .vfio_dma_map(iova, size, user_addr)
                .map_err(VfioPciError::DmaMap)?;
        }

        Ok(())
    }

    pub fn dma_unmap(&self, iova: u64, size: u64) -> Result<(), VfioPciError> {
        if !self.iommu_attached {
            self.container
                .vfio_dma_unmap(iova, size)
                .map_err(VfioPciError::DmaUnmap)?;
        }

        Ok(())
    }

    pub fn mmio_regions(&self) -> Vec<MmioRegion> {
        self.common.mmio_regions.clone()
    }
}

impl Drop for VfioPciDevice {
    fn drop(&mut self) {
        self.unmap_mmio_regions();

        if let Some(msix) = &self.common.interrupt.msix {
            if msix.bar.enabled() {
                self.common.disable_msix(&self.vfio_wrapper);
            }
        }

        if let Some(msi) = &self.common.interrupt.msi {
            if msi.cfg.enabled() {
                self.common.disable_msi(&self.vfio_wrapper)
            }
        }

        if self.common.interrupt.intx_in_use() {
            self.common.disable_intx(&self.vfio_wrapper);
        }
    }
}

impl BusDevice for VfioPciDevice {
    fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {
        self.read_bar(base, offset, data)
    }

    fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        self.write_bar(base, offset, data)
    }
}

// First BAR offset in the PCI config space.
const PCI_CONFIG_BAR_OFFSET: u32 = 0x10;
// Capability register offset in the PCI config space.
const PCI_CONFIG_CAPABILITY_OFFSET: u32 = 0x34;
// I/O BAR when the first BAR bit is 1.
const PCI_CONFIG_IO_BAR: u32 = 0x1;
// 64-bit memory BAR flag.
const PCI_CONFIG_MEMORY_BAR_64BIT: u32 = 0x4;
// PCI config register size (4 bytes).
const PCI_CONFIG_REGISTER_SIZE: usize = 4;
// Number of BARs for a PCI device.
const BAR_NUMS: usize = 6;
// PCI Header Type register index.
const PCI_HEADER_TYPE_REG_INDEX: usize = 3;
// First BAR register index.
const PCI_CONFIG_BAR0_INDEX: usize = 4;
// PCI ROM expansion BAR register index.
const PCI_ROM_EXP_BAR_INDEX: usize = 12;

impl PciDevice for VfioPciDevice {
    fn allocate_bars(
        &mut self,
        allocator: &Arc<Mutex<SystemAllocator>>,
        mmio_allocator: &mut AddressAllocator,
    ) -> Result<Vec<(GuestAddress, GuestUsize, PciBarRegionType)>, PciDeviceError> {
        self.common
            .allocate_bars(allocator, mmio_allocator, &self.vfio_wrapper)
    }

    fn free_bars(
        &mut self,
        allocator: &mut SystemAllocator,
        mmio_allocator: &mut AddressAllocator,
    ) -> Result<(), PciDeviceError> {
        self.common.free_bars(allocator, mmio_allocator)
    }

    fn write_config_register(
        &mut self,
        reg_idx: usize,
        offset: u64,
        data: &[u8],
    ) -> Option<Arc<Barrier>> {
        self.common
            .write_config_register(reg_idx, offset, data, &self.vfio_wrapper)
    }

    fn read_config_register(&mut self, reg_idx: usize) -> u32 {
        self.common
            .read_config_register(reg_idx, &self.vfio_wrapper)
    }

    fn detect_bar_reprogramming(
        &mut self,
        reg_idx: usize,
        data: &[u8],
    ) -> Option<BarReprogrammingParams> {
        self.common
            .configuration
            .detect_bar_reprogramming(reg_idx, data)
    }

    fn read_bar(&mut self, base: u64, offset: u64, data: &mut [u8]) {
        self.common.read_bar(base, offset, data, &self.vfio_wrapper)
    }

    fn write_bar(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        self.common
            .write_bar(base, offset, data, &self.vfio_wrapper)
    }

    fn move_bar(&mut self, old_base: u64, new_base: u64) -> Result<(), io::Error> {
        for region in self.common.mmio_regions.iter_mut() {
            if region.start.raw_value() == old_base {
                region.start = GuestAddress(new_base);

                for user_memory_region in region.user_memory_regions.iter_mut() {
                    // Remove the old region.
                    let old_mem_region = self.vm.make_user_memory_region(
                        user_memory_region.slot,
                        user_memory_region.start,
                        user_memory_region.size,
                        user_memory_region.host_addr,
                        false,
                        false,
                    );

                    self.vm
                        .remove_user_memory_region(old_mem_region)
                        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;

                    // Update the user memory region with the correct start address.
                    if new_base > old_base {
                        user_memory_region.start += new_base - old_base;
                    } else {
                        user_memory_region.start -= old_base - new_base;
                    }

                    // Insert the new region.
                    let new_mem_region = self.vm.make_user_memory_region(
                        user_memory_region.slot,
                        user_memory_region.start,
                        user_memory_region.size,
                        user_memory_region.host_addr,
                        false,
                        false,
                    );

                    self.vm
                        .create_user_memory_region(new_mem_region)
                        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
                }
            }
        }

        Ok(())
    }

    fn as_any(&mut self) -> &mut dyn Any {
        self
    }
}
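
// A minimal sanity-check sketch (not part of the original file) exercising
// the private 4 KiB alignment helpers used when splitting BARs around the
// MSI-X table and PBA.
#[cfg(test)]
mod tests {
    use super::VfioPciDevice;

    #[test]
    fn test_4k_alignment_helpers() {
        // Already aligned addresses are left untouched.
        assert_eq!(VfioPciDevice::align_4k(0x1000), 0x1000);
        // Unaligned addresses are rounded up to the next 4 KiB boundary.
        assert_eq!(VfioPciDevice::align_4k(0x1001), 0x2000);

        assert!(VfioPciDevice::is_4k_aligned(0x2000));
        assert!(!VfioPciDevice::is_4k_aligned(0x2001));

        assert!(VfioPciDevice::is_4k_multiple(0x3000));
        assert!(!VfioPciDevice::is_4k_multiple(0x3800));
    }
}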