/*
 * device quirks for PCI devices
 *
 * Copyright Red Hat, Inc. 2012-2015
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "pci.h"
#include "trace.h"
#include "qemu/range.h"

#define PCI_ANY_ID (~0)

/* Use uint32_t for vendor & device so PCI_ANY_ID expands and cannot match hw */
static bool vfio_pci_is(VFIOPCIDevice *vdev, uint32_t vendor, uint32_t device)
{
    PCIDevice *pdev = &vdev->pdev;

    return (vendor == PCI_ANY_ID ||
            vendor == pci_get_word(pdev->config + PCI_VENDOR_ID)) &&
           (device == PCI_ANY_ID ||
            device == pci_get_word(pdev->config + PCI_DEVICE_ID));
}

static bool vfio_is_vga(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

    return class == PCI_CLASS_DISPLAY_VGA;
}

/*
 * List of device ids/vendor ids for which to disable
 * option rom loading.  This avoids guest hangs during rom
 * execution, as noticed with the BCM 57810 card, for lack of a
 * better way to handle such issues.
 * The user can still override by specifying a romfile or
 * rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang.  When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue.
 */
static const struct {
    uint32_t vendor;
    uint32_t device;
} romblacklist[] = {
    { 0x14e4, 0x168e }, /* Broadcom BCM 57810 */
};

bool vfio_blacklist_opt_rom(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0 ; i < ARRAY_SIZE(romblacklist); i++) {
        if (vfio_pci_is(vdev, romblacklist[i].vendor, romblacklist[i].device)) {
            trace_vfio_quirk_rom_blacklisted(vdev->vbasedev.name,
                                             romblacklist[i].vendor,
                                             romblacklist[i].device);
            return true;
        }
    }
    return false;
}

/*
 * Device specific region quirks (mostly backdoors to PCI config space)
 */

/*
 * The generic window quirks operate on an address and data register,
 * vfio_generic_window_address_quirk handles the address register and
 * vfio_generic_window_data_quirk handles the data register.  These ops
 * pass reads and writes through to hardware until a value matching the
 * stored address match/mask is written.  When this occurs, the data
 * register accesses emulated PCI config space for the device rather than
 * passing through to hardware.  This enables devices where PCI config
 * space is accessible behind a window register to maintain the
 * virtualization provided through vfio.
 */
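/*
 * For example (illustrative only, using the parameters programmed by the
 * ATI BAR4 quirk below: address register at offset 0x0, data register at
 * 0x4, match 0x4000, mask 0xfff):  a guest write of 0x4010 to the address
 * register matches, so the window is enabled with address_val = 0x10, and
 * a subsequent access to the data register is serviced from emulated PCI
 * config space at offset 0x10 rather than from the hardware behind the
 * BAR.  Writing a non-matching address (e.g. 0x8000) disables the window
 * and data register accesses pass through to hardware again.
 */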
typedef struct VFIOConfigWindowMatch {
    uint32_t match;
    uint32_t mask;
} VFIOConfigWindowMatch;

typedef struct VFIOConfigWindowQuirk {
    struct VFIOPCIDevice *vdev;

    uint32_t address_val;

    uint32_t address_offset;
    uint32_t data_offset;

    bool window_enabled;
    uint8_t bar;

    MemoryRegion *addr_mem;
    MemoryRegion *data_mem;

    uint32_t nr_matches;
    VFIOConfigWindowMatch matches[];
} VFIOConfigWindowQuirk;

static uint64_t vfio_generic_window_quirk_address_read(void *opaque,
                                                       hwaddr addr,
                                                       unsigned size)
{
    VFIOConfigWindowQuirk *window = opaque;
    VFIOPCIDevice *vdev = window->vdev;

    return vfio_region_read(&vdev->bars[window->bar].region,
                            addr + window->address_offset, size);
}

static void vfio_generic_window_quirk_address_write(void *opaque, hwaddr addr,
                                                    uint64_t data,
                                                    unsigned size)
{
    VFIOConfigWindowQuirk *window = opaque;
    VFIOPCIDevice *vdev = window->vdev;
    int i;

    window->window_enabled = false;

    vfio_region_write(&vdev->bars[window->bar].region,
                      addr + window->address_offset, data, size);

    for (i = 0; i < window->nr_matches; i++) {
        if ((data & ~window->matches[i].mask) == window->matches[i].match) {
            window->window_enabled = true;
            window->address_val = data & window->matches[i].mask;
            trace_vfio_quirk_generic_window_address_write(vdev->vbasedev.name,
                                    memory_region_name(window->addr_mem), data);
            break;
        }
    }
}

static const MemoryRegionOps vfio_generic_window_address_quirk = {
    .read = vfio_generic_window_quirk_address_read,
    .write = vfio_generic_window_quirk_address_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_generic_window_quirk_data_read(void *opaque,
                                                    hwaddr addr, unsigned size)
{
    VFIOConfigWindowQuirk *window = opaque;
    VFIOPCIDevice *vdev = window->vdev;
    uint64_t data;

    /* Always read data reg, discard if window enabled */
    data = vfio_region_read(&vdev->bars[window->bar].region,
                            addr + window->data_offset, size);

    if (window->window_enabled) {
        data = vfio_pci_read_config(&vdev->pdev, window->address_val, size);
        trace_vfio_quirk_generic_window_data_read(vdev->vbasedev.name,
                                    memory_region_name(window->data_mem), data);
    }

    return data;
}

static void vfio_generic_window_quirk_data_write(void *opaque, hwaddr addr,
                                                 uint64_t data, unsigned size)
{
    VFIOConfigWindowQuirk *window = opaque;
    VFIOPCIDevice *vdev = window->vdev;

    if (window->window_enabled) {
        vfio_pci_write_config(&vdev->pdev, window->address_val, data, size);
        trace_vfio_quirk_generic_window_data_write(vdev->vbasedev.name,
                                    memory_region_name(window->data_mem), data);
        return;
    }

    vfio_region_write(&vdev->bars[window->bar].region,
                      addr + window->data_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_data_quirk = {
    .read = vfio_generic_window_quirk_data_read,
    .write = vfio_generic_window_quirk_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * The generic mirror quirk handles devices which expose PCI config space
 * through a region within a BAR.  When enabled, reads and writes are
 * redirected through to emulated PCI config space.  XXX if PCI config space
 * used memory regions, this could just be an alias.
 */
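/*
 * For example (illustrative): the ATI BAR2 quirk below sets offset = 0x4000
 * and bar = 2, so a guest access to BAR2 + 0x4000 + N (N < 256) lands here
 * with addr = N and is serviced from emulated config space offset N instead
 * of the device memory behind the BAR.
 */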
typedef struct VFIOConfigMirrorQuirk {
    struct VFIOPCIDevice *vdev;
    uint32_t offset;
    uint8_t bar;
    MemoryRegion *mem;
} VFIOConfigMirrorQuirk;

static uint64_t vfio_generic_quirk_mirror_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOConfigMirrorQuirk *mirror = opaque;
    VFIOPCIDevice *vdev = mirror->vdev;
    uint64_t data;

    /* Read and discard in case the hardware cares */
    (void)vfio_region_read(&vdev->bars[mirror->bar].region,
                           addr + mirror->offset, size);

    data = vfio_pci_read_config(&vdev->pdev, addr, size);
    trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name,
                                         memory_region_name(mirror->mem),
                                         addr, data);
    return data;
}

static void vfio_generic_quirk_mirror_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOConfigMirrorQuirk *mirror = opaque;
    VFIOPCIDevice *vdev = mirror->vdev;

    vfio_pci_write_config(&vdev->pdev, addr, data, size);
    trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name,
                                          memory_region_name(mirror->mem),
                                          addr, data);
}

static const MemoryRegionOps vfio_generic_mirror_quirk = {
    .read = vfio_generic_quirk_mirror_read,
    .write = vfio_generic_quirk_mirror_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2)
{
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}

static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}

static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOLegacyQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s",
                     __func__, memory_region_name(quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        trace_vfio_generic_window_quirk_read(memory_region_name(quirk->mem),
                                             vdev->vbasedev.name,
                                             quirk->data.bar,
                                             addr, size, data);
    } else {
        data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + quirk->data.base_offset, size);
    }

    return data;
}

static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOLegacyQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s",
                     __func__, memory_region_name(quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags,
                           quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s",
                     __func__, memory_region_name(quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        trace_vfio_generic_window_quirk_write(memory_region_name(quirk->mem),
                                              vdev->vbasedev.name,
                                              quirk->data.bar,
                                              addr, data, size);
        return;
    }

    vfio_region_write(&vdev->bars[quirk->data.bar].region,
                      addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOLegacyQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s",
                     __func__, memory_region_name(quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        trace_vfio_generic_quirk_read(memory_region_name(quirk->mem),
                                      vdev->vbasedev.name, quirk->data.bar,
                                      addr + base, size, data);
    } else {
        data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
                                addr + base, size);
    }

    return data;
}

static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOLegacyQuirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s",
                     __func__, memory_region_name(quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        trace_vfio_generic_quirk_write(memory_region_name(quirk->mem),
                                       vdev->vbasedev.name, quirk->data.bar,
                                       addr + base, data, size);
    } else {
        vfio_region_write(&vdev->bars[quirk->data.bar].region,
                          addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
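/*
 * The probe below requires BAR4 to be an I/O BAR of at least 256 bytes, so
 * the BAR address is 256-byte aligned and its low byte is zero.  Returning
 * byte 1 of the emulated BAR4 value (bits 8:15 of the I/O port address) is
 * therefore enough to reproduce what register 0x3c3 is expected to report.
 */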
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_4 + 1, size);

    trace_vfio_quirk_ati_3c3_read(vdev->vbasedev.name, data);

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->bars[4].ioport || vdev->bars[4].region.size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->mem = g_malloc0_n(sizeof(MemoryRegion), 1);
    quirk->nr_mem = 1;

    memory_region_init_io(quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, vdev,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_quirk_ati_3c3_probe(vdev->vbasedev.name);
}

/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a mirror to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff,
 * PCI configuration space is available.  Experimentation seems to indicate
 * that only read-only access may be provided by hardware.
 */
static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOConfigWindowQuirk *window;

    /* This window doesn't seem to be used except by legacy VGA code */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->has_vga || nr != 4) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->mem = g_malloc0_n(sizeof(MemoryRegion), 2);
    quirk->nr_mem = 2;
    window = quirk->data = g_malloc0(sizeof(*window) +
                                     sizeof(VFIOConfigWindowMatch));
    window->vdev = vdev;
    window->address_offset = 0;
    window->data_offset = 4;
    window->nr_matches = 1;
    window->matches[0].match = 0x4000;
    window->matches[0].mask = PCIE_CONFIG_SPACE_SIZE - 1;
    window->bar = nr;
    window->addr_mem = &quirk->mem[0];
    window->data_mem = &quirk->mem[1];

    memory_region_init_io(window->addr_mem, OBJECT(vdev),
                          &vfio_generic_window_address_quirk, window,
                          "vfio-ati-bar4-window-address-quirk", 4);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        window->address_offset,
                                        window->addr_mem, 1);

    memory_region_init_io(window->data_mem, OBJECT(vdev),
                          &vfio_generic_window_data_quirk, window,
                          "vfio-ati-bar4-window-data-quirk", 4);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        window->data_offset,
                                        window->data_mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_ati_bar4_probe(vdev->vbasedev.name);
}

/*
 * Trap the BAR2 MMIO mirror to config space as well.
 */
static void vfio_probe_ati_bar2_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOConfigMirrorQuirk *mirror;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->has_vga || nr != 2 || !vdev->bars[2].mem64) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    mirror = quirk->data = g_malloc0(sizeof(*mirror));
    mirror->mem = quirk->mem = g_malloc0_n(sizeof(MemoryRegion), 1);
    quirk->nr_mem = 1;
    mirror->vdev = vdev;
    mirror->offset = 0x4000;
    mirror->bar = nr;

    memory_region_init_io(mirror->mem, OBJECT(vdev),
                          &vfio_generic_mirror_quirk, mirror,
                          "vfio-ati-bar2-4000-quirk", PCI_CONFIG_SPACE_SIZE);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        mirror->offset, mirror->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_ati_bar2_probe(vdev->vbasedev.name);
}

/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

#define PCI_VENDOR_ID_NVIDIA 0x10de

/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written to 0x3d4 for a read,
 * or 0x738 for a write.  The BAR0 offset is then accessible through 0x3d0.
 * This quirk doesn't seem to be necessary on newer cards that use the
 * I/O port BAR5 window, but it doesn't hurt to leave it.
 */
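/*
 * The handlers below track that sequence with a small state machine:
 * NONE -> SELECT on a write of 0x338 to 0x3d4, SELECT -> WINDOW when the
 * offset is written to 0x3d0, WINDOW -> READ/WRITE on a write of
 * 0x538/0x738 to 0x3d4, and back to NONE once the following 0x3d0 access
 * has been serviced (or on any access that breaks the sequence).  Only
 * offsets within the 0x1800 config mirror are redirected to emulated
 * config space; everything else passes through to the VGA region.
 */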
typedef enum {NONE = 0, SELECT, WINDOW, READ, WRITE} VFIONvidia3d0State;
static const char *nv3d0_states[] = { "NONE", "SELECT",
                                      "WINDOW", "READ", "WRITE" };

typedef struct VFIONvidia3d0Quirk {
    VFIOPCIDevice *vdev;
    VFIONvidia3d0State state;
    uint32_t offset;
} VFIONvidia3d0Quirk;

static uint64_t vfio_nvidia_3d4_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIONvidia3d0Quirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;

    quirk->state = NONE;

    return vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                         addr + 0x14, size);
}

static void vfio_nvidia_3d4_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIONvidia3d0Quirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    VFIONvidia3d0State old_state = quirk->state;

    quirk->state = NONE;

    switch (data) {
    case 0x338:
        if (old_state == NONE) {
            quirk->state = SELECT;
            trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
                                              nv3d0_states[quirk->state]);
        }
        break;
    case 0x538:
        if (old_state == WINDOW) {
            quirk->state = READ;
            trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
                                              nv3d0_states[quirk->state]);
        }
        break;
    case 0x738:
        if (old_state == WINDOW) {
            quirk->state = WRITE;
            trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
                                              nv3d0_states[quirk->state]);
        }
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + 0x14, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d4_quirk = {
    .read = vfio_nvidia_3d4_quirk_read,
    .write = vfio_nvidia_3d4_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIONvidia3d0Quirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    VFIONvidia3d0State old_state = quirk->state;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + 0x10, size);

    quirk->state = NONE;

    if (old_state == READ &&
        (quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) {
        uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1);

        data = vfio_pci_read_config(&vdev->pdev, offset, size);
        trace_vfio_quirk_nvidia_3d0_read(vdev->vbasedev.name,
                                         offset, size, data);
    }

    return data;
}

static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIONvidia3d0Quirk *quirk = opaque;
    VFIOPCIDevice *vdev = quirk->vdev;
    VFIONvidia3d0State old_state = quirk->state;

    quirk->state = NONE;

    if (old_state == SELECT) {
        quirk->offset = (uint32_t)data;
        quirk->state = WINDOW;
        trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
                                          nv3d0_states[quirk->state]);
    } else if (old_state == WRITE) {
        if ((quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) {
            uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1);

            vfio_pci_write_config(&vdev->pdev, offset, data, size);
            trace_vfio_quirk_nvidia_3d0_write(vdev->vbasedev.name,
                                              offset, data, size);
            return;
        }
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + 0x10, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;
    VFIONvidia3d0Quirk *data;

    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
        !vdev->bars[1].region.size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->data = data = g_malloc0(sizeof(*data));
    quirk->mem = g_malloc0_n(sizeof(MemoryRegion), 2);
    quirk->nr_mem = 2;
    data->vdev = vdev;

    memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_nvidia_3d4_quirk,
                          data, "vfio-nvidia-3d4-quirk", 2);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                0x14 /* 0x3c0 + 0x14 */, &quirk->mem[0]);

    memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          data, "vfio-nvidia-3d0-quirk", 2);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                0x10 /* 0x3c0 + 0x10 */, &quirk->mem[1]);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_quirk_nvidia_3d0_probe(vdev->vbasedev.name);
}

/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
 * space, including extended space, which is available in the 4k region
 * @0x88000.
 */
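/*
 * Layout used by the quirk below (as programmed in the probe function):
 * BAR5 offset 0x0 is a "master" register and 0x4 an "enable" register,
 * offset 0x8 is the window address register and 0xc the data register.
 * The address/data window is only armed while bit 0 is set in both the
 * master and enable registers (see vfio_nvidia_bar5_enable()); until then
 * the quirk regions are disabled and accesses pass straight through.
 */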
typedef struct VFIONvidiaBAR5Quirk {
    uint32_t master;
    uint32_t enable;
    MemoryRegion *addr_mem;
    MemoryRegion *data_mem;
    bool enabled;
    VFIOConfigWindowQuirk window; /* last for match data */
} VFIONvidiaBAR5Quirk;

static void vfio_nvidia_bar5_enable(VFIONvidiaBAR5Quirk *bar5)
{
    VFIOPCIDevice *vdev = bar5->window.vdev;

    if (((bar5->master & bar5->enable) & 0x1) == bar5->enabled) {
        return;
    }

    bar5->enabled = !bar5->enabled;
    trace_vfio_quirk_nvidia_bar5_state(vdev->vbasedev.name,
                                       bar5->enabled ? "Enable" : "Disable");
    memory_region_set_enabled(bar5->addr_mem, bar5->enabled);
    memory_region_set_enabled(bar5->data_mem, bar5->enabled);
}

static uint64_t vfio_nvidia_bar5_quirk_master_read(void *opaque,
                                                   hwaddr addr, unsigned size)
{
    VFIONvidiaBAR5Quirk *bar5 = opaque;
    VFIOPCIDevice *vdev = bar5->window.vdev;

    return vfio_region_read(&vdev->bars[5].region, addr, size);
}

static void vfio_nvidia_bar5_quirk_master_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIONvidiaBAR5Quirk *bar5 = opaque;
    VFIOPCIDevice *vdev = bar5->window.vdev;

    vfio_region_write(&vdev->bars[5].region, addr, data, size);

    bar5->master = data;
    vfio_nvidia_bar5_enable(bar5);
}

static const MemoryRegionOps vfio_nvidia_bar5_quirk_master = {
    .read = vfio_nvidia_bar5_quirk_master_read,
    .write = vfio_nvidia_bar5_quirk_master_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_nvidia_bar5_quirk_enable_read(void *opaque,
                                                   hwaddr addr, unsigned size)
{
    VFIONvidiaBAR5Quirk *bar5 = opaque;
    VFIOPCIDevice *vdev = bar5->window.vdev;

    return vfio_region_read(&vdev->bars[5].region, addr + 4, size);
}

static void vfio_nvidia_bar5_quirk_enable_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIONvidiaBAR5Quirk *bar5 = opaque;
    VFIOPCIDevice *vdev = bar5->window.vdev;

    vfio_region_write(&vdev->bars[5].region, addr + 4, data, size);

    bar5->enable = data;
    vfio_nvidia_bar5_enable(bar5);
}

static const MemoryRegionOps vfio_nvidia_bar5_quirk_enable = {
    .read = vfio_nvidia_bar5_quirk_enable_read,
    .write = vfio_nvidia_bar5_quirk_enable_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_nvidia_bar5_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIONvidiaBAR5Quirk *bar5;
    VFIOConfigWindowQuirk *window;

    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
        !vdev->has_vga || nr != 5) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->mem = g_malloc0_n(sizeof(MemoryRegion), 4);
    quirk->nr_mem = 4;
    bar5 = quirk->data = g_malloc0(sizeof(*bar5) +
                                   (sizeof(VFIOConfigWindowMatch) * 2));
    window = &bar5->window;

    window->vdev = vdev;
    window->address_offset = 0x8;
    window->data_offset = 0xc;
    window->nr_matches = 2;
    window->matches[0].match = 0x1800;
    window->matches[0].mask = PCI_CONFIG_SPACE_SIZE - 1;
    window->matches[1].match = 0x88000;
    window->matches[1].mask = PCIE_CONFIG_SPACE_SIZE - 1;
    window->bar = nr;
    window->addr_mem = bar5->addr_mem = &quirk->mem[0];
    window->data_mem = bar5->data_mem = &quirk->mem[1];

    memory_region_init_io(window->addr_mem, OBJECT(vdev),
                          &vfio_generic_window_address_quirk, window,
                          "vfio-nvidia-bar5-window-address-quirk", 4);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        window->address_offset,
                                        window->addr_mem, 1);
    memory_region_set_enabled(window->addr_mem, false);

    memory_region_init_io(window->data_mem, OBJECT(vdev),
                          &vfio_generic_window_data_quirk, window,
                          "vfio-nvidia-bar5-window-data-quirk", 4);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        window->data_offset,
                                        window->data_mem, 1);
    memory_region_set_enabled(window->data_mem, false);

    memory_region_init_io(&quirk->mem[2], OBJECT(vdev),
                          &vfio_nvidia_bar5_quirk_master, bar5,
                          "vfio-nvidia-bar5-master-quirk", 4);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        0, &quirk->mem[2], 1);

    memory_region_init_io(&quirk->mem[3], OBJECT(vdev),
                          &vfio_nvidia_bar5_quirk_enable, bar5,
                          "vfio-nvidia-bar5-enable-quirk", 4);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        4, &quirk->mem[3], 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_nvidia_bar5_probe(vdev->vbasedev.name);
}

/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 */
static void vfio_nvidia_quirk_mirror_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    VFIOConfigMirrorQuirk *mirror = opaque;
    VFIOPCIDevice *vdev = mirror->vdev;
    PCIDevice *pdev = &vdev->pdev;

    vfio_generic_quirk_mirror_write(opaque, addr, data, size);

    /*
     * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
     * MSI capability ID register.  Both the ID and next register are
     * read-only, so we allow writes covering either of those to real hw.
     */
    if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
        vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
        vfio_region_write(&vdev->bars[mirror->bar].region,
                          addr + mirror->offset, data, size);
        trace_vfio_quirk_nvidia_bar0_msi_ack(vdev->vbasedev.name);
    }
}

static const MemoryRegionOps vfio_nvidia_mirror_quirk = {
    .read = vfio_generic_quirk_mirror_read,
    .write = vfio_nvidia_quirk_mirror_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOConfigMirrorQuirk *mirror;

    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
        !vfio_is_vga(vdev) || nr != 0) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    mirror = quirk->data = g_malloc0(sizeof(*mirror));
    mirror->mem = quirk->mem = g_malloc0_n(sizeof(MemoryRegion), 1);
    quirk->nr_mem = 1;
    mirror->vdev = vdev;
    mirror->offset = 0x88000;
    mirror->bar = nr;

    memory_region_init_io(mirror->mem, OBJECT(vdev),
                          &vfio_nvidia_mirror_quirk, mirror,
                          "vfio-nvidia-bar0-88000-mirror-quirk",
                          PCIE_CONFIG_SPACE_SIZE);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        mirror->offset, mirror->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    /* The 0x1800 offset mirror only seems to get used by legacy VGA */
    if (vdev->has_vga) {
        quirk = g_malloc0(sizeof(*quirk));
        mirror = quirk->data = g_malloc0(sizeof(*mirror));
        mirror->mem = quirk->mem = g_malloc0_n(sizeof(MemoryRegion), 1);
        quirk->nr_mem = 1;
        mirror->vdev = vdev;
        mirror->offset = 0x1800;
        mirror->bar = nr;

        memory_region_init_io(mirror->mem, OBJECT(vdev),
                              &vfio_nvidia_mirror_quirk, mirror,
                              "vfio-nvidia-bar0-1800-mirror-quirk",
                              PCI_CONFIG_SPACE_SIZE);
        memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                            mirror->offset, mirror->mem, 1);

        QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
    }

    trace_vfio_quirk_nvidia_bar0_probe(vdev->vbasedev.name);
}

/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

#define PCI_VENDOR_ID_REALTEK 0x10ec

/*
 * RTL8168 devices have a backdoor that can access the MSI-X table.  At BAR2
 * offset 0x70 there is a dword data register, offset 0x74 is a dword address
 * register.  According to the Linux r8169 driver, the MSI-X table is addressed
 * when the "type" portion of the address register is set to 0x1.  This appears
 * to be bits 16:30.  Bit 31 is both a write indicator and some sort of
 * "address latched" indicator.  Bits 12:15 are a mask field, which we can
 * ignore because the MSI-X table should always be accessed as a dword (full
 * mask).  Bits 0:11 are the offset within the type.
 *
 * Example trace:
 *
 * Read from MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
 *
 * Write 0xfee00000 to MSI-X table offset 0
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
 */
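/*
 * Note that QEMU virtualizes the MSI-X table for the guest; the physical
 * table holds the host-programmed vectors.  Accesses made through this
 * backdoor therefore have to be redirected to the emulated msix_table_mmio
 * region rather than to the device, which is what the handlers below do:
 * reads and writes targeting the MSI-X table are dispatched to the emulated
 * table and the corresponding hardware write is dropped.
 */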
typedef struct VFIOrtl8168Quirk {
    VFIOPCIDevice *vdev;
    uint32_t addr;
    uint32_t data;
    bool enabled;
} VFIOrtl8168Quirk;

static uint64_t vfio_rtl8168_quirk_address_read(void *opaque,
                                                hwaddr addr, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;
    uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x74, size);

    if (rtl->enabled) {
        data = rtl->addr ^ 0x80000000U; /* latch/complete */
        trace_vfio_quirk_rtl8168_fake_latch(vdev->vbasedev.name, data);
    }

    return data;
}

static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr,
                                             uint64_t data, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;

    rtl->enabled = false;

    if ((data & 0x7fff0000) == 0x10000) { /* MSI-X table */
        rtl->enabled = true;
        rtl->addr = (uint32_t)data;

        if (data & 0x80000000U) { /* Do write */
            if (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {
                hwaddr offset = data & 0xfff;
                uint64_t val = rtl->data;

                trace_vfio_quirk_rtl8168_msix_write(vdev->vbasedev.name,
                                                    (uint16_t)offset, val);

                /* Write to the proper guest MSI-X table instead */
                memory_region_dispatch_write(&vdev->pdev.msix_table_mmio,
                                             offset, val, size,
                                             MEMTXATTRS_UNSPECIFIED);
            }
            return; /* Do not write guest MSI-X data to hardware */
        }
    }

    vfio_region_write(&vdev->bars[2].region, addr + 0x74, data, size);
}

static const MemoryRegionOps vfio_rtl_address_quirk = {
    .read = vfio_rtl8168_quirk_address_read,
    .write = vfio_rtl8168_quirk_address_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t vfio_rtl8168_quirk_data_read(void *opaque,
                                             hwaddr addr, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;
    /* Pass through the data register at BAR2 + 0x70 unless redirected */
    uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x70, size);

    if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
        hwaddr offset = rtl->addr & 0xfff;
        memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset,
                                    &data, size, MEMTXATTRS_UNSPECIFIED);
        trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data);
    }

    return data;
}

static void vfio_rtl8168_quirk_data_write(void *opaque, hwaddr addr,
                                          uint64_t data, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;

    rtl->data = (uint32_t)data;

    vfio_region_write(&vdev->bars[2].region, addr + 0x70, data, size);
}

static const MemoryRegionOps vfio_rtl_data_quirk = {
    .read = vfio_rtl8168_quirk_data_read,
    .write = vfio_rtl8168_quirk_data_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOrtl8168Quirk *rtl;

    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_REALTEK, 0x8168) || nr != 2) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->mem = g_malloc0_n(sizeof(MemoryRegion), 2);
    quirk->nr_mem = 2;
    quirk->data = rtl = g_malloc0(sizeof(*rtl));
    rtl->vdev = vdev;

    memory_region_init_io(&quirk->mem[0], OBJECT(vdev),
                          &vfio_rtl_address_quirk, rtl,
                          "vfio-rtl8168-window-address-quirk", 4);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        0x74, &quirk->mem[0], 1);

    memory_region_init_io(&quirk->mem[1], OBJECT(vdev),
                          &vfio_rtl_data_quirk, rtl,
                          "vfio-rtl8168-window-data-quirk", 4);
    memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
                                        0x70, &quirk->mem[1], 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_rtl8168_probe(vdev->vbasedev.name);
}

/*
 * Common quirk probe entry points.
 */
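/*
 * Quirks follow a three-stage lifecycle: the *_setup() functions probe the
 * device and overlay the quirk MemoryRegions, the *_teardown() functions
 * remove those subregions from their parent region, and the *_free()
 * functions (typically run later, once the regions can no longer be
 * referenced) unparent the MemoryRegions and release the quirk data.
 */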
void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}

void vfio_vga_quirk_teardown(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        QLIST_FOREACH(quirk, &vdev->vga.region[i].quirks, next) {
            for (j = 0; j < quirk->nr_mem; j++) {
                memory_region_del_subregion(&vdev->vga.region[i].mem,
                                            &quirk->mem[j]);
            }
        }
    }
}

void vfio_vga_quirk_free(VFIOPCIDevice *vdev)
{
    int i, j;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
            QLIST_REMOVE(quirk, next);
            for (j = 0; j < quirk->nr_mem; j++) {
                object_unparent(OBJECT(&quirk->mem[j]));
            }
            g_free(quirk->mem);
            g_free(quirk->data);
            g_free(quirk);
        }
    }
}

void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
{
    vfio_probe_ati_bar4_quirk(vdev, nr);
    vfio_probe_ati_bar2_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_quirk(vdev, nr);
    vfio_probe_rtl8168_bar2_quirk(vdev, nr);
}

void vfio_bar_quirk_teardown(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    VFIOQuirk *quirk;
    int i;

    QLIST_FOREACH(quirk, &bar->quirks, next) {
        for (i = 0; i < quirk->nr_mem; i++) {
            memory_region_del_subregion(&bar->region.mem, &quirk->mem[i]);
        }
    }
}

void vfio_bar_quirk_free(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    int i;

    while (!QLIST_EMPTY(&bar->quirks)) {
        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
        QLIST_REMOVE(quirk, next);
        for (i = 0; i < quirk->nr_mem; i++) {
            object_unparent(OBJECT(&quirk->mem[i]));
        }
        g_free(quirk->mem);
        g_free(quirk->data);
        g_free(quirk);
    }
}