/*
 * low level and IOMMU backend agnostic helpers used by VFIO devices,
 * related to regions, interrupts, capabilities
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/pci.h"
#include "hw/hw.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "monitor/monitor.h"

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}
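
/*
 * Route a single interrupt sub-index to an eventfd via VFIO_DEVICE_SET_IRQS.
 * A valid @fd installs the eventfd, @fd == -1 tears the routing down.  An
 * illustrative PCI INTx caller (names assumed, not prescribed) might do:
 *
 *     fd = event_notifier_get_fd(&vdev->intx.interrupt);
 *     vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
 *                            VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp);
 *
 * Returns true on success; on failure a descriptive error is set in @errp.
 */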
bool vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                            int action, int fd, Error **errp)
{
    ERRP_GUARD();
    g_autofree struct vfio_irq_set *irq_set = NULL;
    int argsz;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (!ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        return true;
    }

    error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return false;
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}
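
/*
 * Counterpart to vfio_region_write(): read @size bytes at @addr from the
 * region's file descriptor and convert from VFIO's little-endian layout to
 * host byte order.  Failed reads are reported and return all 1s.
 */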
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
{
    vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
    vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) /
                                         BITS_PER_BYTE;
    vbmap->bitmap = g_try_malloc0(vbmap->size);
    if (!vbmap->bitmap) {
        return -ENOMEM;
    }

    return 0;
}

struct vfio_info_cap_header *
vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
{
    struct vfio_info_cap_header *hdr;

    for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

struct vfio_info_cap_header *
vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        if (sparse->areas[i].size) {
            trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                                sparse->areas[i].offset +
                                                sparse->areas[i].size - 1);
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}
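
/*
 * Populate @region from VFIO_DEVICE_GET_REGION_INFO for @index and, for
 * non-empty regions, create its MemoryRegion.  When mmap is supported and
 * not disabled, the kernel's sparse mmap capability (if present) selects
 * the sub-ranges to map; otherwise a single mapping covering the whole
 * region is prepared for vfio_region_mmap().
 */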
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    g_autofree struct vfio_region_info *info = NULL;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

int vfio_region_mmap(VFIORegion *region)
{
    int i, ret, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        size_t align = MIN(1ULL << ctz64(region->mmaps[i].size), 1 * GiB);
        void *map_base, *map_align;

        /*
         * Align the mmap for more efficient mapping in the kernel.  Ideally
         * we'd know the PMD and PUD mapping sizes to use as discrete alignment
         * intervals, but we don't.  As of Linux v6.12, the largest PUD size
         * supporting huge pfnmap is 1GiB (ARCH_SUPPORTS_PUD_PFNMAP is only set
         * on x86_64).  Align by power-of-two size, capped at 1GiB.
         *
         * NB. qemu_memalign() and friends actually allocate memory, whereas
         * the region size here can exceed host memory, therefore we manually
         * create an oversized anonymous mapping and clean it up for alignment.
         */
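        /*
         * For example, a 16 MiB area yields a 16 MiB alignment: reserve
         * size + align bytes of PROT_NONE address space, round the base up
         * to the next aligned boundary, trim the leading and trailing
         * slack, then map the region over the aligned hole with MAP_FIXED.
         */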
        map_base = mmap(0, region->mmaps[i].size + align, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map_base == MAP_FAILED) {
            ret = -errno;
            goto no_mmap;
        }

        map_align = (void *)ROUND_UP((uintptr_t)map_base, (uintptr_t)align);
        munmap(map_base, map_align - map_base);
        munmap(map_align + region->mmaps[i].size,
               align - (map_align - map_base));

        region->mmaps[i].mmap = mmap(map_align, region->mmaps[i].size, prot,
                                     MAP_SHARED | MAP_FIXED,
                                     region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            ret = -errno;
            goto no_mmap;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;

no_mmap:
    trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                 region->fd_offset + region->mmaps[i].offset,
                                 region->fd_offset + region->mmaps[i].offset +
                                 region->mmaps[i].size - 1, ret);

    region->mmaps[i].mmap = NULL;

    for (i--; i >= 0; i--) {
        vfio_subregion_unmap(region, i);
    }

    return ret;
}

void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}
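
/*
 * Query VFIO_DEVICE_GET_REGION_INFO for @index.  The kernel reports the
 * size it really needs in argsz; if that exceeds the current buffer (for
 * instance when a capability chain is attached), the buffer is grown and
 * the ioctl retried.  On success *info is a g_malloc'd buffer the caller
 * must free.
 */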
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    g_autofree struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
    }

    return ret;
}
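
/*
 * Derive a stable name for the device.  When no fd was passed in, the
 * sysfs path must exist and its basename is used; with fd passing (only
 * supported by the iommufd backend) a synthetic "VFIO_FD<n>" name is
 * generated so that callers printing vbasedev->name keep working.
 */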
bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
{
    ERRP_GUARD();
    struct stat st;

    if (vbasedev->fd < 0) {
        if (stat(vbasedev->sysfsdev, &st) < 0) {
            error_setg_errno(errp, errno, "no such host device");
            error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
            return false;
        }
        /* User may specify a name, e.g: VFIO platform device */
        if (!vbasedev->name) {
            vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
        }
    } else {
        if (!vbasedev->iommufd) {
            error_setg(errp, "Use FD passing only with iommufd backend");
            return false;
        }
        /*
         * Give a name with fd so any function printing out vbasedev->name
         * will not break.
         */
        if (!vbasedev->name) {
            vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
        }
    }

    return true;
}

void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
{
    ERRP_GUARD();
    int fd = monitor_fd_param(monitor_cur(), str, errp);

    if (fd < 0) {
        error_prepend(errp, "Could not parse remote object fd %s:", str);
        return;
    }
    vbasedev->fd = fd;
}

void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
                      DeviceState *dev, bool ram_discard)
{
    vbasedev->type = type;
    vbasedev->ops = ops;
    vbasedev->dev = dev;
    vbasedev->fd = -1;

    vbasedev->ram_block_discard_allowed = ram_discard;
}

int vfio_device_get_aw_bits(VFIODevice *vdev)
{
    /*
     * iova_ranges is a sorted list.  For old kernels that support VFIO but
     * do not support the query of IOVA ranges, iova_ranges is NULL; in that
     * case HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX (64) is returned.
     */
    GList *l = g_list_last(vdev->bcontainer->iova_ranges);

    if (l) {
        Range *range = l->data;
        return range_get_last_bit(range) + 1;
    }

    return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX;
}

bool vfio_device_is_mdev(VFIODevice *vbasedev)
{
    g_autofree char *subsys = NULL;
    g_autofree char *tmp = NULL;

    if (!vbasedev->sysfsdev) {
        return false;
    }

    tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
    subsys = realpath(tmp, NULL);
    return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
}

bool vfio_device_hiod_realize(VFIODevice *vbasedev, Error **errp)
{
    HostIOMMUDevice *hiod = vbasedev->hiod;

    if (!hiod) {
        return true;
    }

    return HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp);
}

VFIODevice *vfio_get_vfio_device(Object *obj)
{
    if (object_dynamic_cast(obj, TYPE_VFIO_PCI)) {
        return &VFIO_PCI(obj)->vbasedev;
    } else {
        return NULL;
    }
}