/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/irq.h"
#include "hw/platform-bus.h"
#include "hw/qdev-properties.h"
#include "sysemu/kvm.h"

/*
 * Functions used regardless of the injection method
 */

static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}

/**
 * vfio_init_intp - allocate, initialize the IRQ struct pointer
 * and add it into the list of IRQs
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from VFIO driver
 * @errp: error object
 */
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info, Error **errp)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_malloc0(sizeof(EventNotifier));
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_setg_errno(errp, -ret,
                         "failed to initialize trigger eventfd notifier");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_malloc0(sizeof(EventNotifier));
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_setg_errno(errp, -ret,
                             "failed to initialize resample eventfd notifier");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}

/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Set up VFIO signaling and attach an optional user-side handler
 * to the eventfd
 */
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    int32_t fd = event_notifier_get_fd(intp->interrupt);
    Error *err = NULL;
    int ret;

    qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);

    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
                                 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
    if (ret) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
        qemu_set_fd_handler(fd, NULL, NULL, NULL);
    }

    return ret;
}

/*
 * Functions only used when eventfds are handled on user-side,
 * i.e. without irqfd
 */

/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = MMIO region is mmaped (no KVM TRAP);
 * enabled = false ~ slow path = MMIO region is trapped and region callbacks
 * are called; the slow path makes it possible to trap the device IRQ status
 * register reset
 */

static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
    }
}

/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * if there is no more active IRQ
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is re-programmed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                          vdev->mmap_timeout);
            qemu_mutex_unlock(&vdev->intp_mutex);
            return;
        }
    }
    vfio_mmap_set_enabled(vdev, true);
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_intp_inject_pending_lockheld - Injects a pending IRQ
 * @intp: the VFIOINTp handle to inject
 *
 * The function is called on a previous IRQ completion, from
 * vfio_platform_eoi, while the intp_mutex is locked.
 * In that situation, the slow path is already set and
 * the mmap timer was already programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}

/**
 * vfio_intp_interrupt - The user-side eventfd handler
 * @intp: the VFIOINTp handle
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    qemu_mutex_lock(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * the new IRQ gets a pending status and is pushed onto
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
        ret = event_notifier_test_and_clear(intp->interrupt);
        qemu_mutex_unlock(&vdev->intp_mutex);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* sets slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore the fast path when no IRQ
     * is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ, if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered, which is trapped since the slow path was set.
 * It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmasks the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */

static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) {
        abort();
    }
}

/*
 * Functions used for irqfd
 */

/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    int32_t fd = event_notifier_get_fd(intp->unmask);
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    Error *err = NULL;
    int ret;

    qemu_set_fd_handler(fd, NULL, NULL, NULL);
    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
                                 VFIO_IRQ_SET_ACTION_UNMASK, fd, &err);
    if (ret) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
    }
    return ret;
}

/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * In case the irqfd setup fails, we fall back to userspace-handled eventfds
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                     event_notifier_get_fd(intp->interrupt),
                                     event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                     event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}

/* VFIO skeleton */

static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}

/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}

/**
 * vfio_populate_device - Allocate and populate MMIO region
 * and IRQ structs according to driver-returned information
 * @vbasedev: the VFIO device handle
 * @errp: error object
 */
static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_setg(errp, "this isn't a platform device");
        return ret;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

    for (i = 0; i < vbasedev->num_regions; i++) {
        char *name = g_strdup_printf("VFIO %s region %d", vbasedev->name, i);

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                vdev->regions[i], i, name);
        g_free(name);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            goto reg_error;
        }
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        irq.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get device irq info");
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq, errp);
            if (!intp) {
                ret = -1;
                goto irq_err;
            }
        }
    }
    return 0;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vdev->regions[i]) {
            vfio_region_finalize(vdev->regions[i]);
        }
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return ret;
}

/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};

/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 * Implement the VFIO command sequence that allows discovery of the
 * assigned device resources: group extraction, device
 * fd retrieval, resource query.
 * Precondition: the device name must be initialized
 */
static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
    VFIOGroup *group;
    VFIODevice *vbasedev_iter;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* @sysfsdev takes precedence over @host */
    if (vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
    } else {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            error_setg(errp, "wrong host device name");
            return -EINVAL;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (stat(vbasedev->sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno,
                         "failed to get the sysfs host device file status");
        return -errno;
    }

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len < 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }

    trace_vfio_platform_base_device_init(vbasedev->name, groupid);

    group = vfio_get_group(groupid, &address_space_memory, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
    }

    return ret;
}

/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error object
 *
 * Initialize the device, its memory regions and IRQ structures.
 * IRQs are started separately.
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->dev = dev;
    vbasedev->ops = &vfio_platform_ops;

    qemu_mutex_init(&vdev->intp_mutex);

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    ret = vfio_base_device_init(vbasedev, errp);
    if (ret) {
        goto out;
    }

    if (!vdev->compat) {
        GError *gerr = NULL;
        gchar *contents;
        gsize length;
        char *path;

        path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev);
        if (!g_file_get_contents(path, &contents, &length, &gerr)) {
            error_setg(errp, "%s", gerr->message);
            g_error_free(gerr);
            g_free(path);
            return;
        }
        g_free(path);
        vdev->compat = contents;
        for (vdev->num_compat = 0; length; vdev->num_compat++) {
            size_t skip = strlen(contents) + 1;
            contents += skip;
            length -= skip;
        }
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            warn_report("%s mmap unsupported, performance may be slow",
                        memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
out:
    if (!ret) {
        return;
    }

    if (vdev->vbasedev.name) {
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    } else {
        error_prepend(errp, "vfio error: ");
    }
}

static const VMStateDescription vfio_platform_vmstate = {
    .name = "vfio-platform",
    .unmigratable = 1,
};

static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    dc->props = vfio_platform_dev_properties;
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    /* Supported by TYPE_VIRT_MACHINE */
    dc->user_creatable = true;
}

static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
};

static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)