/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */

#include "qemu/osdep.h"
#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "system/iommufd.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "system/memory.h"
#include "system/address-spaces.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/irq.h"
#include "hw/platform-bus.h"
#include "hw/qdev-properties.h"
#include "system/kvm.h"
#include "hw/vfio/vfio-region.h"

/*
 * Functions used regardless of the injection method
 */

static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}

/**
 * vfio_init_intp - allocate and initialize an IRQ struct and add
 * it to the list of IRQs
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from VFIO driver
 * @errp: error object
 */
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info, Error **errp)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_new0(EventNotifier, 1);
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_setg_errno(errp, -ret,
                         "failed to initialize trigger eventfd notifier");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_new0(EventNotifier, 1);
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_setg_errno(errp, -ret,
                             "failed to initialize resample eventfd notifier");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}

/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Set up VFIO signaling and attach an optional user-side handler
 * to the eventfd
 */
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    int32_t fd = event_notifier_get_fd(intp->interrupt);
    Error *err = NULL;

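    /* a NULL @handler (irqfd case) removes any user-side fd handler */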
    qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);

    if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0,
                                       VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
        qemu_set_fd_handler(fd, NULL, NULL, NULL);
        return -EINVAL;
    }

    return 0;
}

/*
 * Functions only used when eventfds are handled on the user side,
 * i.e. without irqfd
 */

/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = MMIO region is mmapped (no KVM TRAP);
 * enabled = false ~ slow path = MMIO region is trapped and region callbacks
 * are called; the slow path makes it possible to trap the device IRQ status
 * register reset
 */

static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
    }
}

/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * if there is no more active IRQ
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and, if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is reprogrammed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                          vdev->mmap_timeout);
            return;
        }
    }
    vfio_mmap_set_enabled(vdev, true);
}

/**
 * vfio_intp_inject_pending_lockheld - Injects a pending IRQ
 * @intp: the pending VFIOINTp handle
 *
 * The function is called on a previous IRQ completion, from
 * vfio_platform_eoi, while the intp_mutex is locked.
 * In that situation, the slow path is already set and
 * the mmap timer was already programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                                 event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}

/**
 * vfio_intp_interrupt - The user-side eventfd handler
 * @intp: the VFIOINTp handle registered as the eventfd's opaque pointer
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * The new IRQ gets a pending status and is pushed onto
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
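        /* consume the eventfd; the IRQ is injected later from the EOI path */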
        event_notifier_test_and_clear(intp->interrupt);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* sets slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore fastpath when no IRQ
     * is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
    }
}

/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered; this access is trapped since the slow
 * path was set. It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmasks the physical level-sensitive IRQ */
                vfio_device_irq_unmask(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
}

/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */

static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) {
        abort();
    }
}

/*
 * Functions used for irqfd
 */

/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    int32_t fd = event_notifier_get_fd(intp->unmask);
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    Error *err = NULL;

    qemu_set_fd_handler(fd, NULL, NULL, NULL);
    if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0,
                                       VFIO_IRQ_SET_ACTION_UNMASK, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
        return -EINVAL;
    }
    return 0;
}

/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * In case the irqfd setup fails, we fall back to userspace-handled eventfds
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                     event_notifier_get_fd(intp->interrupt),
                                     event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                     event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
}

/* VFIO skeleton */

static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}

/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}

/**
 * vfio_populate_device - Allocate and populate MMIO region
 * and IRQ structs according to driver returned information
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 */
static bool vfio_populate_device(VFIODevice *vbasedev, Error **errp)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_setg(errp, "this isn't a platform device");
        return false;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

    for (i = 0; i < vbasedev->num_regions; i++) {
        char *name = g_strdup_printf("VFIO %s region %d\n", vbasedev->name, i);

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                vdev->regions[i], i, name);
        g_free(name);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            goto reg_error;
        }
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq;

        ret = vfio_device_get_irq_info(vbasedev, i, &irq);

        if (ret) {
            error_setg_errno(errp, -ret, "failed to get device irq info");
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq, errp);
            if (!intp) {
                goto irq_err;
            }
        }
    }
    return true;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vdev->regions[i]) {
            vfio_region_finalize(vdev->regions[i]);
        }
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return false;
}

/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};

/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 * Implement the VFIO command sequence used to discover the
 * assigned device resources: group extraction, device
 * fd retrieval, resource query.
 * Precondition: the device name must be initialized
 */
static bool vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
    /* @fd takes precedence over @sysfsdev which takes precedence over @host */
    if (vbasedev->fd < 0 && vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
    } else if (vbasedev->fd < 0) {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            error_setg(errp, "wrong host device name");
            return false;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (!vfio_device_get_name(vbasedev, errp)) {
        return false;
    }

    if (!vfio_device_attach(vbasedev->name, vbasedev,
                            &address_space_memory, errp)) {
        return false;
    }

    if (vfio_populate_device(vbasedev, errp)) {
        return true;
    }

    vfio_device_detach(vbasedev);
    return false;
}

/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error
 *
 * Initialize the device, its memory regions and IRQ structures.
 * IRQs are started separately.
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    ERRP_GUARD();
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i;

    warn_report("-device vfio-platform is deprecated");
    qemu_mutex_init(&vdev->intp_mutex);

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    if (!vfio_base_device_init(vbasedev, errp)) {
        goto init_err;
    }

    if (!vdev->compat) {
        GError *gerr = NULL;
        gchar *contents;
        gsize length;
        char *path;

        path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev);
        if (!g_file_get_contents(path, &contents, &length, &gerr)) {
            error_setg(errp, "%s", gerr->message);
            g_error_free(gerr);
            g_free(path);
            return;
        }
        g_free(path);
        vdev->compat = contents;
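        /* the "compatible" property is a list of NUL-terminated strings */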
        for (vdev->num_compat = 0; length; vdev->num_compat++) {
            size_t skip = strlen(contents) + 1;
            contents += skip;
            length -= skip;
        }
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            warn_report("%s mmap unsupported, performance may be slow",
                        memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
    return;

init_err:
    if (vdev->vbasedev.name) {
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    } else {
        error_prepend(errp, "vfio error: ");
    }
}

static const VMStateDescription vfio_platform_vmstate = {
    .name = "vfio-platform",
    .unmigratable = 1,
};

static const Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
#ifdef CONFIG_IOMMUFD
    DEFINE_PROP_LINK("iommufd", VFIOPlatformDevice, vbasedev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
};

static void vfio_platform_instance_init(Object *obj)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(obj);
    VFIODevice *vbasedev = &vdev->vbasedev;

    vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PLATFORM, &vfio_platform_ops,
                     DEVICE(vdev), false);
}

#ifdef CONFIG_IOMMUFD
static void vfio_platform_set_fd(Object *obj, const char *str, Error **errp)
{
    vfio_device_set_fd(&VFIO_PLATFORM_DEVICE(obj)->vbasedev, str, errp);
}
#endif

static void vfio_platform_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    device_class_set_props(dc, vfio_platform_dev_properties);
#ifdef CONFIG_IOMMUFD
    object_class_property_add_str(klass, "fd", NULL, vfio_platform_set_fd);
#endif
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    object_class_property_set_description(klass, /* 2.4 */
                                          "host",
                                          "Host device name of assigned device");
    object_class_property_set_description(klass, /* 2.4 and 2.5 */
                                          "x-no-mmap",
                                          "Disable MMAP for device. Allows to trace MMIO "
                                          "accesses (DEBUG)");
    object_class_property_set_description(klass, /* 2.4 */
                                          "mmap-timeout-ms",
                                          "When EOI is not provided by KVM/QEMU, wait time "
                                          "(milliseconds) to re-enable device direct access "
                                          "after level interrupt (DEBUG)");
    object_class_property_set_description(klass, /* 2.4 */
                                          "x-irqfd",
                                          "Allow disabling irqfd support (DEBUG)");
    object_class_property_set_description(klass, /* 2.6 */
                                          "sysfsdev",
                                          "Host sysfs path of assigned device");
#ifdef CONFIG_IOMMUFD
    object_class_property_set_description(klass, /* 9.0 */
                                          "iommufd",
                                          "Set host IOMMUFD backend device");
#endif
}

static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .instance_init = vfio_platform_instance_init,
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
};

static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)