xref: /qemu/hw/vfio/pci.c (revision 73f81da0a3628180409a0ae90ece19534bcdf09b)
1 /*
2  * vfio based device assignment support
3  *
4  * Copyright Red Hat, Inc. 2012
5  *
6  * Authors:
7  *  Alex Williamson <alex.williamson@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Based on qemu-kvm device-assignment:
13  *  Adapted for KVM by Qumranet.
14  *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15  *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16  *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17  *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18  *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19  */
20 
21 #include "qemu/osdep.h"
22 #include CONFIG_DEVICES /* CONFIG_IOMMUFD */
23 #include <linux/vfio.h>
24 #include <sys/ioctl.h>
25 
26 #include "hw/hw.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/pci/pci_bridge.h"
30 #include "hw/qdev-properties.h"
31 #include "hw/qdev-properties-system.h"
32 #include "migration/vmstate.h"
33 #include "qobject/qdict.h"
34 #include "qemu/error-report.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/module.h"
37 #include "qemu/range.h"
38 #include "qemu/units.h"
39 #include "system/kvm.h"
40 #include "system/runstate.h"
41 #include "pci.h"
42 #include "trace.h"
43 #include "qapi/error.h"
44 #include "migration/blocker.h"
45 #include "migration/qemu-file.h"
46 #include "system/iommufd.h"
47 #include "vfio-migration-internal.h"
48 #include "vfio-helpers.h"
49 
50 #define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"
51 
52 /* Protected by BQL */
53 static KVMRouteChange vfio_route_change;
54 
55 static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
56 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
57 static void vfio_msi_disable_common(VFIOPCIDevice *vdev);
58 
59 /*
60  * Disabling BAR mmapping can be slow, but toggling it around INTx can
61  * also be a huge overhead.  We try to get the best of both worlds by
62  * waiting until an interrupt occurs to disable mmaps (subsequent transitions
63  * to the same state are effectively no overhead).  If the interrupt has
64  * been serviced and the time gap is long enough, we re-enable mmaps for
65  * performance.  This works well for things like graphics cards, which
66  * may not use their interrupt at all and are penalized to an unusable
67  * level by read/write BAR traps.  Other devices, like NICs, have more
68  * regular interrupts and see much better latency by staying in non-mmap
69  * mode.  We therefore set the default mmap_timeout such that a ping
70  * is just enough to keep the mmap disabled.  Users can experiment with
71  * other options with the x-intx-mmap-timeout-ms parameter (a value of
72  * zero disables the timer).
73  */
74 static void vfio_intx_mmap_enable(void *opaque)
75 {
76     VFIOPCIDevice *vdev = opaque;
77 
78     if (vdev->intx.pending) {
79         timer_mod(vdev->intx.mmap_timer,
80                        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
81         return;
82     }
83 
84     vfio_mmap_set_enabled(vdev, true);
85 }
86 
87 static void vfio_intx_interrupt(void *opaque)
88 {
89     VFIOPCIDevice *vdev = opaque;
90 
91     if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
92         return;
93     }
94 
95     trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
96 
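    /*
     * Note the interrupt pending and assert it to the guest; BAR mmaps are
     * disabled so accesses trap until the guest services and EOIs the
     * interrupt (see the strategy comment above vfio_intx_mmap_enable()).
     */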
97     vdev->intx.pending = true;
98     pci_irq_assert(&vdev->pdev);
99     vfio_mmap_set_enabled(vdev, false);
100     if (vdev->intx.mmap_timeout) {
101         timer_mod(vdev->intx.mmap_timer,
102                        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
103     }
104 }
105 
106 static void vfio_intx_eoi(VFIODevice *vbasedev)
107 {
108     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
109 
110     if (!vdev->intx.pending) {
111         return;
112     }
113 
114     trace_vfio_intx_eoi(vbasedev->name);
115 
116     vdev->intx.pending = false;
117     pci_irq_deassert(&vdev->pdev);
118     vfio_device_irq_unmask(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
119 }
120 
121 static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
122 {
123 #ifdef CONFIG_KVM
124     int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);
125 
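    /*
     * With KVM irqfd and resamplefd support, INTx bypasses QEMU entirely:
     * the device trigger eventfd injects the interrupt via KVM and the
     * unmask eventfd re-arms the line on guest EOI.
     */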
126     if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
127         vdev->intx.route.mode != PCI_INTX_ENABLED ||
128         !kvm_resamplefds_enabled()) {
129         return true;
130     }
131 
132     /* Get to a known interrupt state */
133     qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
134     vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
135     vdev->intx.pending = false;
136     pci_irq_deassert(&vdev->pdev);
137 
138     /* Get an eventfd for resample/unmask */
139     if (event_notifier_init(&vdev->intx.unmask, 0)) {
140         error_setg(errp, "event_notifier_init failed eoi");
141         goto fail;
142     }
143 
144     if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
145                                            &vdev->intx.interrupt,
146                                            &vdev->intx.unmask,
147                                            vdev->intx.route.irq)) {
148         error_setg_errno(errp, errno, "failed to setup resample irqfd");
149         goto fail_irqfd;
150     }
151 
152     if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
153                                        VFIO_IRQ_SET_ACTION_UNMASK,
154                                        event_notifier_get_fd(&vdev->intx.unmask),
155                                        errp)) {
156         goto fail_vfio;
157     }
158 
159     /* Let'em rip */
160     vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
161 
162     vdev->intx.kvm_accel = true;
163 
164     trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
165 
166     return true;
167 
168 fail_vfio:
169     kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
170                                           vdev->intx.route.irq);
171 fail_irqfd:
172     event_notifier_cleanup(&vdev->intx.unmask);
173 fail:
174     qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
175     vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
176     return false;
177 #else
178     return true;
179 #endif
180 }
181 
182 static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
183 {
184 #ifdef CONFIG_KVM
185     if (!vdev->intx.kvm_accel) {
186         return;
187     }
188 
189     /*
190      * Get to a known state, hardware masked, QEMU ready to accept new
191      * interrupts, QEMU IRQ de-asserted.
192      */
193     vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
194     vdev->intx.pending = false;
195     pci_irq_deassert(&vdev->pdev);
196 
197     /* Tell KVM to stop listening for an INTx irqfd */
198     if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
199                                               vdev->intx.route.irq)) {
200         error_report("vfio: Error: Failed to disable INTx irqfd: %m");
201     }
202 
203     /* We only need to close the eventfd for VFIO to clean up the kernel side */
204     event_notifier_cleanup(&vdev->intx.unmask);
205 
206     /* QEMU starts listening for interrupt events. */
207     qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
208                         vfio_intx_interrupt, NULL, vdev);
209 
210     vdev->intx.kvm_accel = false;
211 
212     /* If we've missed an event, let it re-fire through QEMU */
213     vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
214 
215     trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
216 #endif
217 }
218 
219 static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
220 {
221     Error *err = NULL;
222 
223     trace_vfio_intx_update(vdev->vbasedev.name,
224                            vdev->intx.route.irq, route->irq);
225 
226     vfio_intx_disable_kvm(vdev);
227 
228     vdev->intx.route = *route;
229 
230     if (route->mode != PCI_INTX_ENABLED) {
231         return;
232     }
233 
234     if (!vfio_intx_enable_kvm(vdev, &err)) {
235         warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
236     }
237 
238     /* Re-enable the interrupt in case we missed an EOI */
239     vfio_intx_eoi(&vdev->vbasedev);
240 }
241 
242 static void vfio_intx_routing_notifier(PCIDevice *pdev)
243 {
244     VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
245     PCIINTxRoute route;
246 
247     if (vdev->interrupt != VFIO_INT_INTx) {
248         return;
249     }
250 
251     route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
252 
253     if (pci_intx_route_changed(&vdev->intx.route, &route)) {
254         vfio_intx_update(vdev, &route);
255     }
256 }
257 
258 static void vfio_irqchip_change(Notifier *notify, void *data)
259 {
260     VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
261                                        irqchip_change_notifier);
262 
263     vfio_intx_update(vdev, &vdev->intx.route);
264 }
265 
266 static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
267 {
268     uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
269     Error *err = NULL;
270     int32_t fd;
271     int ret;
272 
273 
274     if (!pin) {
275         return true;
276     }
277 
278     vfio_disable_interrupts(vdev);
279 
280     vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
281     pci_config_set_interrupt_pin(vdev->pdev.config, pin);
282 
283 #ifdef CONFIG_KVM
284     /*
285      * This is conditional only to avoid generating error messages on
286      * platforms where we won't actually use the result anyway.
287      */
288     if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
289         vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
290                                                         vdev->intx.pin);
291     }
292 #endif
293 
294     ret = event_notifier_init(&vdev->intx.interrupt, 0);
295     if (ret) {
296         error_setg_errno(errp, -ret, "event_notifier_init failed");
297         return false;
298     }
299     fd = event_notifier_get_fd(&vdev->intx.interrupt);
300     qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);
301 
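    /*
     * Ask VFIO to signal the trigger eventfd when the device asserts INTx;
     * the fd handler installed above injects it into the guest until KVM
     * acceleration (below) takes over.
     */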
302     if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
303                                 VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
304         qemu_set_fd_handler(fd, NULL, NULL, vdev);
305         event_notifier_cleanup(&vdev->intx.interrupt);
306         return false;
307     }
308 
309     if (!vfio_intx_enable_kvm(vdev, &err)) {
310         warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
311     }
312 
313     vdev->interrupt = VFIO_INT_INTx;
314 
315     trace_vfio_intx_enable(vdev->vbasedev.name);
316     return true;
317 }
318 
319 static void vfio_intx_disable(VFIOPCIDevice *vdev)
320 {
321     int fd;
322 
323     timer_del(vdev->intx.mmap_timer);
324     vfio_intx_disable_kvm(vdev);
325     vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
326     vdev->intx.pending = false;
327     pci_irq_deassert(&vdev->pdev);
328     vfio_mmap_set_enabled(vdev, true);
329 
330     fd = event_notifier_get_fd(&vdev->intx.interrupt);
331     qemu_set_fd_handler(fd, NULL, NULL, vdev);
332     event_notifier_cleanup(&vdev->intx.interrupt);
333 
334     vdev->interrupt = VFIO_INT_NONE;
335 
336     trace_vfio_intx_disable(vdev->vbasedev.name);
337 }
338 
339 /*
340  * MSI/X
341  */
342 static void vfio_msi_interrupt(void *opaque)
343 {
344     VFIOMSIVector *vector = opaque;
345     VFIOPCIDevice *vdev = vector->vdev;
346     MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
347     void (*notify)(PCIDevice *dev, unsigned vector);
348     MSIMessage msg;
349     int nr = vector - vdev->msi_vectors;
350 
351     if (!event_notifier_test_and_clear(&vector->interrupt)) {
352         return;
353     }
354 
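    /*
     * Userspace delivery path: pick the MSI or MSI-X flavor of message
     * lookup and notify.  A masked MSI-X vector additionally lights up the
     * emulated PBA so the guest sees it pending on unmask.
     */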
355     if (vdev->interrupt == VFIO_INT_MSIX) {
356         get_msg = msix_get_message;
357         notify = msix_notify;
358 
359         /* A masked vector firing needs to use the PBA, enable it */
360         if (msix_is_masked(&vdev->pdev, nr)) {
361             set_bit(nr, vdev->msix->pending);
362             memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
363             trace_vfio_msix_pba_enable(vdev->vbasedev.name);
364         }
365     } else if (vdev->interrupt == VFIO_INT_MSI) {
366         get_msg = msi_get_message;
367         notify = msi_notify;
368     } else {
369         abort();
370     }
371 
372     msg = get_msg(&vdev->pdev, nr);
373     trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
374     notify(&vdev->pdev, nr);
375 }
376 
377 /*
378  * Get MSI-X enabled, but with no vector enabled, by passing vector 0 with
379  * an invalid fd to the kernel.
380  */
381 static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev)
382 {
383     g_autofree struct vfio_irq_set *irq_set = NULL;
384     int argsz;
385     int32_t *fd;
386 
387     argsz = sizeof(*irq_set) + sizeof(*fd);
388 
389     irq_set = g_malloc0(argsz);
390     irq_set->argsz = argsz;
391     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
392                      VFIO_IRQ_SET_ACTION_TRIGGER;
393     irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
394     irq_set->start = 0;
395     irq_set->count = 1;
396     fd = (int32_t *)&irq_set->data;
397     *fd = -1;
398 
399     return vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set);
400 }
401 
402 static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
403 {
404     struct vfio_irq_set *irq_set;
405     int ret = 0, i, argsz;
406     int32_t *fds;
407 
408     /*
409      * If dynamic MSI-X allocation is supported, the vectors to be allocated
410      * and enabled can be scattered.  Setting nr_vectors before the kernel
411      * enables MSI-X would allocate all of these vectors on the host.
412      *
413      * To keep allocation on demand, use vector 0 with an invalid fd to get
414      * MSI-X enabled first, then set vectors with a potentially sparse set of
415      * eventfds to enable interrupts only when enabled in the guest.
416      */
417     if (msix && !vdev->msix->noresize) {
418         ret = vfio_enable_msix_no_vec(vdev);
419 
420         if (ret) {
421             return ret;
422         }
423     }
424 
425     argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
426 
427     irq_set = g_malloc0(argsz);
428     irq_set->argsz = argsz;
429     irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
430     irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
431     irq_set->start = 0;
432     irq_set->count = vdev->nr_vectors;
433     fds = (int32_t *)&irq_set->data;
434 
435     for (i = 0; i < vdev->nr_vectors; i++) {
436         int fd = -1;
437 
438         /*
439          * MSI vs MSI-X - The guest has direct access to MSI mask and pending
440          * bits, therefore we always use the KVM signaling path once set up.
441          * MSI-X mask and pending bits are emulated, so we want to use the
442          * KVM signaling path only when configured and unmasked.
443          */
444         if (vdev->msi_vectors[i].use) {
445             if (vdev->msi_vectors[i].virq < 0 ||
446                 (msix && msix_is_masked(&vdev->pdev, i))) {
447                 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
448             } else {
449                 fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
450             }
451         }
452 
453         fds[i] = fd;
454     }
455 
456     ret = vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set);
457 
458     g_free(irq_set);
459 
460     return ret;
461 }
462 
463 static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
464                                   int vector_n, bool msix)
465 {
466     if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
467         return;
468     }
469 
470     vector->virq = kvm_irqchip_add_msi_route(&vfio_route_change,
471                                              vector_n, &vdev->pdev);
472 }
473 
474 static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector)
475 {
476     if (vector->virq < 0) {
477         return;
478     }
479 
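    /*
     * Create the eventfd VFIO will signal and attach it to the KVM route
     * as an irqfd, so vector delivery bypasses QEMU.
     */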
480     if (event_notifier_init(&vector->kvm_interrupt, 0)) {
481         goto fail_notifier;
482     }
483 
484     if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
485                                            NULL, vector->virq) < 0) {
486         goto fail_kvm;
487     }
488 
489     return;
490 
491 fail_kvm:
492     event_notifier_cleanup(&vector->kvm_interrupt);
493 fail_notifier:
494     kvm_irqchip_release_virq(kvm_state, vector->virq);
495     vector->virq = -1;
496 }
497 
498 static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
499 {
500     kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
501                                           vector->virq);
502     kvm_irqchip_release_virq(kvm_state, vector->virq);
503     vector->virq = -1;
504     event_notifier_cleanup(&vector->kvm_interrupt);
505 }
506 
507 static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
508                                      PCIDevice *pdev)
509 {
510     kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
511     kvm_irqchip_commit_routes(kvm_state);
512 }
513 
514 static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
515                                    MSIMessage *msg, IOHandler *handler)
516 {
517     VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
518     VFIOMSIVector *vector;
519     int ret;
520     bool resizing = vdev->nr_vectors < nr + 1;
521 
522     trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);
523 
524     vector = &vdev->msi_vectors[nr];
525 
526     if (!vector->use) {
527         vector->vdev = vdev;
528         vector->virq = -1;
529         if (event_notifier_init(&vector->interrupt, 0)) {
530             error_report("vfio: Error: event_notifier_init failed");
531         }
532         vector->use = true;
533         msix_vector_use(pdev, nr);
534     }
535 
536     qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
537                         handler, NULL, vector);
538 
539     /*
540      * Attempt to enable route through KVM irqchip,
541      * default to userspace handling if unavailable.
542      */
543     if (vector->virq >= 0) {
544         if (!msg) {
545             vfio_remove_kvm_msi_virq(vector);
546         } else {
547             vfio_update_kvm_msi_virq(vector, *msg, pdev);
548         }
549     } else {
550         if (msg) {
551             if (vdev->defer_kvm_irq_routing) {
552                 vfio_add_kvm_msi_virq(vdev, vector, nr, true);
553             } else {
554                 vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state);
555                 vfio_add_kvm_msi_virq(vdev, vector, nr, true);
556                 kvm_irqchip_commit_route_changes(&vfio_route_change);
557                 vfio_connect_kvm_msi_virq(vector);
558             }
559         }
560     }
561 
562     /*
563      * When dynamic allocation is not supported, we don't want to have the
564      * host allocate all possible MSI vectors for a device if they're not
565      * in use, so we shut down and incrementally increase them as needed.
566      * nr_vectors represents the total number of vectors allocated.
567      *
568      * When dynamic allocation is supported, let the host only allocate
569      * and enable a vector when it is in use in the guest.  nr_vectors
570      * represents the upper bound of vectors being enabled (but not all
571      * of the range is allocated or enabled).
572      */
573     if (resizing) {
574         vdev->nr_vectors = nr + 1;
575     }
576 
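    /*
     * Program the host.  On a noresize kernel, growing the vector count
     * requires disabling and re-enabling the whole MSI-X index; otherwise
     * updating the signaling for this one sub-index suffices.
     */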
577     if (!vdev->defer_kvm_irq_routing) {
578         if (vdev->msix->noresize && resizing) {
579             vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
580             ret = vfio_enable_vectors(vdev, true);
581             if (ret) {
582                 error_report("vfio: failed to enable vectors, %s",
583                              strerror(-ret));
584             }
585         } else {
586             Error *err = NULL;
587             int32_t fd;
588 
589             if (vector->virq >= 0) {
590                 fd = event_notifier_get_fd(&vector->kvm_interrupt);
591             } else {
592                 fd = event_notifier_get_fd(&vector->interrupt);
593             }
594 
595             if (!vfio_device_irq_set_signaling(&vdev->vbasedev,
596                                         VFIO_PCI_MSIX_IRQ_INDEX, nr,
597                                         VFIO_IRQ_SET_ACTION_TRIGGER, fd,
598                                         &err)) {
599                 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
600             }
601         }
602     }
603 
604     /* Disable PBA emulation when nothing more is pending. */
605     clear_bit(nr, vdev->msix->pending);
606     if (find_first_bit(vdev->msix->pending,
607                        vdev->nr_vectors) == vdev->nr_vectors) {
608         memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
609         trace_vfio_msix_pba_disable(vdev->vbasedev.name);
610     }
611 
612     return 0;
613 }
614 
615 static int vfio_msix_vector_use(PCIDevice *pdev,
616                                 unsigned int nr, MSIMessage msg)
617 {
618     return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
619 }
620 
621 static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
622 {
623     VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
624     VFIOMSIVector *vector = &vdev->msi_vectors[nr];
625 
626     trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
627 
628     /*
629      * There are still old guests that mask and unmask vectors on every
630      * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
631      * the KVM setup in place, simply switch VFIO to use the non-bypass
632      * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
633      * core will mask the interrupt and set pending bits, allowing it to
634      * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
635      */
636     if (vector->virq >= 0) {
637         int32_t fd = event_notifier_get_fd(&vector->interrupt);
638         Error *err = NULL;
639 
640         if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
641                                     nr, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
642                                     &err)) {
643             error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
644         }
645     }
646 }
647 
648 static void vfio_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
649 {
650     assert(!vdev->defer_kvm_irq_routing);
651     vdev->defer_kvm_irq_routing = true;
652     vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state);
653 }
654 
655 static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
656 {
657     int i;
658 
659     assert(vdev->defer_kvm_irq_routing);
660     vdev->defer_kvm_irq_routing = false;
661 
662     kvm_irqchip_commit_route_changes(&vfio_route_change);
663 
664     for (i = 0; i < vdev->nr_vectors; i++) {
665         vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i]);
666     }
667 }
668 
669 static void vfio_msix_enable(VFIOPCIDevice *vdev)
670 {
671     int ret;
672 
673     vfio_disable_interrupts(vdev);
674 
675     vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);
676 
677     vdev->interrupt = VFIO_INT_MSIX;
678 
679     /*
680      * Setting vector notifiers triggers synchronous vector-use
681      * callbacks for each active vector.  Deferring the KVM route
682      * commit so it happens once, rather than per vector, provides a
683      * substantial performance improvement.
684      */
685     vfio_prepare_kvm_msi_virq_batch(vdev);
686 
687     if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
688                                   vfio_msix_vector_release, NULL)) {
689         error_report("vfio: msix_set_vector_notifiers failed");
690     }
691 
692     vfio_commit_kvm_msi_virq_batch(vdev);
693 
694     if (vdev->nr_vectors) {
695         ret = vfio_enable_vectors(vdev, true);
696         if (ret) {
697             error_report("vfio: failed to enable vectors, %s",
698                          strerror(-ret));
699         }
700     } else {
701         /*
702          * Some communication channels between VF & PF or PF & fw rely on the
703          * physical state of the device and expect that enabling MSI-X from the
704          * guest enables the same on the host.  When our guest is Linux, the
705          * guest driver call to pci_enable_msix() sets the enabling bit in the
706          * MSI-X capability, but leaves the vector table masked.  We therefore
707          * can't rely on a vector_use callback (from request_irq() in the guest)
708          * to switch the physical device into MSI-X mode because that may come a
709          * long time after pci_enable_msix().  This code sets vector 0 with an
710          * invalid fd to make the physical device MSI-X enabled, but with no
711          * vectors enabled, just like the guest view.
712          */
713         ret = vfio_enable_msix_no_vec(vdev);
714         if (ret) {
715             error_report("vfio: failed to enable MSI-X, %s",
716                          strerror(-ret));
717         }
718     }
719 
720     trace_vfio_msix_enable(vdev->vbasedev.name);
721 }
722 
723 static void vfio_msi_enable(VFIOPCIDevice *vdev)
724 {
725     int ret, i;
726 
727     vfio_disable_interrupts(vdev);
728 
729     vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
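    /*
     * The host may support fewer vectors than the guest requested; in that
     * case vfio_enable_vectors() below returns the supported count and we
     * retry from here with that smaller number.
     */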
730 retry:
731     /*
732      * Setting vector notifiers needs to enable a route for each vector.
733      * Deferring the KVM route commit so it happens once, rather than per
734      * vector, provides a substantial performance improvement.
735      */
736     vfio_prepare_kvm_msi_virq_batch(vdev);
737 
738     vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
739 
740     for (i = 0; i < vdev->nr_vectors; i++) {
741         VFIOMSIVector *vector = &vdev->msi_vectors[i];
742 
743         vector->vdev = vdev;
744         vector->virq = -1;
745         vector->use = true;
746 
747         if (event_notifier_init(&vector->interrupt, 0)) {
748             error_report("vfio: Error: event_notifier_init failed");
749         }
750 
751         qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
752                             vfio_msi_interrupt, NULL, vector);
753 
754         /*
755          * Attempt to enable route through KVM irqchip,
756          * default to userspace handling if unavailable.
757          */
758         vfio_add_kvm_msi_virq(vdev, vector, i, false);
759     }
760 
761     vfio_commit_kvm_msi_virq_batch(vdev);
762 
763     /* Set interrupt type prior to possible interrupts */
764     vdev->interrupt = VFIO_INT_MSI;
765 
766     ret = vfio_enable_vectors(vdev, false);
767     if (ret) {
768         if (ret < 0) {
769             error_report("vfio: Error: Failed to setup MSI fds: %s",
770                          strerror(-ret));
771         } else {
772             error_report("vfio: Error: Failed to enable %d "
773                          "MSI vectors, retry with %d", vdev->nr_vectors, ret);
774         }
775 
776         vfio_msi_disable_common(vdev);
777 
778         if (ret > 0) {
779             vdev->nr_vectors = ret;
780             goto retry;
781         }
782 
783         /*
784          * Failing to set up MSI doesn't really fall within any specification.
785          * Let's try leaving interrupts disabled and hope the guest figures
786          * out that it should fall back to INTx for this device.
787          */
788         error_report("vfio: Error: Failed to enable MSI");
789 
790         return;
791     }
792 
793     trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
794 }
795 
796 static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
797 {
798     int i;
799 
800     for (i = 0; i < vdev->nr_vectors; i++) {
801         VFIOMSIVector *vector = &vdev->msi_vectors[i];
802         if (vdev->msi_vectors[i].use) {
803             if (vector->virq >= 0) {
804                 vfio_remove_kvm_msi_virq(vector);
805             }
806             qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
807                                 NULL, NULL, NULL);
808             event_notifier_cleanup(&vector->interrupt);
809         }
810     }
811 
812     g_free(vdev->msi_vectors);
813     vdev->msi_vectors = NULL;
814     vdev->nr_vectors = 0;
815     vdev->interrupt = VFIO_INT_NONE;
816 }
817 
818 static void vfio_msix_disable(VFIOPCIDevice *vdev)
819 {
820     Error *err = NULL;
821     int i;
822 
823     msix_unset_vector_notifiers(&vdev->pdev);
824 
825     /*
826      * MSI-X will only release vectors if MSI-X is still enabled on the
827      * device, so check through the rest and release them ourselves if necessary.
828      */
829     for (i = 0; i < vdev->nr_vectors; i++) {
830         if (vdev->msi_vectors[i].use) {
831             vfio_msix_vector_release(&vdev->pdev, i);
832             msix_vector_unuse(&vdev->pdev, i);
833         }
834     }
835 
836     /*
837      * Always clear MSI-X IRQ index. A PF device could have enabled
838      * MSI-X with no vectors. See vfio_msix_enable().
839      */
840     vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
841 
842     vfio_msi_disable_common(vdev);
843     if (!vfio_intx_enable(vdev, &err)) {
844         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
845     }
846 
847     memset(vdev->msix->pending, 0,
848            BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));
849 
850     trace_vfio_msix_disable(vdev->vbasedev.name);
851 }
852 
853 static void vfio_msi_disable(VFIOPCIDevice *vdev)
854 {
855     Error *err = NULL;
856 
857     vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
858     vfio_msi_disable_common(vdev);
859     vfio_intx_enable(vdev, &err);
860     if (err) {
861         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
862     }
863 
864     trace_vfio_msi_disable(vdev->vbasedev.name);
865 }
866 
867 static void vfio_update_msi(VFIOPCIDevice *vdev)
868 {
869     int i;
870 
871     for (i = 0; i < vdev->nr_vectors; i++) {
872         VFIOMSIVector *vector = &vdev->msi_vectors[i];
873         MSIMessage msg;
874 
875         if (!vector->use || vector->virq < 0) {
876             continue;
877         }
878 
879         msg = msi_get_message(&vdev->pdev, i);
880         vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
881     }
882 }
883 
884 static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
885 {
886     VFIODevice *vbasedev = &vdev->vbasedev;
887     struct vfio_region_info *reg_info = NULL;
888     uint64_t size;
889     off_t off = 0;
890     ssize_t bytes;
891     int ret;
892 
893     ret = vfio_device_get_region_info(vbasedev, VFIO_PCI_ROM_REGION_INDEX,
894                                       &reg_info);
895 
896     if (ret != 0) {
897         error_report("vfio: Error getting ROM info: %s", strerror(-ret));
898         return;
899     }
900 
901     trace_vfio_pci_load_rom(vbasedev->name, (unsigned long)reg_info->size,
902                             (unsigned long)reg_info->offset,
903                             (unsigned long)reg_info->flags);
904 
905     vdev->rom_size = size = reg_info->size;
906     vdev->rom_offset = reg_info->offset;
907 
908     if (!vdev->rom_size) {
909         vdev->rom_read_failed = true;
910         error_report("vfio-pci: Cannot read device rom at %s", vbasedev->name);
911         error_printf("Device option ROM contents are probably invalid "
912                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
913                     "or load from file with romfile=\n");
914         return;
915     }
916 
917     vdev->rom = g_malloc(size);
918     memset(vdev->rom, 0xff, size);
919 
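    /*
     * Read the ROM through the region ops in chunks: short reads advance
     * the offset, -EINTR/-EAGAIN are retried, and any other error aborts,
     * leaving the remainder 0xff-filled.
     */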
920     while (size) {
921         bytes = vbasedev->io_ops->region_read(vbasedev,
922                                               VFIO_PCI_ROM_REGION_INDEX,
923                                               off, size, vdev->rom + off);
924 
925         if (bytes == 0) {
926             break;
927         } else if (bytes > 0) {
928             off += bytes;
929             size -= bytes;
930         } else {
931             if (bytes == -EINTR || bytes == -EAGAIN) {
932                 continue;
933             }
934             error_report("vfio: Error reading device ROM: %s",
935                          strreaderror(bytes));
936 
937             break;
938         }
939     }
940 
941     /*
942      * Test the ROM signature against our device, if the vendor is correct
943      * but the device ID doesn't match, store the correct device ID and
944      * recompute the checksum.  Intel IGD devices need this and are known
945      * to have bogus checksums so we can't simply adjust the checksum.
946      */
947     if (pci_get_word(vdev->rom) == 0xaa55 &&
948         pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
949         !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
950         uint16_t vid, did;
951 
952         vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
953         did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);
954 
955         if (vid == vdev->vendor_id && did != vdev->device_id) {
956             int i;
957             uint8_t csum, *data = vdev->rom;
958 
959             pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
960                          vdev->device_id);
961             data[6] = 0;
962 
963             for (csum = 0, i = 0; i < vdev->rom_size; i++) {
964                 csum += data[i];
965             }
966 
967             data[6] = -csum;
968         }
969     }
970 }
971 
972 /* "Raw" read of underlying config space. */
973 static int vfio_pci_config_space_read(VFIOPCIDevice *vdev, off_t offset,
974                                       uint32_t size, void *data)
975 {
976     return vdev->vbasedev.io_ops->region_read(&vdev->vbasedev,
977                                               VFIO_PCI_CONFIG_REGION_INDEX,
978                                               offset, size, data);
979 }
980 
981 /* "Raw" write of underlying config space. */
982 static int vfio_pci_config_space_write(VFIOPCIDevice *vdev, off_t offset,
983                                        uint32_t size, void *data)
984 {
985     return vdev->vbasedev.io_ops->region_write(&vdev->vbasedev,
986                                                VFIO_PCI_CONFIG_REGION_INDEX,
987                                                offset, size, data);
988 }
989 
990 static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
991 {
992     VFIOPCIDevice *vdev = opaque;
993     union {
994         uint8_t byte;
995         uint16_t word;
996         uint32_t dword;
997         uint64_t qword;
998     } val;
999     uint64_t data = 0;
1000 
1001     /* Load the ROM lazily when the guest tries to read it */
1002     if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
1003         vfio_pci_load_rom(vdev);
1004     }
1005 
1006     memcpy(&val, vdev->rom + addr,
1007            (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);
1008 
1009     switch (size) {
1010     case 1:
1011         data = val.byte;
1012         break;
1013     case 2:
1014         data = le16_to_cpu(val.word);
1015         break;
1016     case 4:
1017         data = le32_to_cpu(val.dword);
1018         break;
1019     default:
1020         hw_error("vfio: unsupported read size, %d bytes\n", size);
1021         break;
1022     }
1023 
1024     trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);
1025 
1026     return data;
1027 }
1028 
1029 static void vfio_rom_write(void *opaque, hwaddr addr,
1030                            uint64_t data, unsigned size)
1031 {
1032 }
1033 
1034 static const MemoryRegionOps vfio_rom_ops = {
1035     .read = vfio_rom_read,
1036     .write = vfio_rom_write,
1037     .endianness = DEVICE_LITTLE_ENDIAN,
1038 };
1039 
1040 static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
1041 {
1042     VFIODevice *vbasedev = &vdev->vbasedev;
1043     uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
1044     char *name;
1045 
1046     if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
1047         /* Since pci handles romfile, just print a message and return */
1048         if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) {
1049             warn_report("Device at %s is known to cause system instability"
1050                         " issues during option rom execution",
1051                         vdev->vbasedev.name);
1052             error_printf("Proceeding anyway since user specified romfile\n");
1053         }
1054         return;
1055     }
1056 
1057     /*
1058      * Use the same size ROM BAR as the physical device.  The contents
1059      * will get filled in later when the guest tries to read it.
1060      */
1061     if (vfio_pci_config_space_read(vdev, PCI_ROM_ADDRESS, 4, &orig) != 4 ||
1062         vfio_pci_config_space_write(vdev, PCI_ROM_ADDRESS, 4, &size) != 4 ||
1063         vfio_pci_config_space_read(vdev, PCI_ROM_ADDRESS, 4, &size) != 4 ||
1064         vfio_pci_config_space_write(vdev, PCI_ROM_ADDRESS, 4, &orig) != 4) {
1065 
1066         error_report("%s(%s) ROM access failed", __func__, vbasedev->name);
1067         return;
1068     }
1069 
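    /*
     * Standard PCI BAR sizing: the mask read back after writing all 1s
     * yields the size as the complement of the address mask plus one.
     */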
1070     size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
1071 
1072     if (!size) {
1073         return;
1074     }
1075 
1076     if (vfio_opt_rom_in_denylist(vdev)) {
1077         if (vdev->pdev.rom_bar > 0) {
1078             warn_report("Device at %s is known to cause system instability"
1079                         " issues during option rom execution",
1080                         vdev->vbasedev.name);
1081             error_printf("Proceeding anyway since user specified"
1082                          " positive value for rombar\n");
1083         } else {
1084             warn_report("Rom loading for device at %s has been disabled"
1085                         " due to system instability issues",
1086                         vdev->vbasedev.name);
1087             error_printf("Specify rombar=1 or romfile to force\n");
1088             return;
1089         }
1090     }
1091 
1092     trace_vfio_pci_size_rom(vdev->vbasedev.name, size);
1093 
1094     name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);
1095 
1096     memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
1097                           &vfio_rom_ops, vdev, name, size);
1098     g_free(name);
1099 
1100     pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
1101                      PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);
1102 
1103     vdev->rom_read_failed = false;
1104 }
1105 
1106 void vfio_vga_write(void *opaque, hwaddr addr,
1107                            uint64_t data, unsigned size)
1108 {
1109     VFIOVGARegion *region = opaque;
1110     VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1111     union {
1112         uint8_t byte;
1113         uint16_t word;
1114         uint32_t dword;
1115         uint64_t qword;
1116     } buf;
1117     off_t offset = vga->fd_offset + region->offset + addr;
1118 
1119     switch (size) {
1120     case 1:
1121         buf.byte = data;
1122         break;
1123     case 2:
1124         buf.word = cpu_to_le16(data);
1125         break;
1126     case 4:
1127         buf.dword = cpu_to_le32(data);
1128         break;
1129     default:
1130         hw_error("vfio: unsupported write size, %d bytes", size);
1131         break;
1132     }
1133 
1134     if (pwrite(vga->fd, &buf, size, offset) != size) {
1135         error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
1136                      __func__, region->offset + addr, data, size);
1137     }
1138 
1139     trace_vfio_vga_write(region->offset + addr, data, size);
1140 }
1141 
1142 uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
1143 {
1144     VFIOVGARegion *region = opaque;
1145     VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1146     union {
1147         uint8_t byte;
1148         uint16_t word;
1149         uint32_t dword;
1150         uint64_t qword;
1151     } buf;
1152     uint64_t data = 0;
1153     off_t offset = vga->fd_offset + region->offset + addr;
1154 
1155     if (pread(vga->fd, &buf, size, offset) != size) {
1156         error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
1157                      __func__, region->offset + addr, size);
1158         return (uint64_t)-1;
1159     }
1160 
1161     switch (size) {
1162     case 1:
1163         data = buf.byte;
1164         break;
1165     case 2:
1166         data = le16_to_cpu(buf.word);
1167         break;
1168     case 4:
1169         data = le32_to_cpu(buf.dword);
1170         break;
1171     default:
1172         hw_error("vfio: unsupported read size, %d bytes", size);
1173         break;
1174     }
1175 
1176     trace_vfio_vga_read(region->offset + addr, size, data);
1177 
1178     return data;
1179 }
1180 
1181 static const MemoryRegionOps vfio_vga_ops = {
1182     .read = vfio_vga_read,
1183     .write = vfio_vga_write,
1184     .endianness = DEVICE_LITTLE_ENDIAN,
1185 };
1186 
1187 /*
1188  * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to
1189  * page size if the BAR occupies an exclusive page on the host, so that we
1190  * can map this BAR into the guest.  The sub-page BAR may not occupy an
1191  * exclusive page in the guest, however, so we set the priority of the
1192  * expanded memory region to zero in case it overlaps with BARs sharing
1193  * the same guest page.  We must also restore the size of this sub-page
1194  * BAR when its guest base address changes and is no longer page
1195  * aligned.
1196  */
1197 static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
1198 {
1199     VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
1200     VFIORegion *region = &vdev->bars[bar].region;
1201     MemoryRegion *mmap_mr, *region_mr, *base_mr;
1202     PCIIORegion *r;
1203     pcibus_t bar_addr;
1204     uint64_t size = region->size;
1205 
1206     /* Make sure that the whole region is allowed to be mmapped */
1207     if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
1208         region->mmaps[0].size != region->size) {
1209         return;
1210     }
1211 
1212     r = &pdev->io_regions[bar];
1213     bar_addr = r->addr;
1214     base_mr = vdev->bars[bar].mr;
1215     region_mr = region->mem;
1216     mmap_mr = &region->mmaps[0].mem;
1217 
1218     /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
1219     if (bar_addr != PCI_BAR_UNMAPPED &&
1220         !(bar_addr & ~qemu_real_host_page_mask())) {
1221         size = qemu_real_host_page_size();
1222     }
1223 
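    /*
     * Resize the base, region, and mmap memory regions together in one
     * transaction, re-adding the base region at priority 0 so overlapping
     * BARs that share the guest page take precedence.
     */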
1224     memory_region_transaction_begin();
1225 
1226     if (vdev->bars[bar].size < size) {
1227         memory_region_set_size(base_mr, size);
1228     }
1229     memory_region_set_size(region_mr, size);
1230     memory_region_set_size(mmap_mr, size);
1231     if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
1232         memory_region_del_subregion(r->address_space, base_mr);
1233         memory_region_add_subregion_overlap(r->address_space,
1234                                             bar_addr, base_mr, 0);
1235     }
1236 
1237     memory_region_transaction_commit();
1238 }
1239 
1240 /*
1241  * PCI config space
1242  */
1243 uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
1244 {
1245     VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
1246     VFIODevice *vbasedev = &vdev->vbasedev;
1247     uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
1248 
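    /*
     * emulated_config_bits flags, bit for bit, which parts of config space
     * QEMU emulates; emulated bits are read via pci_default_read_config()
     * and the rest from the device, then merged below.
     */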
1249     memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
1250     emu_bits = le32_to_cpu(emu_bits);
1251 
1252     if (emu_bits) {
1253         emu_val = pci_default_read_config(pdev, addr, len);
1254     }
1255 
1256     if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
1257         ssize_t ret;
1258 
1259         ret = vfio_pci_config_space_read(vdev, addr, len, &phys_val);
1260         if (ret != len) {
1261             error_report("%s(%s, 0x%x, 0x%x) failed: %s",
1262                          __func__, vbasedev->name, addr, len,
1263                          strreaderror(ret));
1264             return -1;
1265         }
1266         phys_val = le32_to_cpu(phys_val);
1267     }
1268 
1269     val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
1270 
1271     trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
1272 
1273     return val;
1274 }
1275 
1276 void vfio_pci_write_config(PCIDevice *pdev,
1277                            uint32_t addr, uint32_t val, int len)
1278 {
1279     VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
1280     VFIODevice *vbasedev = &vdev->vbasedev;
1281     uint32_t val_le = cpu_to_le32(val);
1282     int ret;
1283 
1284     trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
1285 
1286     /* Write everything to VFIO, let it filter out what we can't write */
1287     ret = vfio_pci_config_space_write(vdev, addr, len, &val_le);
1288     if (ret != len) {
1289         error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %s",
1290                      __func__, vbasedev->name, addr, val, len,
1291                     strwriteerror(ret));
1292     }
1293 
1294     /* MSI/MSI-X Enabling/Disabling */
1295     if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
1296         ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
1297         int is_enabled, was_enabled = msi_enabled(pdev);
1298 
1299         pci_default_write_config(pdev, addr, val, len);
1300 
1301         is_enabled = msi_enabled(pdev);
1302 
1303         if (!was_enabled) {
1304             if (is_enabled) {
1305                 vfio_msi_enable(vdev);
1306             }
1307         } else {
1308             if (!is_enabled) {
1309                 vfio_msi_disable(vdev);
1310             } else {
1311                 vfio_update_msi(vdev);
1312             }
1313         }
1314     } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
1315         ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
1316         int is_enabled, was_enabled = msix_enabled(pdev);
1317 
1318         pci_default_write_config(pdev, addr, val, len);
1319 
1320         is_enabled = msix_enabled(pdev);
1321 
1322         if (!was_enabled && is_enabled) {
1323             vfio_msix_enable(vdev);
1324         } else if (was_enabled && !is_enabled) {
1325             vfio_msix_disable(vdev);
1326         }
1327     } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
1328         range_covers_byte(addr, len, PCI_COMMAND)) {
1329         pcibus_t old_addr[PCI_NUM_REGIONS - 1];
1330         int bar;
1331 
1332         for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
1333             old_addr[bar] = pdev->io_regions[bar].addr;
1334         }
1335 
1336         pci_default_write_config(pdev, addr, val, len);
1337 
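        /*
         * Compare against the snapshot above: any sub-page BAR whose guest
         * address changed needs its expanded mapping recomputed.
         */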
1338         for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
1339             if (old_addr[bar] != pdev->io_regions[bar].addr &&
1340                 vdev->bars[bar].region.size > 0 &&
1341                 vdev->bars[bar].region.size < qemu_real_host_page_size()) {
1342                 vfio_sub_page_bar_update_mapping(pdev, bar);
1343             }
1344         }
1345     } else {
1346         /* Write everything to QEMU to keep emulated bits correct */
1347         pci_default_write_config(pdev, addr, val, len);
1348     }
1349 }
1350 
1351 /*
1352  * Interrupt setup
1353  */
1354 static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
1355 {
1356     /*
1357      * More complicated than it looks.  Disabling MSI/X transitions the
1358      * device to INTx mode (if supported).  Therefore we need to first
1359      * disable MSI/X and then clean up by disabling INTx.
1360      */
1361     if (vdev->interrupt == VFIO_INT_MSIX) {
1362         vfio_msix_disable(vdev);
1363     } else if (vdev->interrupt == VFIO_INT_MSI) {
1364         vfio_msi_disable(vdev);
1365     }
1366 
1367     if (vdev->interrupt == VFIO_INT_INTx) {
1368         vfio_intx_disable(vdev);
1369     }
1370 }
1371 
1372 static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
1373 {
1374     uint16_t ctrl;
1375     bool msi_64bit, msi_maskbit;
1376     int ret, entries;
1377     Error *err = NULL;
1378 
1379     ret = vfio_pci_config_space_read(vdev, pos + PCI_CAP_FLAGS,
1380                                      sizeof(ctrl), &ctrl);
1381     if (ret != sizeof(ctrl)) {
1382         error_setg(errp, "failed reading MSI PCI_CAP_FLAGS: %s",
1383                    strreaderror(ret));
1384         return false;
1385     }
1386     ctrl = le16_to_cpu(ctrl);
1387 
1388     msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
1389     msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
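    /* Multiple Message Capable is a power-of-two exponent, e.g. 010b -> 4 */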
1390     entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
1391 
1392     trace_vfio_msi_setup(vdev->vbasedev.name, pos);
1393 
1394     ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
1395     if (ret < 0) {
1396         if (ret == -ENOTSUP) {
1397             return true;
1398         }
1399         error_propagate_prepend(errp, err, "msi_init failed: ");
1400         return false;
1401     }
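    /* 10-byte base cap, +10 for mask/pending (incl. padding), +4 for 64-bit */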
1402     vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
1403 
1404     return true;
1405 }
1406 
1407 static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
1408 {
1409     off_t start, end;
1410     VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;
1411 
1412     /*
1413      * If the host driver allows mapping of the MSI-X data, we map the
1414      * entire BAR and emulate the MSI-X table on top of it.
1415      */
1416     if (vfio_device_has_region_cap(&vdev->vbasedev, region->nr,
1417                                    VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
1418         return;
1419     }
1420 
1421     /*
1422      * We expect to find a single mmap covering the whole BAR, anything else
1423      * means it's either unsupported or already set up.
1424      */
1425     if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
1426         region->size != region->mmaps[0].size) {
1427         return;
1428     }
1429 
1430     /* MSI-X table start and end aligned to host page size */
1431     start = vdev->msix->table_offset & qemu_real_host_page_mask();
1432     end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
1433                                (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
1434 
1435     /*
1436      * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
1437      * NB - Host page size is necessarily a power of two and so is the PCI
1438      * BAR (not counting EA yet), therefore if we have host page aligned
1439      * @start and @end, then any remainder of the BAR before or after those
1440      * must be at least host page sized and therefore mmap'able.
1441      */
1442     if (!start) {
1443         if (end >= region->size) {
1444             region->nr_mmaps = 0;
1445             g_free(region->mmaps);
1446             region->mmaps = NULL;
1447             trace_vfio_msix_fixup(vdev->vbasedev.name,
1448                                   vdev->msix->table_bar, 0, 0);
1449         } else {
1450             region->mmaps[0].offset = end;
1451             region->mmaps[0].size = region->size - end;
1452             trace_vfio_msix_fixup(vdev->vbasedev.name,
1453                               vdev->msix->table_bar, region->mmaps[0].offset,
1454                               region->mmaps[0].offset + region->mmaps[0].size);
1455         }
1456 
1457     /* Maybe it's aligned at the end of the BAR */
1458     } else if (end >= region->size) {
1459         region->mmaps[0].size = start;
1460         trace_vfio_msix_fixup(vdev->vbasedev.name,
1461                               vdev->msix->table_bar, region->mmaps[0].offset,
1462                               region->mmaps[0].offset + region->mmaps[0].size);
1463 
1464     /* Otherwise it must split the BAR */
1465     } else {
1466         region->nr_mmaps = 2;
1467         region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);
1468 
1469         memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));
1470 
1471         region->mmaps[0].size = start;
1472         trace_vfio_msix_fixup(vdev->vbasedev.name,
1473                               vdev->msix->table_bar, region->mmaps[0].offset,
1474                               region->mmaps[0].offset + region->mmaps[0].size);
1475 
1476         region->mmaps[1].offset = end;
1477         region->mmaps[1].size = region->size - end;
1478         trace_vfio_msix_fixup(vdev->vbasedev.name,
1479                               vdev->msix->table_bar, region->mmaps[1].offset,
1480                               region->mmaps[1].offset + region->mmaps[1].size);
1481     }
1482 }
1483 
1484 static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
1485 {
1486     int target_bar = -1;
1487     size_t msix_sz;
1488 
1489     if (!vdev->msix || vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) {
1490         return true;
1491     }
1492 
1493     /* The actual minimum size of MSI-X structures */
1494     msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
1495               (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
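    /* i.e. the vector table plus a PBA with one bit per vector, in qwords */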
1496     /* Round up to host pages; we don't want to share a page */
1497     msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
1498     /* PCI BARs must be a power of 2 */
1499     msix_sz = pow2ceil(msix_sz);
1500 
1501     if (vdev->msix_relo == OFF_AUTO_PCIBAR_AUTO) {
1502         /*
1503          * TODO: Lookup table for known devices.
1504          *
1505          * Logically we might use an algorithm here to select the BAR adding
1506          * the least additional MMIO space, but we cannot programmatically
1507          * predict the driver dependency on BAR ordering or sizing, therefore
1508          * 'auto' becomes a lookup for combinations reported to work.
1509          */
1510         if (target_bar < 0) {
1511             error_setg(errp, "No automatic MSI-X relocation available for "
1512                        "device %04x:%04x", vdev->vendor_id, vdev->device_id);
1513             return false;
1514         }
1515     } else {
1516         target_bar = (int)(vdev->msix_relo - OFF_AUTO_PCIBAR_BAR0);
1517     }
1518 
1519     /* I/O port BARs cannot host MSI-X structures */
1520     if (vdev->bars[target_bar].ioport) {
1521         error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1522                    "I/O port BAR", target_bar);
1523         return false;
1524     }
1525 
1526     /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
1527     if (!vdev->bars[target_bar].size &&
1528          target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
1529         error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1530                    "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
1531         return false;
1532     }
1533 
1534     /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
1535     if (vdev->bars[target_bar].size > 1 * GiB &&
1536         !vdev->bars[target_bar].mem64) {
1537         error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1538                    "no space to extend 32-bit BAR", target_bar);
1539         return false;
1540     }
1541 
1542     /*
1543      * If adding a new BAR, test if we can make it 64bit.  We make it
1544      * prefetchable since QEMU MSI-X emulation has no read side effects
1545      * and doing so makes mapping more flexible.
1546      */
1547     if (!vdev->bars[target_bar].size) {
1548         if (target_bar < (PCI_ROM_SLOT - 1) &&
1549             !vdev->bars[target_bar + 1].size) {
1550             vdev->bars[target_bar].mem64 = true;
1551             vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
1552         }
1553         vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
1554         vdev->bars[target_bar].size = msix_sz;
1555         vdev->msix->table_offset = 0;
1556     } else {
1557         vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
1558                                           msix_sz * 2);
1559         /*
1560          * Due to the size calculation above, MSI-X always starts halfway
1561          * into the BAR, which will always be a separate host page.
1562          */
1563         vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
1564     }
1565 
1566     vdev->msix->table_bar = target_bar;
1567     vdev->msix->pba_bar = target_bar;
1568     /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
1569     vdev->msix->pba_offset = vdev->msix->table_offset +
1570                                   (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);
1571 
1572     trace_vfio_msix_relo(vdev->vbasedev.name,
1573                          vdev->msix->table_bar, vdev->msix->table_offset);
1574     return true;
1575 }
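
/*
 * Illustrative sketch, not used by the driver, restating the sizing rule
 * above: the table needs entries * PCI_MSIX_ENTRY_SIZE bytes plus a PBA
 * of one bit per vector padded to 64-bit units, aligned up to a host page
 * and then to a power of two so it can back a BAR.  For a hypothetical
 * 96-vector device with 4KiB host pages: 96 * 16 + 16 = 1552 bytes,
 * which rounds up to a single 4KiB page.
 */
static inline uint64_t example_msix_bar_size(unsigned int entries)
{
    size_t sz = (entries * PCI_MSIX_ENTRY_SIZE) +
                (QEMU_ALIGN_UP(entries, 64) / 8);

    return pow2ceil(REAL_HOST_PAGE_ALIGN(sz));
}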
1576 
1577 /*
1578  * We don't have any control over how pci_add_capability() inserts
1579  * capabilities into the chain.  In order to set up MSI-X we need a
1580  * MemoryRegion for the BAR.  In order to set up the BAR and not
1581  * attempt to mmap the MSI-X table area, which VFIO won't allow, we
1582  * need to first look for where the MSI-X table lives.  So we
1583  * unfortunately split MSI-X setup across two functions.
1584  */
1585 static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
1586 {
1587     uint8_t pos;
1588     uint16_t ctrl;
1589     uint32_t table, pba;
1590     struct vfio_irq_info irq_info;
1591     VFIOMSIXInfo *msix;
1592     int ret;
1593 
1594     pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
1595     if (!pos) {
1596         return true;
1597     }
1598 
1599     ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_FLAGS,
1600                                      sizeof(ctrl), &ctrl);
1601     if (ret != sizeof(ctrl)) {
1602         error_setg(errp, "failed to read PCI MSIX FLAGS: %s",
1603                    strreaderror(ret));
1604         return false;
1605     }
1606 
1607     ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_TABLE,
1608                                      sizeof(table), &table);
1609     if (ret != sizeof(table)) {
1610         error_setg(errp, "failed to read PCI MSIX TABLE: %s",
1611                    strreaderror(ret));
1612         return false;
1613     }
1614 
1615     ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_PBA,
1616                                      sizeof(pba), &pba);
1617     if (ret != sizeof(pba)) {
1618         error_setg(errp, "failed to read PCI MSIX PBA: %s", strreaderror(ret));
1619         return false;
1620     }
1621 
1622     ctrl = le16_to_cpu(ctrl);
1623     table = le32_to_cpu(table);
1624     pba = le32_to_cpu(pba);
1625 
1626     msix = g_malloc0(sizeof(*msix));
1627     msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
1628     msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
1629     msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
1630     msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
1631     msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
1632 
1633     ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
1634                                    &irq_info);
1635     if (ret < 0) {
1636         error_setg_errno(errp, -ret, "failed to get MSI-X irq info");
1637         g_free(msix);
1638         return false;
1639     }
1640 
1641     msix->noresize = !!(irq_info.flags & VFIO_IRQ_INFO_NORESIZE);
1642 
1643     /*
1644      * Check whether pba_offset places the PBA outside of the specified
1645      * BAR.  If so, apply a hardware-specific quirk when the device is
1646      * known; otherwise we have a broken configuration.
1647      */
1648     if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
1649         /*
1650          * Chelsio T5 Virtual Function devices use device IDs of the form
1651          * 0x58xx.  The T5 hardware returns an incorrect value of 0x8000 for
1652          * the VF PBA offset while the BAR itself is only 8k. The correct value
1653          * is 0x1000, so we hard code that here.
1654          */
1655         if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
1656             (vdev->device_id & 0xff00) == 0x5800) {
1657             msix->pba_offset = 0x1000;
1658         /*
1659          * BAIDU KUNLUN Virtual Function devices for KUNLUN AI processor
1660          * return an incorrect value of 0x460000 for the VF PBA offset while
1661          * the BAR itself is only 0x10000.  The correct value is 0xb400.
1662          */
1663         } else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU,
1664                                PCI_DEVICE_ID_KUNLUN_VF)) {
1665             msix->pba_offset = 0xb400;
1666         } else if (vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) {
1667             error_setg(errp, "hardware reports invalid configuration, "
1668                        "MSIX PBA outside of specified BAR");
1669             g_free(msix);
1670             return false;
1671         }
1672     }
1673 
1674     trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
1675                                 msix->table_offset, msix->entries,
1676                                 msix->noresize);
1677     vdev->msix = msix;
1678 
1679     vfio_pci_fixup_msix_region(vdev);
1680 
1681     return vfio_pci_relocate_msix(vdev, errp);
1682 }
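
/*
 * Illustrative sketch, not used by the driver, of the register layout
 * decoded above: the MSI-X table and PBA dwords each pack a 3-bit BAR
 * indicator (BIR) in their low bits, with the remaining bits giving the
 * 8-byte aligned offset of the structure within that BAR.
 */
static inline void example_decode_msix_dword(uint32_t dword, uint8_t *bir,
                                             uint32_t *offset)
{
    *bir = dword & PCI_MSIX_FLAGS_BIRMASK;      /* BAR indicator */
    *offset = dword & ~PCI_MSIX_FLAGS_BIRMASK;  /* offset within BAR */
}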
1683 
1684 static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
1685 {
1686     int ret;
1687     Error *err = NULL;
1688 
1689     vdev->msix->pending = g_new0(unsigned long,
1690                                  BITS_TO_LONGS(vdev->msix->entries));
1691     ret = msix_init(&vdev->pdev, vdev->msix->entries,
1692                     vdev->bars[vdev->msix->table_bar].mr,
1693                     vdev->msix->table_bar, vdev->msix->table_offset,
1694                     vdev->bars[vdev->msix->pba_bar].mr,
1695                     vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
1696                     &err);
1697     if (ret < 0) {
1698         if (ret == -ENOTSUP) {
1699             warn_report_err(err);
1700             return true;
1701         }
1702 
1703         error_propagate(errp, err);
1704         return false;
1705     }
1706 
1707     /*
1708      * The PCI spec suggests that devices provide additional alignment for
1709      * MSI-X structures and avoid overlapping non-MSI-X related registers.
1710      * For an assigned device, this hopefully means that emulation of MSI-X
1711      * structures does not affect the performance of the device.  If devices
1712      * fail to provide that alignment, a significant performance penalty may
1713      * result, for instance Mellanox MT27500 VFs:
1714      * http://www.spinics.net/lists/kvm/msg125881.html
1715      *
1716      * The PBA is simply not that important for such a serious regression and
1717      * most drivers do not appear to look at it.  The solution for this is to
1718      * disable the PBA MemoryRegion unless it's being used.  We disable it
1719      * here and only enable it if a masked vector fires through QEMU.  As the
1720      * vector-use notifier is called, which occurs on unmask, we test whether
1721      * PBA emulation is needed and again disable if not.
1722      */
1723     memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
1724 
1725     /*
1726      * The emulated machine may provide a paravirt interface for MSI-X
1727      * setup, so it is not strictly necessary to emulate MSI-X here.  This
1728      * becomes helpful when frequently accessed MMIO registers are located
1729      * in subpages adjacent to the MSI-X table, but the page containing the
1730      * MSI-X data cannot be mapped because the host page size is bigger
1731      * than the MSI-X table alignment.
1732      */
1733     if (object_property_get_bool(OBJECT(qdev_get_machine()),
1734                                  "vfio-no-msix-emulation", NULL)) {
1735         memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
1736     }
1737 
1738     return true;
1739 }
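
/*
 * Note on the PBA gating above, for illustration only: the vector-use
 * path re-evaluates whether any masked vector still needs the PBA and
 * toggles the emulated region accordingly, conceptually
 *
 *     memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, pba_needed);
 *
 * where 'pba_needed' is a hypothetical flag standing in for that test.
 */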
1740 
1741 static void vfio_teardown_msi(VFIOPCIDevice *vdev)
1742 {
1743     msi_uninit(&vdev->pdev);
1744 
1745     if (vdev->msix) {
1746         msix_uninit(&vdev->pdev,
1747                     vdev->bars[vdev->msix->table_bar].mr,
1748                     vdev->bars[vdev->msix->pba_bar].mr);
1749         g_free(vdev->msix->pending);
1750     }
1751 }
1752 
1753 /*
1754  * Resource setup
1755  */
1756 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
1757 {
1758     int i;
1759 
1760     for (i = 0; i < PCI_ROM_SLOT; i++) {
1761         vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
1762     }
1763 }
1764 
1765 static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
1766 {
1767     VFIOBAR *bar = &vdev->bars[nr];
1768 
1769     uint32_t pci_bar;
1770     int ret;
1771 
1772     /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
1773     if (!bar->region.size) {
1774         return;
1775     }
1776 
1777     /* Determine what type of BAR this is for registration */
1778     ret = vfio_pci_config_space_read(vdev, PCI_BASE_ADDRESS_0 + (4 * nr),
1779                                      sizeof(pci_bar), &pci_bar);
1780     if (ret != sizeof(pci_bar)) {
1781         error_report("vfio: Failed to read BAR %d: %s", nr, strreaderror(ret));
1782         return;
1783     }
1784 
1785     pci_bar = le32_to_cpu(pci_bar);
1786     bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
1787     bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
1788     bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
1789                                          ~PCI_BASE_ADDRESS_MEM_MASK);
1790     bar->size = bar->region.size;
1791 }
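
/*
 * Illustrative sketch, not used by the driver, of the raw BAR decode
 * above: bit 0 selects I/O versus memory space; for memory BARs, bits
 * 2:1 give the type (0x4 meaning 64-bit) and the flag bits are whatever
 * falls outside the respective address mask.
 */
static inline bool example_bar_is_mem64(uint32_t pci_bar)
{
    return !(pci_bar & PCI_BASE_ADDRESS_SPACE_IO) &&
           (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
               PCI_BASE_ADDRESS_MEM_TYPE_64;
}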
1792 
1793 static void vfio_bars_prepare(VFIOPCIDevice *vdev)
1794 {
1795     int i;
1796 
1797     for (i = 0; i < PCI_ROM_SLOT; i++) {
1798         vfio_bar_prepare(vdev, i);
1799     }
1800 }
1801 
1802 static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
1803 {
1804     VFIOBAR *bar = &vdev->bars[nr];
1805     char *name;
1806 
1807     if (!bar->size) {
1808         return;
1809     }
1810 
1811     bar->mr = g_new0(MemoryRegion, 1);
1812     name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
1813     memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
1814     g_free(name);
1815 
1816     if (bar->region.size) {
1817         memory_region_add_subregion(bar->mr, 0, bar->region.mem);
1818 
1819         if (vfio_region_mmap(&bar->region)) {
1820             error_report("Failed to mmap %s BAR %d. Performance may be slow",
1821                          vdev->vbasedev.name, nr);
1822         }
1823     }
1824 
1825     pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
1826 }
1827 
1828 static void vfio_bars_register(VFIOPCIDevice *vdev)
1829 {
1830     int i;
1831 
1832     for (i = 0; i < PCI_ROM_SLOT; i++) {
1833         vfio_bar_register(vdev, i);
1834     }
1835 }
1836 
1837 static void vfio_bars_exit(VFIOPCIDevice *vdev)
1838 {
1839     int i;
1840 
1841     for (i = 0; i < PCI_ROM_SLOT; i++) {
1842         VFIOBAR *bar = &vdev->bars[i];
1843 
1844         vfio_bar_quirk_exit(vdev, i);
1845         vfio_region_exit(&bar->region);
1846         if (bar->region.size) {
1847             memory_region_del_subregion(bar->mr, bar->region.mem);
1848         }
1849     }
1850 
1851     if (vdev->vga) {
1852         pci_unregister_vga(&vdev->pdev);
1853         vfio_vga_quirk_exit(vdev);
1854     }
1855 }
1856 
1857 static void vfio_bars_finalize(VFIOPCIDevice *vdev)
1858 {
1859     int i;
1860 
1861     for (i = 0; i < PCI_ROM_SLOT; i++) {
1862         VFIOBAR *bar = &vdev->bars[i];
1863 
1864         vfio_bar_quirk_finalize(vdev, i);
1865         vfio_region_finalize(&bar->region);
1866         if (bar->mr) {
1867             assert(bar->size);
1868             object_unparent(OBJECT(bar->mr));
1869             g_free(bar->mr);
1870             bar->mr = NULL;
1871         }
1872     }
1873 
1874     if (vdev->vga) {
1875         vfio_vga_quirk_finalize(vdev);
1876         for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1877             object_unparent(OBJECT(&vdev->vga->region[i].mem));
1878         }
1879         g_free(vdev->vga);
1880     }
1881 }
1882 
1883 /*
1884  * General setup
1885  */
1886 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1887 {
1888     uint8_t tmp;
1889     uint16_t next = PCI_CONFIG_SPACE_SIZE;
1890 
1891     for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
1892          tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
1893         if (tmp > pos && tmp < next) {
1894             next = tmp;
1895         }
1896     }
1897 
1898     return next - pos;
1899 }
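
/*
 * Worked example of the size inference above, for illustration only:
 * with standard capabilities at 0x40, 0x50 and 0x60, the capability at
 * 0x50 spans 0x60 - 0x50 = 0x10 bytes, while the one at 0x60, having no
 * successor, extends to the end of standard config space:
 * PCI_CONFIG_SPACE_SIZE (0x100) - 0x60 = 0xa0 bytes.
 */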
1900 
1901 
1902 static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
1903 {
1904     uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
1905 
1906     for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
1907         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
1908         if (tmp > pos && tmp < next) {
1909             next = tmp;
1910         }
1911     }
1912 
1913     return next - pos;
1914 }
1915 
1916 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1917 {
1918     pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1919 }
1920 
1921 static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
1922                                    uint16_t val, uint16_t mask)
1923 {
1924     vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1925     vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1926     vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1927 }
1928 
1929 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1930 {
1931     pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1932 }
1933 
1934 static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
1935                                    uint32_t val, uint32_t mask)
1936 {
1937     vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1938     vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1939     vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
1940 }
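
/*
 * Usage sketch for the helpers above, for illustration only: emulating a
 * config space word touches three planes at once.  The value lands in
 * pdev.config, the covered bits become read-only to the guest because
 * their wmask bits are cleared (~mask & mask == 0), and
 * emulated_config_bits routes reads of those bits to QEMU rather than to
 * the physical device.  The 0x1234 vendor ID below is a made-up value.
 */
static inline void example_emulate_vendor_id(VFIOPCIDevice *vdev)
{
    vfio_add_emulated_word(vdev, PCI_VENDOR_ID, 0x1234, 0xffff);
}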
1941 
1942 static void vfio_pci_enable_rp_atomics(VFIOPCIDevice *vdev)
1943 {
1944     struct vfio_device_info_cap_pci_atomic_comp *cap;
1945     g_autofree struct vfio_device_info *info = NULL;
1946     PCIBus *bus = pci_get_bus(&vdev->pdev);
1947     PCIDevice *parent = bus->parent_dev;
1948     struct vfio_info_cap_header *hdr;
1949     uint32_t mask = 0;
1950     uint8_t *pos;
1951 
1952     /*
1953      * PCIe Atomic Ops completer support is only added automatically for single
1954      * function devices downstream of a root port supporting DEVCAP2.  Support
1955      * is added during realize and, if added, removed during device exit.  The
1956      * single function requirement avoids conflicting requirements should a
1957      * slot be composed of multiple devices with differing capabilities.
1958      */
1959     if (pci_bus_is_root(bus) || !parent || !parent->exp.exp_cap ||
1960         pcie_cap_get_type(parent) != PCI_EXP_TYPE_ROOT_PORT ||
1961         pcie_cap_get_version(parent) != PCI_EXP_FLAGS_VER2 ||
1962         vdev->pdev.devfn ||
1963         vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
1964         return;
1965     }
1966 
1967     pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2;
1968 
1969     /* Abort if there's already an Atomic Ops configuration on the root port */
1970     if (pci_get_long(pos) & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
1971                              PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
1972                              PCI_EXP_DEVCAP2_ATOMIC_COMP128)) {
1973         return;
1974     }
1975 
1976     info = vfio_get_device_info(vdev->vbasedev.fd);
1977     if (!info) {
1978         return;
1979     }
1980 
1981     hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP);
1982     if (!hdr) {
1983         return;
1984     }
1985 
1986     cap = (void *)hdr;
1987     if (cap->flags & VFIO_PCI_ATOMIC_COMP32) {
1988         mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP32;
1989     }
1990     if (cap->flags & VFIO_PCI_ATOMIC_COMP64) {
1991         mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP64;
1992     }
1993     if (cap->flags & VFIO_PCI_ATOMIC_COMP128) {
1994         mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP128;
1995     }
1996 
1997     if (!mask) {
1998         return;
1999     }
2000 
2001     pci_long_test_and_set_mask(pos, mask);
2002     vdev->clear_parent_atomics_on_exit = true;
2003 }
2004 
2005 static void vfio_pci_disable_rp_atomics(VFIOPCIDevice *vdev)
2006 {
2007     if (vdev->clear_parent_atomics_on_exit) {
2008         PCIDevice *parent = pci_get_bus(&vdev->pdev)->parent_dev;
2009         uint8_t *pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2;
2010 
2011         pci_long_test_and_clear_mask(pos, PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2012                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
2013                                           PCI_EXP_DEVCAP2_ATOMIC_COMP128);
2014     }
2015 }
2016 
2017 static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
2018                                 Error **errp)
2019 {
2020     uint16_t flags;
2021     uint8_t type;
2022 
2023     flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
2024     type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
2025 
2026     if (type != PCI_EXP_TYPE_ENDPOINT &&
2027         type != PCI_EXP_TYPE_LEG_END &&
2028         type != PCI_EXP_TYPE_RC_END) {
2029 
2030         error_setg(errp, "assignment of PCIe type 0x%x "
2031                    "devices is not currently supported", type);
2032         return false;
2033     }
2034 
2035     if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
2036         PCIBus *bus = pci_get_bus(&vdev->pdev);
2037         PCIDevice *bridge;
2038 
2039         /*
2040          * Traditionally PCI device assignment exposes the PCIe capability
2041          * as-is on non-express buses.  The reason being that some drivers
2042          * simply assume that it's there, for example tg3.  However when
2043          * we're running on a native PCIe machine type, like Q35, we need
2044          * to hide the PCIe capability.  The reason for this is twofold;
2045          * first Windows guests get a Code 10 error when the PCIe capability
2046          * is exposed in this configuration.  Therefore express devices won't
2047          * work at all unless they're attached to express buses in the VM.
2048          * Second, a native PCIe machine introduces the possibility of fine
2049          * granularity IOMMUs supporting both translation and isolation.
2050          * Guest code to discover the IOMMU visibility of a device, such as
2051          * IOMMU grouping code on Linux, is very aware of device types and
2052          * valid transitions between bus types.  An express device on a non-
2053          * express bus is not a valid combination on bare metal systems.
2054          *
2055          * Drivers that require a PCIe capability to make the device
2056          * functional are simply going to need to have their devices placed
2057          * on a PCIe bus in the VM.
2058          */
2059         while (!pci_bus_is_root(bus)) {
2060             bridge = pci_bridge_get_device(bus);
2061             bus = pci_get_bus(bridge);
2062         }
2063 
2064         if (pci_bus_is_express(bus)) {
2065             return true;
2066         }
2067 
2068     } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
2069         /*
2070          * On a Root Complex bus Endpoints become Root Complex Integrated
2071          * Endpoints, which changes the type and clears the LNK & LNK2 fields.
2072          */
2073         if (type == PCI_EXP_TYPE_ENDPOINT) {
2074             vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
2075                                    PCI_EXP_TYPE_RC_END << 4,
2076                                    PCI_EXP_FLAGS_TYPE);
2077 
2078             /* Link Capabilities, Status, and Control go away */
2079             if (size > PCI_EXP_LNKCTL) {
2080                 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
2081                 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
2082                 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
2083 
2084 #ifndef PCI_EXP_LNKCAP2
2085 #define PCI_EXP_LNKCAP2 44
2086 #endif
2087 #ifndef PCI_EXP_LNKSTA2
2088 #define PCI_EXP_LNKSTA2 50
2089 #endif
2090                 /* Link 2 Capabilities, Status, and Control go away */
2091                 if (size > PCI_EXP_LNKCAP2) {
2092                     vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
2093                     vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
2094                     vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
2095                 }
2096             }
2097 
2098         } else if (type == PCI_EXP_TYPE_LEG_END) {
2099             /*
2100              * Legacy endpoints don't belong on the root complex.  Windows
2101              * seems to be happier with devices if we skip the capability.
2102              */
2103             return true;
2104         }
2105 
2106     } else {
2107         /*
2108          * Convert Root Complex Integrated Endpoints to regular endpoints.
2109          * These devices don't support LNK/LNK2 capabilities, so make them up.
2110          */
2111         if (type == PCI_EXP_TYPE_RC_END) {
2112             vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
2113                                    PCI_EXP_TYPE_ENDPOINT << 4,
2114                                    PCI_EXP_FLAGS_TYPE);
2115             vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
2116                            QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
2117                            QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
2118             vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
2119         }
2120 
2121         vfio_pci_enable_rp_atomics(vdev);
2122     }
2123 
2124     /*
2125      * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
2126      * (Niantic erratum #35), causing Windows to error with a Code 10 for the
2127      * device on Q35.  Fixup any such devices to report version 1.  If we
2128      * were to remove the capability entirely the guest would lose extended
2129      * config space.
2130      */
2131     if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
2132         vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
2133                                1, PCI_EXP_FLAGS_VERS);
2134     }
2135 
2136     pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
2137                              errp);
2138     if (pos < 0) {
2139         return false;
2140     }
2141 
2142     vdev->pdev.exp.exp_cap = pos;
2143 
2144     return true;
2145 }
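
/*
 * Illustrative sketch, not used by the driver, of the type extraction
 * above: the device/port type occupies bits 7:4 of the PCIe capability
 * flags word.
 */
static inline uint8_t example_pcie_type(uint16_t flags)
{
    return (flags & PCI_EXP_FLAGS_TYPE) >> 4;
}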
2146 
2147 static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
2148 {
2149     uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
2150 
2151     if (cap & PCI_EXP_DEVCAP_FLR) {
2152         trace_vfio_check_pcie_flr(vdev->vbasedev.name);
2153         vdev->has_flr = true;
2154     }
2155 }
2156 
2157 static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
2158 {
2159     uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
2160 
2161     if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
2162         trace_vfio_check_pm_reset(vdev->vbasedev.name);
2163         vdev->has_pm_reset = true;
2164     }
2165 }
2166 
2167 static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
2168 {
2169     uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
2170 
2171     if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
2172         trace_vfio_check_af_flr(vdev->vbasedev.name);
2173         vdev->has_flr = true;
2174     }
2175 }
2176 
2177 static bool vfio_add_vendor_specific_cap(VFIOPCIDevice *vdev, int pos,
2178                                          uint8_t size, Error **errp)
2179 {
2180     PCIDevice *pdev = &vdev->pdev;
2181 
2182     pos = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, size, errp);
2183     if (pos < 0) {
2184         return false;
2185     }
2186 
2187     /*
2188      * Exempt the Vendor Specific Information from the config space check
2189      * during restore/load.  The check is still enforced for the 3-byte
2190      * VSC header.
2191      */
2192     if (vdev->skip_vsc_check && size > 3) {
2193         memset(pdev->cmask + pos + 3, 0, size - 3);
2194     }
2195 
2196     return true;
2197 }
2198 
2199 static bool vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
2200 {
2201     ERRP_GUARD();
2202     PCIDevice *pdev = &vdev->pdev;
2203     uint8_t cap_id, next, size;
2204     bool ret;
2205 
2206     cap_id = pdev->config[pos];
2207     next = pdev->config[pos + PCI_CAP_LIST_NEXT];
2208 
2209     /*
2210      * If it becomes important to configure capabilities to their actual
2211      * size, use this as the default when it's something we don't recognize.
2212      * Since QEMU doesn't actually handle many of the config accesses,
2213      * exact size doesn't seem worthwhile.
2214      */
2215     size = vfio_std_cap_max_size(pdev, pos);
2216 
2217     /*
2218      * pci_add_capability always inserts the new capability at the head
2219      * of the chain.  Therefore to end up with a chain that matches the
2220      * physical device, we insert from the end by making this recursive.
2221      * This is also why we pre-calculate size above as cached config space
2222      * will be changed as we unwind the stack.
2223      */
2224     if (next) {
2225         if (!vfio_add_std_cap(vdev, next, errp)) {
2226             return false;
2227         }
2228     } else {
2229         /* Begin the rebuild, use QEMU emulated list bits */
2230         pdev->config[PCI_CAPABILITY_LIST] = 0;
2231         vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
2232         vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
2233 
2234         if (!vfio_add_virt_caps(vdev, errp)) {
2235             return false;
2236         }
2237     }
2238 
2239     /* Scale down size, especially in case virt caps were added above */
2240     size = MIN(size, vfio_std_cap_max_size(pdev, pos));
2241 
2242     /* Use emulated next pointer to allow dropping caps */
2243     pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
2244 
2245     switch (cap_id) {
2246     case PCI_CAP_ID_MSI:
2247         ret = vfio_msi_setup(vdev, pos, errp);
2248         break;
2249     case PCI_CAP_ID_EXP:
2250         vfio_check_pcie_flr(vdev, pos);
2251         ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
2252         break;
2253     case PCI_CAP_ID_MSIX:
2254         ret = vfio_msix_setup(vdev, pos, errp);
2255         break;
2256     case PCI_CAP_ID_PM:
2257         vfio_check_pm_reset(vdev, pos);
2258         ret = pci_pm_init(pdev, pos, errp) >= 0;
2259         /*
2260          * PCI-core config space emulation needs write access to the power
2261          * state field enabled to track BAR mappings relative to PM state.
2262          */
2263         pci_set_word(pdev->wmask + pos + PCI_PM_CTRL, PCI_PM_CTRL_STATE_MASK);
2264         break;
2265     case PCI_CAP_ID_AF:
2266         vfio_check_af_flr(vdev, pos);
2267         ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
2268         break;
2269     case PCI_CAP_ID_VNDR:
2270         ret = vfio_add_vendor_specific_cap(vdev, pos, size, errp);
2271         break;
2272     default:
2273         ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
2274         break;
2275     }
2276 
2277     if (!ret) {
2278         error_prepend(errp,
2279                       "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
2280                       cap_id, size, pos);
2281     }
2282 
2283     return ret;
2284 }
2285 
2286 static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos)
2287 {
2288     uint32_t ctrl;
2289     int i, nbar;
2290 
2291     ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL);
2292     nbar = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
2293 
2294     for (i = 0; i < nbar; i++) {
2295         uint32_t cap;
2296         int size;
2297 
2298         ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL + (i * 8));
2299         size = (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
2300 
2301         /* The cap register reports sizes 1MB to 128TB, with 4 reserved bits */
2302         cap = size <= 27 ? 1U << (size + 4) : 0;
2303 
2304         /*
2305          * The PCIe spec (v6.0.1, 7.8.6) requires HW to support at least one
2306          * size in the range 1MB to 512GB.  We intend to mask all sizes except
2307          * the one currently enabled in the size field, therefore if it's
2308          * outside the range, hide the whole capability as this virtualization
2309          * trick won't work.  If >512GB resizable BARs start to appear, we
2310          * might need an opt-in or reservation scheme in the kernel.
2311          */
2312         if (!(cap & PCI_REBAR_CAP_SIZES)) {
2313             return -EINVAL;
2314         }
2315 
2316         /* Hide all sizes reported in the ctrl reg per above requirement. */
2317         ctrl &= (PCI_REBAR_CTRL_BAR_SIZE |
2318                  PCI_REBAR_CTRL_NBAR_MASK |
2319                  PCI_REBAR_CTRL_BAR_IDX);
2320 
2321         /*
2322          * The BAR size field is RW, however we've mangled the capability
2323      * register such that we only report a single size, i.e. the current
2324          * BAR size.  A write of an unsupported value is undefined, therefore
2325          * the register field is essentially RO.
2326          */
2327         vfio_add_emulated_long(vdev, pos + PCI_REBAR_CAP + (i * 8), cap, ~0);
2328         vfio_add_emulated_long(vdev, pos + PCI_REBAR_CTRL + (i * 8), ctrl, ~0);
2329     }
2330 
2331     return 0;
2332 }
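
/*
 * Illustrative sketch, not used by the driver, of the resizable BAR size
 * encoding handled above: a size-field value of n selects a BAR of
 * 2^(n + 20) bytes (n = 0 is 1MB, n = 27 is 128TB), and the matching bit
 * in the capability's size bitmap is bit n + 4.
 */
static inline uint64_t example_rebar_size_bytes(int size_field)
{
    return size_field <= 27 ? 1ULL << (size_field + 20) : 0;
}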
2333 
2334 static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
2335 {
2336     PCIDevice *pdev = &vdev->pdev;
2337     uint32_t header;
2338     uint16_t cap_id, next, size;
2339     uint8_t cap_ver;
2340     uint8_t *config;
2341 
2342     /* Only add extended caps if we have them and the guest can see them */
2343     if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
2344         !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
2345         return;
2346     }
2347 
2348     /*
2349      * pcie_add_capability always inserts the new capability at the tail
2350      * of the chain.  Therefore to end up with a chain that matches the
2351      * physical device, we cache the config space to avoid overwriting
2352      * the original config space when we parse the extended capabilities.
2353      */
2354     config = g_memdup(pdev->config, vdev->config_size);
2355 
2356     /*
2357      * Extended capabilities are chained with each pointing to the next, so we
2358      * can drop anything other than the head of the chain simply by modifying
2359      * the previous next pointer.  Seed the head of the chain here such that
2360      * we can simply skip any capabilities we want to drop below, regardless
2361      * of their position in the chain.  If this stub capability still exists
2362      * after we add the capabilities we want to expose, update the capability
2363      * ID to zero.  Note that we cannot seed with the capability header being
2364      * zero as this conflicts with definition of an absent capability chain
2365      * and prevents capabilities beyond the head of the list from being added.
2366      * By replacing the dummy capability ID with zero after walking the device
2367      * chain, we also transparently mark extended capabilities as absent if
2368      * no capabilities were added.  Note that the PCIe spec defines an absence
2369      * of extended capabilities to be determined by a value of zero for the
2370      * capability ID, version, AND next pointer.  A non-zero next pointer
2371      * should be sufficient to indicate additional capabilities are present,
2372      * which will occur if we call pcie_add_capability() below.  The entire
2373      * first dword is emulated to support this.
2374      *
2375      * NB. The kernel side does similar masking, so be prepared that our
2376      * view of the device may also contain a capability ID zero in the head
2377      * of the chain.  Skip it for the same reason that we cannot seed the
2378      * chain with a zero capability.
2379      */
2380     pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
2381                  PCI_EXT_CAP(0xFFFF, 0, 0));
2382     pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
2383     pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
2384 
2385     for (next = PCI_CONFIG_SPACE_SIZE; next;
2386          next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
2387         header = pci_get_long(config + next);
2388         cap_id = PCI_EXT_CAP_ID(header);
2389         cap_ver = PCI_EXT_CAP_VER(header);
2390 
2391         /*
2392          * If it becomes important to configure extended capabilities to their
2393          * actual size, use this as the default when it's something we don't
2394          * recognize. Since QEMU doesn't actually handle many of the config
2395          * accesses, exact size doesn't seem worthwhile.
2396          */
2397         size = vfio_ext_cap_max_size(config, next);
2398 
2399         /* Use emulated next pointer to allow dropping extended caps */
2400         pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
2401                                    PCI_EXT_CAP_NEXT_MASK);
2402 
2403         switch (cap_id) {
2404         case 0: /* kernel masked capability */
2405         case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
2406         case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
2407             trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
2408             break;
2409         case PCI_EXT_CAP_ID_REBAR:
2410             if (!vfio_setup_rebar_ecap(vdev, next)) {
2411                 pcie_add_capability(pdev, cap_id, cap_ver, next, size);
2412             }
2413             break;
2414         default:
2415             pcie_add_capability(pdev, cap_id, cap_ver, next, size);
2416         }
2417 
2418     }
2419 
2420     /* Cleanup chain head ID if necessary */
2421     if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
2422         pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
2423     }
2424 
2425     g_free(config);
2426 }
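
/*
 * Illustrative sketch, not used by the driver, of the extended capability
 * header manipulated above: the 32-bit header packs the capability ID in
 * bits 15:0, the version in bits 19:16 and the next pointer in bits
 * 31:20, which is what the PCI_EXT_CAP() seed value composes.
 */
static inline uint32_t example_ext_cap_header(uint16_t id, uint8_t ver,
                                              uint16_t next)
{
    return id | (((uint32_t)ver & 0xf) << 16) |
           (((uint32_t)next & 0xffc) << 20);
}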
2427 
2428 static bool vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
2429 {
2430     PCIDevice *pdev = &vdev->pdev;
2431 
2432     if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
2433         !pdev->config[PCI_CAPABILITY_LIST]) {
2434         return true; /* Nothing to add */
2435     }
2436 
2437     if (!vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp)) {
2438         return false;
2439     }
2440 
2441     vfio_add_ext_cap(vdev);
2442     return true;
2443 }
2444 
2445 void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
2446 {
2447     PCIDevice *pdev = &vdev->pdev;
2448     uint16_t cmd;
2449 
2450     vfio_disable_interrupts(vdev);
2451 
2452     /*
2453      * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
2454      * Also put INTx Disable in a known state.
2455      */
2456     cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
2457     cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
2458              PCI_COMMAND_INTX_DISABLE);
2459     vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
2460 
2461     /* Make sure the device is in D0 */
2462     if (pdev->pm_cap) {
2463         uint16_t pmcsr;
2464         uint8_t state;
2465 
2466         pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2);
2467         state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2468         if (state) {
2469             pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2470             vfio_pci_write_config(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
2471             /* vfio handles the necessary delay here */
2472             pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2);
2473             state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2474             if (state) {
2475                 error_report("vfio: Unable to power on device, stuck in D%d",
2476                              state);
2477             }
2478         }
2479     }
2480 }
2481 
2482 void vfio_pci_post_reset(VFIOPCIDevice *vdev)
2483 {
2484     VFIODevice *vbasedev = &vdev->vbasedev;
2485     Error *err = NULL;
2486     int ret, nr;
2487 
2488     if (!vfio_intx_enable(vdev, &err)) {
2489         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2490     }
2491 
2492     for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
2493         off_t addr = PCI_BASE_ADDRESS_0 + (4 * nr);
2494         uint32_t val = 0;
2495         uint32_t len = sizeof(val);
2496 
2497         ret = vfio_pci_config_space_write(vdev, addr, len, &val);
2498         if (ret != len) {
2499             error_report("%s(%s) reset bar %d failed: %s", __func__,
2500                          vbasedev->name, nr, strwriteerror(ret));
2501         }
2502     }
2503 
2504     vfio_quirk_reset(vdev);
2505 }
2506 
2507 bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
2508 {
2509     char tmp[13];
2510 
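    /* "dddd:bb:dd.f" formats to exactly 12 characters plus the NUL */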
2511     sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
2512             addr->bus, addr->slot, addr->function);
2513 
2514     return (strcmp(tmp, name) == 0);
2515 }
2516 
2517 int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
2518                                     struct vfio_pci_hot_reset_info **info_p)
2519 {
2520     struct vfio_pci_hot_reset_info *info;
2521     int ret, count;
2522 
2523     assert(info_p && !*info_p);
2524 
2525     info = g_malloc0(sizeof(*info));
2526     info->argsz = sizeof(*info);
2527 
2528     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2529     if (ret && errno != ENOSPC) {
2530         ret = -errno;
2531         g_free(info);
2532         if (!vdev->has_pm_reset) {
2533             error_report("vfio: Cannot reset device %s, "
2534                          "no available reset mechanism.", vdev->vbasedev.name);
2535         }
2536         return ret;
2537     }
2538 
2539     count = info->count;
2540     info = g_realloc(info, sizeof(*info) + (count * sizeof(info->devices[0])));
2541     info->argsz = sizeof(*info) + (count * sizeof(info->devices[0]));
2542 
2543     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2544     if (ret) {
2545         ret = -errno;
2546         g_free(info);
2547         error_report("vfio: hot reset info failed: %m");
2548         return ret;
2549     }
2550 
2551     *info_p = info;
2552     return 0;
2553 }
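
/*
 * Note on the pattern above, for illustration only: this is the standard
 * two-pass protocol for variable-length VFIO ioctls.  The first call,
 * sized for the fixed header alone, fails with ENOSPC but fills in
 * 'count'; the caller then reallocates with room for 'count' trailing
 * device entries and repeats the ioctl to fetch the full array.
 */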
2554 
2555 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
2556 {
2557     VFIODevice *vbasedev = &vdev->vbasedev;
2558     const VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer);
2559 
2560     return vioc->pci_hot_reset(vbasedev, single);
2561 }
2562 
2563 /*
2564  * We want to differentiate hot reset of multiple in-use devices vs hot reset
2565  * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
2566  * of doing hot resets when there is only a single device per bus.  'In-use'
2567  * here refers to how many VFIODevices are affected.  A hot reset that affects
2568  * multiple devices, but only a single in-use device, means that we can call
2569  * it from our bus ->reset() callback since the extent is effectively a single
2570  * device.  This allows us to make use of it in the hotplug path.  When there
2571  * are multiple in-use devices, we can only trigger the hot reset during a
2572  * system reset and thus from our reset handler.  We separate _one vs _multi
2573  * here so that we don't overlap and do a double reset on the system reset
2574  * path where both our reset handler and ->reset() callback are used.  Calling
2575  * _one() will only do a hot reset in the single in-use device case; calling
2576  * _multi() will do nothing if a _one() would have been sufficient.
2577  */
2578 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
2579 {
2580     return vfio_pci_hot_reset(vdev, true);
2581 }
2582 
2583 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
2584 {
2585     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2586     return vfio_pci_hot_reset(vdev, false);
2587 }
2588 
2589 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2590 {
2591     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2592     if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2593         vbasedev->needs_reset = true;
2594     }
2595 }
2596 
2597 static Object *vfio_pci_get_object(VFIODevice *vbasedev)
2598 {
2599     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2600 
2601     return OBJECT(vdev);
2602 }
2603 
2604 static bool vfio_msix_present(void *opaque, int version_id)
2605 {
2606     PCIDevice *pdev = opaque;
2607 
2608     return msix_present(pdev);
2609 }
2610 
2611 static bool vfio_display_migration_needed(void *opaque)
2612 {
2613     VFIOPCIDevice *vdev = opaque;
2614 
2615     /*
2616      * We need to migrate the VFIODisplay object if ramfb *migration* was
2617      * explicitly requested (in which case we enforced both ramfb=on and
2618      * display=on), or ramfb migration was left at the default "auto"
2619      * setting, and *ramfb* was explicitly requested (in which case we
2620      * enforced display=on).
2621      */
2622     return vdev->ramfb_migrate == ON_OFF_AUTO_ON ||
2623         (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO && vdev->enable_ramfb);
2624 }
2625 
2626 static const VMStateDescription vmstate_vfio_display = {
2627     .name = "VFIOPCIDevice/VFIODisplay",
2628     .version_id = 1,
2629     .minimum_version_id = 1,
2630     .needed = vfio_display_migration_needed,
2631     .fields = (const VMStateField[]){
2632         VMSTATE_STRUCT_POINTER(dpy, VFIOPCIDevice, vfio_display_vmstate,
2633                                VFIODisplay),
2634         VMSTATE_END_OF_LIST()
2635     }
2636 };
2637 
2638 static const VMStateDescription vmstate_vfio_pci_config = {
2639     .name = "VFIOPCIDevice",
2640     .version_id = 1,
2641     .minimum_version_id = 1,
2642     .fields = (const VMStateField[]) {
2643         VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice),
2644         VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present),
2645         VMSTATE_END_OF_LIST()
2646     },
2647     .subsections = (const VMStateDescription * const []) {
2648         &vmstate_vfio_display,
2649         NULL
2650     }
2651 };
2652 
2653 static int vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f, Error **errp)
2654 {
2655     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2656 
2657     return vmstate_save_state_with_err(f, &vmstate_vfio_pci_config, vdev, NULL,
2658                                        errp);
2659 }
2660 
2661 static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
2662 {
2663     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2664     PCIDevice *pdev = &vdev->pdev;
2665     pcibus_t old_addr[PCI_NUM_REGIONS - 1];
2666     int bar, ret;
2667 
2668     for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
2669         old_addr[bar] = pdev->io_regions[bar].addr;
2670     }
2671 
2672     ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1);
2673     if (ret) {
2674         return ret;
2675     }
2676 
2677     vfio_pci_write_config(pdev, PCI_COMMAND,
2678                           pci_get_word(pdev->config + PCI_COMMAND), 2);
2679 
2680     for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
2681         /*
2682          * The address may not be changed in some scenarios
2683          * (e.g. the VF driver isn't loaded in VM).
2684          */
2685         if (old_addr[bar] != pdev->io_regions[bar].addr &&
2686             vdev->bars[bar].region.size > 0 &&
2687             vdev->bars[bar].region.size < qemu_real_host_page_size()) {
2688             vfio_sub_page_bar_update_mapping(pdev, bar);
2689         }
2690     }
2691 
2692     if (msi_enabled(pdev)) {
2693         vfio_msi_enable(vdev);
2694     } else if (msix_enabled(pdev)) {
2695         vfio_msix_enable(vdev);
2696     }
2697 
2698     return ret;
2699 }
2700 
2701 static VFIODeviceOps vfio_pci_ops = {
2702     .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2703     .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
2704     .vfio_eoi = vfio_intx_eoi,
2705     .vfio_get_object = vfio_pci_get_object,
2706     .vfio_save_config = vfio_pci_save_config,
2707     .vfio_load_config = vfio_pci_load_config,
2708 };
2709 
2710 bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
2711 {
2712     VFIODevice *vbasedev = &vdev->vbasedev;
2713     struct vfio_region_info *reg_info = NULL;
2714     int ret;
2715 
2716     ret = vfio_device_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2717     if (ret) {
2718         error_setg_errno(errp, -ret,
2719                          "failed getting region info for VGA region index %d",
2720                          VFIO_PCI_VGA_REGION_INDEX);
2721         return false;
2722     }
2723 
2724     if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2725         !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2726         reg_info->size < 0xbffff + 1) {
2727         error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
2728                    (unsigned long)reg_info->flags,
2729                    (unsigned long)reg_info->size);
2730         return false;
2731     }
2732 
2733     vdev->vga = g_new0(VFIOVGA, 1);
2734 
2735     vdev->vga->fd_offset = reg_info->offset;
2736     vdev->vga->fd = vdev->vbasedev.fd;
2737 
2738     vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2739     vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2740     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
2741 
2742     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2743                           OBJECT(vdev), &vfio_vga_ops,
2744                           &vdev->vga->region[QEMU_PCI_VGA_MEM],
2745                           "vfio-vga-mmio@0xa0000",
2746                           QEMU_PCI_VGA_MEM_SIZE);
2747 
2748     vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2749     vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2750     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
2751 
2752     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2753                           OBJECT(vdev), &vfio_vga_ops,
2754                           &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2755                           "vfio-vga-io@0x3b0",
2756                           QEMU_PCI_VGA_IO_LO_SIZE);
2757 
2758     vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2759     vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2760     QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
2761 
2762     memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2763                           OBJECT(vdev), &vfio_vga_ops,
2764                           &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2765                           "vfio-vga-io@0x3c0",
2766                           QEMU_PCI_VGA_IO_HI_SIZE);
2767 
2768     pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2769                      &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2770                      &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2771 
2772     return true;
2773 }
2774 
2775 static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
2776 {
2777     VFIODevice *vbasedev = &vdev->vbasedev;
2778     struct vfio_region_info *reg_info = NULL;
2779     struct vfio_irq_info irq_info;
2780     int i, ret = -1;
2781 
2782     /* Sanity check device */
2783     if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2784         error_setg(errp, "this isn't a PCI device");
2785         return false;
2786     }
2787 
2788     if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2789         error_setg(errp, "unexpected number of io regions %u",
2790                    vbasedev->num_regions);
2791         return false;
2792     }
2793 
2794     if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2795         error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
2796         return false;
2797     }
2798 
2799     for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
2800         char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2801 
2802         ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2803                                 &vdev->bars[i].region, i, name);
2804         g_free(name);
2805 
2806         if (ret) {
2807             error_setg_errno(errp, -ret, "failed to get region %d info", i);
2808             return false;
2809         }
2810 
2811         QLIST_INIT(&vdev->bars[i].quirks);
2812     }
2813 
2814     ret = vfio_device_get_region_info(vbasedev,
2815                                       VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
2816     if (ret) {
2817         error_setg_errno(errp, -ret, "failed to get config info");
2818         return false;
2819     }
2820 
2821     trace_vfio_populate_device_config(vdev->vbasedev.name,
2822                                       (unsigned long)reg_info->size,
2823                                       (unsigned long)reg_info->offset,
2824                                       (unsigned long)reg_info->flags);
2825 
2826     vdev->config_size = reg_info->size;
2827     if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2828         vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2829     }
2830     vdev->config_offset = reg_info->offset;
2831 
2832     if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2833         if (!vfio_populate_vga(vdev, errp)) {
2834             error_append_hint(errp, "device does not support "
2835                               "requested feature x-vga\n");
2836             return false;
2837         }
2838     }
2839 
2840     ret = vfio_device_get_irq_info(vbasedev, VFIO_PCI_ERR_IRQ_INDEX, &irq_info);
2841     if (ret) {
2842         /* This can fail for an old kernel or legacy PCI dev */
2843         trace_vfio_populate_device_get_irq_info_failure(strerror(-ret));
2844     } else if (irq_info.count == 1) {
2845         vdev->pci_aer = true;
2846     } else {
2847         warn_report(VFIO_MSG_PREFIX
2848                     "Could not enable error recovery for the device",
2849                     vbasedev->name);
2850     }
2851 
2852     return true;
2853 }
2854 
2855 static void vfio_pci_put_device(VFIOPCIDevice *vdev)
2856 {
2857     vfio_device_detach(&vdev->vbasedev);
2858 
2859     g_free(vdev->vbasedev.name);
2860     g_free(vdev->msix);
2861 }
2862 
2863 static void vfio_err_notifier_handler(void *opaque)
2864 {
2865     VFIOPCIDevice *vdev = opaque;
2866 
2867     if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2868         return;
2869     }
2870 
2871     /*
2872      * TBD. Retrieve the error details and decide what action
2873      * needs to be taken. One of the actions could be to pass
2874      * the error to the guest and have the guest driver recover
2875      * from the error. This requires that PCIe capabilities be
2876      * exposed to the guest. For now, we just terminate the
2877      * guest to contain the error.
2878      */
2879 
2880     error_report("%s(%s) Unrecoverable error detected. Please collect any"
                      " data possible and then kill the guest",
                      __func__, vdev->vbasedev.name);
2881 
2882     vm_stop(RUN_STATE_INTERNAL_ERROR);
2883 }
2884 
2885 /*
2886  * Registers error notifier for devices supporting error recovery.
2887  * If we encounter a failure in this function, we report an error
2888  * and continue after disabling error recovery support for the
2889  * device.
2890  */
2891 static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
2892 {
2893     Error *err = NULL;
2894     int32_t fd;
2895 
2896     if (!vdev->pci_aer) {
2897         return;
2898     }
2899 
2900     if (event_notifier_init(&vdev->err_notifier, 0)) {
2901         error_report("vfio: Unable to init event notifier for error detection");
2902         vdev->pci_aer = false;
2903         return;
2904     }
2905 
2906     fd = event_notifier_get_fd(&vdev->err_notifier);
2907     qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);
2908 
2909     if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
2910                                        VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
2911         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2912         qemu_set_fd_handler(fd, NULL, NULL, vdev);
2913         event_notifier_cleanup(&vdev->err_notifier);
2914         vdev->pci_aer = false;
2915     }
2916 }
2917 
2918 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
2919 {
2920     Error *err = NULL;
2921 
2922     if (!vdev->pci_aer) {
2923         return;
2924     }
2925 
2926     if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
2927                                        VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
2928         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2929     }
2930     qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2931                         NULL, NULL, vdev);
2932     event_notifier_cleanup(&vdev->err_notifier);
2933 }
2934 
2935 static void vfio_req_notifier_handler(void *opaque)
2936 {
2937     VFIOPCIDevice *vdev = opaque;
2938     Error *err = NULL;
2939 
2940     if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2941         return;
2942     }
2943 
2944     qdev_unplug(DEVICE(vdev), &err);
2945     if (err) {
2946         warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2947     }
2948 }
2949 
2950 static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2951 {
2952     struct vfio_irq_info irq_info;
2953     Error *err = NULL;
2954     int32_t fd;
2955     int ret;
2956 
2957     if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2958         return;
2959     }
2960 
2961     ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX,
2962                                    &irq_info);
2963     if (ret < 0 || irq_info.count < 1) {
2964         return;
2965     }
2966 
2967     if (event_notifier_init(&vdev->req_notifier, 0)) {
2968         error_report("vfio: Unable to init event notifier for device request");
2969         return;
2970     }
2971 
2972     fd = event_notifier_get_fd(&vdev->req_notifier);
2973     qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);
2974 
2975     if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
2976                                        VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
2977         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2978         qemu_set_fd_handler(fd, NULL, NULL, vdev);
2979         event_notifier_cleanup(&vdev->req_notifier);
2980     } else {
2981         vdev->req_enabled = true;
2982     }
2983 }
2984 
2985 static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2986 {
2987     Error *err = NULL;
2988 
2989     if (!vdev->req_enabled) {
2990         return;
2991     }
2992 
2993     if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
2994                                        VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
2995         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2996     }
2997     qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2998                         NULL, NULL, vdev);
2999     event_notifier_cleanup(&vdev->req_notifier);
3000 
3001     vdev->req_enabled = false;
3002 }
3003 
3004 static bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp)
3005 {
3006     PCIDevice *pdev = &vdev->pdev;
3007     VFIODevice *vbasedev = &vdev->vbasedev;
3008 
3009     /* vfio emulates a lot for us, but some bits need extra love */
3010     vdev->emulated_config_bits = g_malloc0(vdev->config_size);
3011 
3012     /* QEMU can choose to expose the ROM or not */
3013     memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
3014     /* QEMU can also add or extend BARs */
3015     memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);
3016 
3017     /*
3018      * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
3019      * device ID is managed by the vendor and need only be a 16-bit value.
3020      * Allow any 16-bit value for subsystem IDs so they can be hidden or changed.
3021      */
3022     if (vdev->vendor_id != PCI_ANY_ID) {
3023         if (vdev->vendor_id >= 0xffff) {
3024             error_setg(errp, "invalid PCI vendor ID provided");
3025             return false;
3026         }
3027         vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
3028         trace_vfio_pci_emulated_vendor_id(vbasedev->name, vdev->vendor_id);
3029     } else {
3030         vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
3031     }
3032 
3033     if (vdev->device_id != PCI_ANY_ID) {
3034         if (vdev->device_id > 0xffff) {
3035             error_setg(errp, "invalid PCI device ID provided");
3036             return false;
3037         }
3038         vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
3039         trace_vfio_pci_emulated_device_id(vbasedev->name, vdev->device_id);
3040     } else {
3041         vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
3042     }
3043 
3044     if (vdev->sub_vendor_id != PCI_ANY_ID) {
3045         if (vdev->sub_vendor_id > 0xffff) {
3046             error_setg(errp, "invalid PCI subsystem vendor ID provided");
3047             return false;
3048         }
3049         vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
3050                                vdev->sub_vendor_id, ~0);
3051         trace_vfio_pci_emulated_sub_vendor_id(vbasedev->name,
3052                                               vdev->sub_vendor_id);
3053     }
3054 
3055     if (vdev->sub_device_id != PCI_ANY_ID) {
3056         if (vdev->sub_device_id > 0xffff) {
3057             error_setg(errp, "invalid PCI subsystem device ID provided");
3058             return false;
3059         }
3060         vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
3061         trace_vfio_pci_emulated_sub_device_id(vbasedev->name,
3062                                               vdev->sub_device_id);
3063     }
3064 
3065     /* QEMU can change multi-function devices to single function, or the reverse */
3066     vdev->emulated_config_bits[PCI_HEADER_TYPE] =
3067                                               PCI_HEADER_TYPE_MULTI_FUNCTION;
3068 
3069     /* Restore or clear multifunction, this is always controlled by QEMU */
3070     if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
3071         vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
3072     } else {
3073         vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
3074     }
3075 
3076     /*
3077      * Clear host resource mapping info.  If we choose not to register a
3078      * BAR, such as might be the case with the option ROM, we can get
3079      * confusing, unwritable, residual addresses from the host here.
3080      */
3081     memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
3082     memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
3083 
3084     vfio_pci_size_rom(vdev);
3085 
3086     vfio_bars_prepare(vdev);
3087 
3088     if (!vfio_msix_early_setup(vdev, errp)) {
3089         return false;
3090     }
3091 
3092     vfio_bars_register(vdev);
3093 
3094     return true;
3095 }
3096 
3097 static bool vfio_interrupt_setup(VFIOPCIDevice *vdev, Error **errp)
3098 {
3099     PCIDevice *pdev = &vdev->pdev;
3100 
3101     /* QEMU emulates all of MSI & MSI-X */
3102     if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
3103         memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
3104                MSIX_CAP_LENGTH);
3105     }
3106 
3107     if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
3108         memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
3109                vdev->msi_cap_size);
3110     }
3111 
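         /* Only devices reporting a non-zero interrupt pin use INTx at all. */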
3112     if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
3113         vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
3114                                              vfio_intx_mmap_enable, vdev);
3115         pci_device_set_intx_routing_notifier(&vdev->pdev,
3116                                              vfio_intx_routing_notifier);
3117         vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
3118         kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
3119         if (!vfio_intx_enable(vdev, errp)) {
3120             timer_free(vdev->intx.mmap_timer);
3121             pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3122             kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3123             return false;
3124         }
3125     }
3126     return true;
3127 }
3128 
3129 static void vfio_realize(PCIDevice *pdev, Error **errp)
3130 {
3131     ERRP_GUARD();
3132     VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
3133     VFIODevice *vbasedev = &vdev->vbasedev;
3134     int i, ret;
3135     char uuid[UUID_STR_LEN];
3136     g_autofree char *name = NULL;
3137     uint32_t config_space_size;
3138 
3139     if (vbasedev->fd < 0 && !vbasedev->sysfsdev) {
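             /*
              * The host.* fields are initialized to ~0U in vfio_instance_init(),
              * so all-ones here means no host= address was supplied either.
              */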
3140         if (!(~vdev->host.domain || ~vdev->host.bus ||
3141               ~vdev->host.slot || ~vdev->host.function)) {
3142             error_setg(errp, "No host device provided");
3143             error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
3144 #ifdef CONFIG_IOMMUFD
3145                               "or -device vfio-pci,fd=DEVICE_FD "
3146 #endif
3147                               "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
3148             return;
3149         }
3150         vbasedev->sysfsdev =
3151             g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
3152                             vdev->host.domain, vdev->host.bus,
3153                             vdev->host.slot, vdev->host.function);
3154     }
3155 
3156     if (!vfio_device_get_name(vbasedev, errp)) {
3157         return;
3158     }
3159 
3160     /*
3161      * Mediated devices *might* operate compatibly with discarding of RAM, but
3162      * we cannot know for certain, it depends on whether the mdev vendor driver
3163      * stays in sync with the active working set of the guest driver.  Prevent
3164      * the x-balloon-allowed option unless this is at least an mdev device.
3165      */
3166     vbasedev->mdev = vfio_device_is_mdev(vbasedev);
3167 
3168     trace_vfio_mdev(vbasedev->name, vbasedev->mdev);
3169 
3170     if (vbasedev->ram_block_discard_allowed && !vbasedev->mdev) {
3171         error_setg(errp, "x-balloon-allowed only potentially compatible "
3172                    "with mdev devices");
3173         goto error;
3174     }
3175 
3176     if (!qemu_uuid_is_null(&vdev->vf_token)) {
3177         qemu_uuid_unparse(&vdev->vf_token, uuid);
3178         name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid);
3179     } else {
3180         name = g_strdup(vbasedev->name);
3181     }
3182 
3183     if (!vfio_device_attach(name, vbasedev,
3184                             pci_device_iommu_address_space(pdev), errp)) {
3185         goto error;
3186     }
3187 
3188     if (!vfio_populate_device(vdev, errp)) {
3189         goto error;
3190     }
3191 
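         /*
          * The emulated config space and the device's config region may
          * differ in size (e.g. 256 bytes conventional vs. 4K express), so
          * only the overlap is copied.
          */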
3192     config_space_size = MIN(pci_config_size(&vdev->pdev), vdev->config_size);
3193 
3194     /* Get a copy of config space */
3195     ret = vfio_pci_config_space_read(vdev, 0, config_space_size,
3196                                      vdev->pdev.config);
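         /* A negative return is a -errno; a short read is mapped to EFAULT. */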
3197     if (ret < (int)config_space_size) {
3198         ret = ret < 0 ? -ret : EFAULT;
3199         error_setg_errno(errp, ret, "failed to read device config space");
3200         goto error;
3201     }
3202 
3203     if (!vfio_pci_config_setup(vdev, errp)) {
3204         goto error;
3205     }
3206 
3207     if (!vbasedev->mdev &&
3208         !pci_device_set_iommu_device(pdev, vbasedev->hiod, errp)) {
3209         error_prepend(errp, "Failed to set vIOMMU: ");
3210         goto out_teardown;
3211     }
3212 
3213     if (!vfio_add_capabilities(vdev, errp)) {
3214         goto out_unset_idev;
3215     }
3216 
3217     if (!vfio_config_quirk_setup(vdev, errp)) {
3218         goto out_unset_idev;
3219     }
3220 
3221     if (vdev->vga) {
3222         vfio_vga_quirk_setup(vdev);
3223     }
3224 
3225     for (i = 0; i < PCI_ROM_SLOT; i++) {
3226         vfio_bar_quirk_setup(vdev, i);
3227     }
3228 
3229     if (!vfio_interrupt_setup(vdev, errp)) {
3230         goto out_unset_idev;
3231     }
3232 
3233     if (vdev->display != ON_OFF_AUTO_OFF) {
3234         if (!vfio_display_probe(vdev, errp)) {
3235             goto out_deregister;
3236         }
3237     }
3238     if (vdev->enable_ramfb && vdev->dpy == NULL) {
3239         error_setg(errp, "ramfb=on requires display=on");
3240         goto out_deregister;
3241     }
3242     if (vdev->display_xres || vdev->display_yres) {
3243         if (vdev->dpy == NULL) {
3244             error_setg(errp, "xres and yres properties require display=on");
3245             goto out_deregister;
3246         }
3247         if (vdev->dpy->edid_regs == NULL) {
3248             error_setg(errp, "xres and yres properties need edid support");
3249             goto out_deregister;
3250         }
3251     }
3252 
3253     if (vdev->ramfb_migrate == ON_OFF_AUTO_ON && !vdev->enable_ramfb) {
3254         warn_report("x-ramfb-migrate=on but ramfb=off. "
3255                     "Forcing x-ramfb-migrate to off.");
3256         vdev->ramfb_migrate = ON_OFF_AUTO_OFF;
3257     }
3258     if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
3259         if (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO) {
3260             vdev->ramfb_migrate = ON_OFF_AUTO_OFF;
3261         } else if (vdev->ramfb_migrate == ON_OFF_AUTO_ON) {
3262             error_setg(errp, "x-ramfb-migrate requires enable-migration");
3263             goto out_deregister;
3264         }
3265     }
3266 
3267     if (!pdev->failover_pair_id) {
3268         if (!vfio_migration_realize(vbasedev, errp)) {
3269             goto out_deregister;
3270         }
3271     }
3272 
3273     vfio_register_err_notifier(vdev);
3274     vfio_register_req_notifier(vdev);
3275     vfio_setup_resetfn_quirk(vdev);
3276 
3277     return;
3278 
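     /*
      * The labels below unwind in roughly the reverse order of setup; each
      * label falls through to the cleanups beneath it.
      */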
3279 out_deregister:
3280     if (vdev->interrupt == VFIO_INT_INTx) {
3281         vfio_intx_disable(vdev);
3282     }
3283     pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3284     if (vdev->irqchip_change_notifier.notify) {
3285         kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3286     }
3287     if (vdev->intx.mmap_timer) {
3288         timer_free(vdev->intx.mmap_timer);
3289     }
3290 out_unset_idev:
3291     if (!vbasedev->mdev) {
3292         pci_device_unset_iommu_device(pdev);
3293     }
3294 out_teardown:
3295     vfio_teardown_msi(vdev);
3296     vfio_bars_exit(vdev);
3297 error:
3298     error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name);
3299 }
3300 
3301 static void vfio_instance_finalize(Object *obj)
3302 {
3303     VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
3304 
3305     vfio_display_finalize(vdev);
3306     vfio_bars_finalize(vdev);
3307     g_free(vdev->emulated_config_bits);
3308     g_free(vdev->rom);
3309     /*
3310      * XXX Leaking igd_opregion is not an oversight: we can't remove the
3311      * fw_cfg entry, so leaking this allocation seems like the safest
3312      * option.
3313      *
3314      * g_free(vdev->igd_opregion);
3315      */
3316     vfio_pci_put_device(vdev);
3317 }
3318 
3319 static void vfio_exitfn(PCIDevice *pdev)
3320 {
3321     VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
3322     VFIODevice *vbasedev = &vdev->vbasedev;
3323 
3324     vfio_unregister_req_notifier(vdev);
3325     vfio_unregister_err_notifier(vdev);
3326     pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3327     if (vdev->irqchip_change_notifier.notify) {
3328         kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3329     }
3330     vfio_disable_interrupts(vdev);
3331     if (vdev->intx.mmap_timer) {
3332         timer_free(vdev->intx.mmap_timer);
3333     }
3334     vfio_teardown_msi(vdev);
3335     vfio_pci_disable_rp_atomics(vdev);
3336     vfio_bars_exit(vdev);
3337     vfio_migration_exit(vbasedev);
3338     if (!vbasedev->mdev) {
3339         pci_device_unset_iommu_device(pdev);
3340     }
3341 }
3342 
3343 static void vfio_pci_reset(DeviceState *dev)
3344 {
3345     VFIOPCIDevice *vdev = VFIO_PCI_BASE(dev);
3346 
3347     trace_vfio_pci_reset(vdev->vbasedev.name);
3348 
3349     vfio_pci_pre_reset(vdev);
3350 
3351     if (vdev->display != ON_OFF_AUTO_OFF) {
3352         vfio_display_reset(vdev);
3353     }
3354 
3355     if (vdev->resetfn && !vdev->resetfn(vdev)) {
3356         goto post_reset;
3357     }
3358 
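         /*
          * Try the device's own reset first if it has FLR, or if PM reset is
          * unavailable anyway; a device with only PM reset is better served
          * by the bus reset attempt below.
          */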
3359     if (vdev->vbasedev.reset_works &&
3360         (vdev->has_flr || !vdev->has_pm_reset) &&
3361         !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
3362         trace_vfio_pci_reset_flr(vdev->vbasedev.name);
3363         goto post_reset;
3364     }
3365 
3366     /* See if we can do our own bus reset */
3367     if (!vfio_pci_hot_reset_one(vdev)) {
3368         goto post_reset;
3369     }
3370 
3371     /* If nothing else works and the device supports PM reset, use it */
3372     if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
3373         !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
3374         trace_vfio_pci_reset_pm(vdev->vbasedev.name);
3375         goto post_reset;
3376     }
3377 
3378 post_reset:
3379     vfio_pci_post_reset(vdev);
3380 }
3381 
3382 static void vfio_instance_init(Object *obj)
3383 {
3384     PCIDevice *pci_dev = PCI_DEVICE(obj);
3385     VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
3386     VFIODevice *vbasedev = &vdev->vbasedev;
3387 
3388     device_add_bootindex_property(obj, &vdev->bootindex,
3389                                   "bootindex", NULL,
3390                                   &pci_dev->qdev);
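         /* All-ones marks the host PCI address as unset; vfio_realize() tests this. */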
3391     vdev->host.domain = ~0U;
3392     vdev->host.bus = ~0U;
3393     vdev->host.slot = ~0U;
3394     vdev->host.function = ~0U;
3395 
3396     vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_pci_ops,
3397                      DEVICE(vdev), false);
3398 
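         /* 0xFF presumably serves as the "not configured" sentinel; valid cliques are 0-15. */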
3399     vdev->nv_gpudirect_clique = 0xFF;
3400 
3401     /* QEMU_PCI_CAP_EXPRESS initialization does not depend on the QEMU command
3402      * line, so there is no need to wait until realize, unlike other devices. */
3403     pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
3404 }
3405 
3406 static void vfio_pci_base_dev_class_init(ObjectClass *klass, const void *data)
3407 {
3408     DeviceClass *dc = DEVICE_CLASS(klass);
3409     PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
3410 
3411     dc->desc = "VFIO PCI base device";
3412     set_bit(DEVICE_CATEGORY_MISC, dc->categories);
3413     pdc->exit = vfio_exitfn;
3414     pdc->config_read = vfio_pci_read_config;
3415     pdc->config_write = vfio_pci_write_config;
3416 }
3417 
3418 static const TypeInfo vfio_pci_base_dev_info = {
3419     .name = TYPE_VFIO_PCI_BASE,
3420     .parent = TYPE_PCI_DEVICE,
3421     .instance_size = 0,
3422     .abstract = true,
3423     .class_init = vfio_pci_base_dev_class_init,
3424     .interfaces = (const InterfaceInfo[]) {
3425         { INTERFACE_PCIE_DEVICE },
3426         { INTERFACE_CONVENTIONAL_PCI_DEVICE },
3427         { }
3428     },
3429 };
3430 
3431 static PropertyInfo vfio_pci_migration_multifd_transfer_prop;
3432 
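     /*
      * Note that properties prefixed with "x-" are experimental or debugging
      * options, outside QEMU's usual cross-version compatibility guarantees.
      */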
3433 static const Property vfio_pci_dev_properties[] = {
3434     DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
3435     DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token),
3436     DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
3437     DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice,
3438                             vbasedev.pre_copy_dirty_page_tracking,
3439                             ON_OFF_AUTO_ON),
3440     DEFINE_PROP_ON_OFF_AUTO("x-device-dirty-page-tracking", VFIOPCIDevice,
3441                             vbasedev.device_dirty_page_tracking,
3442                             ON_OFF_AUTO_ON),
3443     DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
3444                             display, ON_OFF_AUTO_OFF),
3445     DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
3446     DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
3447     DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
3448                        intx.mmap_timeout, 1100),
3449     DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
3450                     VFIO_FEATURE_ENABLE_VGA_BIT, false),
3451     DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
3452                     VFIO_FEATURE_ENABLE_REQ_BIT, true),
3453     DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
3454                     VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, true),
3455     DEFINE_PROP_BIT("x-igd-lpc", VFIOPCIDevice, features,
3456                     VFIO_FEATURE_ENABLE_IGD_LPC_BIT, false),
3457     DEFINE_PROP_ON_OFF_AUTO("x-igd-legacy-mode", VFIOPCIDevice,
3458                             igd_legacy_mode, ON_OFF_AUTO_AUTO),
3459     DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
3460                             vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
3461     DEFINE_PROP("x-migration-multifd-transfer", VFIOPCIDevice,
3462                 vbasedev.migration_multifd_transfer,
3463                 vfio_pci_migration_multifd_transfer_prop, OnOffAuto,
3464                 .set_default = true, .defval.i = ON_OFF_AUTO_AUTO),
3465     DEFINE_PROP_BOOL("migration-events", VFIOPCIDevice,
3466                      vbasedev.migration_events, false),
3467     DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
3468     DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
3469                      vbasedev.ram_block_discard_allowed, false),
3470     DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
3471     DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
3472     DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
3473     DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
3474                      no_geforce_quirks, false),
3475     DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
3476                      false),
3477     DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
3478                      false),
3479     DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
3480     DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
3481     DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
3482                        sub_vendor_id, PCI_ANY_ID),
3483     DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
3484                        sub_device_id, PCI_ANY_ID),
3485     DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
3486     DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
3487                                    nv_gpudirect_clique,
3488                                    qdev_prop_nv_gpudirect_clique, uint8_t),
3489     DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
3490                                 OFF_AUTO_PCIBAR_OFF),
3491 #ifdef CONFIG_IOMMUFD
3492     DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd,
3493                      TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
3494 #endif
3495     DEFINE_PROP_BOOL("skip-vsc-check", VFIOPCIDevice, skip_vsc_check, true),
3496 };
3497 
3498 #ifdef CONFIG_IOMMUFD
3499 static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp)
3500 {
3501     VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
3502     vfio_device_set_fd(&vdev->vbasedev, str, errp);
3503 }
3504 #endif
3505 
3506 static void vfio_pci_dev_class_init(ObjectClass *klass, const void *data)
3507 {
3508     DeviceClass *dc = DEVICE_CLASS(klass);
3509     PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
3510 
3511     device_class_set_legacy_reset(dc, vfio_pci_reset);
3512     device_class_set_props(dc, vfio_pci_dev_properties);
3513 #ifdef CONFIG_IOMMUFD
3514     object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd);
3515 #endif
3516     dc->desc = "VFIO-based PCI device assignment";
3517     pdc->realize = vfio_realize;
3518 
3519     object_class_property_set_description(klass, /* 1.3 */
3520                                           "host",
3521                                           "Host PCI address [domain:]<bus:slot.function> of assigned device");
3522     object_class_property_set_description(klass, /* 1.3 */
3523                                           "x-intx-mmap-timeout-ms",
3524                                           "Wait time (milliseconds) before re-enabling direct "
3525                                           "device access after INTx when EOI is not provided "
3526                                           "by KVM/QEMU (DEBUG)");
3527     object_class_property_set_description(klass, /* 1.5 */
3528                                           "x-vga",
3529                                           "Expose VGA address spaces for device");
3530     object_class_property_set_description(klass, /* 2.3 */
3531                                           "x-req",
3532                                           "Disable device request notification support (DEBUG)");
3533     object_class_property_set_description(klass, /* 2.4 and 2.5 */
3534                                           "x-no-mmap",
3535                                           "Disable MMAP for device. Allows tracing of MMIO "
3536                                           "accesses (DEBUG)");
3537     object_class_property_set_description(klass, /* 2.5 */
3538                                           "x-no-kvm-intx",
3539                                           "Disable direct VFIO->KVM INTx injection. Allows "
3540                                           "tracing of INTx interrupts (DEBUG)");
3541     object_class_property_set_description(klass, /* 2.5 */
3542                                           "x-no-kvm-msi",
3543                                           "Disable direct VFIO->KVM MSI injection. Allows "
3544                                           "tracing of MSI interrupts (DEBUG)");
3545     object_class_property_set_description(klass, /* 2.5 */
3546                                           "x-no-kvm-msix",
3547                                           "Disable direct VFIO->KVM MSI-X injection. Allows "
3548                                           "tracing of MSI-X interrupts (DEBUG)");
3549     object_class_property_set_description(klass, /* 2.5 */
3550                                           "x-pci-vendor-id",
3551                                           "Override PCI Vendor ID with provided value (DEBUG)");
3552     object_class_property_set_description(klass, /* 2.5 */
3553                                           "x-pci-device-id",
3554                                           "Override PCI device ID with provided value (DEBUG)");
3555     object_class_property_set_description(klass, /* 2.5 */
3556                                           "x-pci-sub-vendor-id",
3557                                           "Override PCI Subsystem Vendor ID with provided value "
3558                                           "(DEBUG)");
3559     object_class_property_set_description(klass, /* 2.5 */
3560                                           "x-pci-sub-device-id",
3561                                           "Override PCI Subsystem Device ID with provided value "
3562                                           "(DEBUG)");
3563     object_class_property_set_description(klass, /* 2.6 */
3564                                           "sysfsdev",
3565                                           "Host sysfs path of assigned device");
3566     object_class_property_set_description(klass, /* 2.7 */
3567                                           "x-igd-opregion",
3568                                           "Expose host IGD OpRegion to guest");
3569     object_class_property_set_description(klass, /* 2.7 (See c4c45e943e51) */
3570                                           "x-igd-gms",
3571                                           "Override IGD data stolen memory size (32MiB units)");
3572     object_class_property_set_description(klass, /* 2.11 */
3573                                           "x-nv-gpudirect-clique",
3574                                           "Add NVIDIA GPUDirect capability indicating P2P DMA "
3575                                           "clique for device [0-15]");
3576     object_class_property_set_description(klass, /* 2.12 */
3577                                           "x-no-geforce-quirks",
3578                                           "Disable GeForce quirks (for NVIDIA Quadro/GRID/Tesla). "
3579                                           "Improves performance");
3580     object_class_property_set_description(klass, /* 2.12 */
3581                                           "display",
3582                                           "Enable display support for device, e.g. vGPU");
3583     object_class_property_set_description(klass, /* 2.12 */
3584                                           "x-msix-relocation",
3585                                           "Specify MSI-X MMIO relocation to the end of specified "
3586                                           "existing BAR or new BAR to avoid virtualization overhead "
3587                                           "due to adjacent device registers");
3588     object_class_property_set_description(klass, /* 3.0 */
3589                                           "x-no-kvm-ioeventfd",
3590                                           "Disable registration of ioeventfds with KVM (DEBUG)");
3591     object_class_property_set_description(klass, /* 3.0 */
3592                                           "x-no-vfio-ioeventfd",
3593                                           "Disable linking of KVM ioeventfds to VFIO ioeventfds "
3594                                           "(DEBUG)");
3595     object_class_property_set_description(klass, /* 3.1 */
3596                                           "x-balloon-allowed",
3597                                           "Override allowing ballooning with device (DEBUG, DANGER)");
3598     object_class_property_set_description(klass, /* 3.2 */
3599                                           "xres",
3600                                           "Set X display resolution the vGPU should use");
3601     object_class_property_set_description(klass, /* 3.2 */
3602                                           "yres",
3603                                           "Set Y display resolution the vGPU should use");
3604     object_class_property_set_description(klass, /* 5.2 */
3605                                           "x-pre-copy-dirty-page-tracking",
3606                                           "Disable dirty page tracking during the iterative phase "
3607                                           "(DEBUG)");
3608     object_class_property_set_description(klass, /* 5.2, 8.0 non-experimental */
3609                                           "enable-migration",
3610                                           "Enable device migration. Also requires a host VFIO PCI "
3611                                           "variant or mdev driver with migration support enabled");
3612     object_class_property_set_description(klass, /* 8.1 */
3613                                           "vf-token",
3614                                           "Specify UUID VF token. Required for VF when PF is owned "
3615                                           "by another VFIO driver");
3616 #ifdef CONFIG_IOMMUFD
3617     object_class_property_set_description(klass, /* 9.0 */
3618                                           "iommufd",
3619                                           "Set host IOMMUFD backend device");
3620 #endif
3621     object_class_property_set_description(klass, /* 9.1 */
3622                                           "x-device-dirty-page-tracking",
3623                                           "Disable device dirty page tracking and use "
3624                                           "container-based dirty page tracking");
3625     object_class_property_set_description(klass, /* 9.1 */
3626                                           "migration-events",
3627                                           "Emit a VFIO migration QAPI event when a VFIO device "
3628                                           "changes its migration state. For management applications");
3629     object_class_property_set_description(klass, /* 9.1 */
3630                                           "skip-vsc-check",
3631                                           "Skip config space check for Vendor Specific Capability. "
3632                                           "Setting to false will enforce strict checking of VSC content "
3633                                           "(DEBUG)");
3634     object_class_property_set_description(klass, /* 10.0 */
3635                                           "x-migration-multifd-transfer",
3636                                           "Transfer this device state via "
3637                                           "multifd channels when live migrating it");
3638 }
3639 
3640 static const TypeInfo vfio_pci_dev_info = {
3641     .name = TYPE_VFIO_PCI,
3642     .parent = TYPE_VFIO_PCI_BASE,
3643     .instance_size = sizeof(VFIOPCIDevice),
3644     .class_init = vfio_pci_dev_class_init,
3645     .instance_init = vfio_instance_init,
3646     .instance_finalize = vfio_instance_finalize,
3647 };
3648 
3649 static const Property vfio_pci_dev_nohotplug_properties[] = {
3650     DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
3651     DEFINE_PROP_ON_OFF_AUTO("x-ramfb-migrate", VFIOPCIDevice, ramfb_migrate,
3652                             ON_OFF_AUTO_AUTO),
3653 };
3654 
3655 static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass,
3656                                               const void *data)
3657 {
3658     DeviceClass *dc = DEVICE_CLASS(klass);
3659 
3660     device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
3661     dc->hotpluggable = false;
3662 
3663     object_class_property_set_description(klass, /* 3.1 */
3664                                           "ramfb",
3665                                           "Enable ramfb to provide pre-boot graphics for devices "
3666                                           "that enable the display option");
3667     object_class_property_set_description(klass, /* 8.2 */
3668                                           "x-ramfb-migrate",
3669                                           "Override default migration support for ramfb "
3670                                           "(DEBUG)");
3671 }
3672 
3673 static const TypeInfo vfio_pci_nohotplug_dev_info = {
3674     .name = TYPE_VFIO_PCI_NOHOTPLUG,
3675     .parent = TYPE_VFIO_PCI,
3676     .instance_size = sizeof(VFIOPCIDevice),
3677     .class_init = vfio_pci_nohotplug_dev_class_init,
3678 };
3679 
3680 static void register_vfio_pci_dev_type(void)
3681 {
3682     /*
3683      * An ordinary ON_OFF_AUTO property isn't runtime-mutable, but a source VM
3684      * can run for a long time before being migrated, so it is desirable to
3685      * have a fallback mechanism to the old way of transferring VFIO device
3686      * state if it turns out to be necessary.
3687      * The following makes this type of property have the same mutability level
3688      * as ordinary migration parameters.
3689      */
3690     vfio_pci_migration_multifd_transfer_prop = qdev_prop_on_off_auto;
3691     vfio_pci_migration_multifd_transfer_prop.realized_set_allowed = true;
3692 
3693     type_register_static(&vfio_pci_base_dev_info);
3694     type_register_static(&vfio_pci_dev_info);
3695     type_register_static(&vfio_pci_nohotplug_dev_info);
3696 }
3697 
3698 type_init(register_vfio_pci_dev_type)
3699