1 /*
2 * vfio based device assignment support
3 *
4 * Copyright Red Hat, Inc. 2012
5 *
6 * Authors:
7 * Alex Williamson <alex.williamson@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Based on qemu-kvm device-assignment:
13 * Adapted for KVM by Qumranet.
14 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
15 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
16 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
17 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
18 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
19 */
20
21 #include "qemu/osdep.h"
22 #include CONFIG_DEVICES /* CONFIG_IOMMUFD */
23 #include <linux/vfio.h>
24 #include <sys/ioctl.h>
25
26 #include "hw/hw.h"
27 #include "hw/pci/msi.h"
28 #include "hw/pci/msix.h"
29 #include "hw/pci/pci_bridge.h"
30 #include "hw/qdev-properties.h"
31 #include "hw/qdev-properties-system.h"
32 #include "migration/vmstate.h"
33 #include "qobject/qdict.h"
34 #include "qemu/error-report.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/module.h"
37 #include "qemu/range.h"
38 #include "qemu/units.h"
39 #include "system/kvm.h"
40 #include "system/runstate.h"
41 #include "pci.h"
42 #include "trace.h"
43 #include "qapi/error.h"
44 #include "migration/blocker.h"
45 #include "migration/qemu-file.h"
46 #include "system/iommufd.h"
47 #include "vfio-migration-internal.h"
48 #include "vfio-helpers.h"
49
50 #define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"
51
52 /* Protected by BQL */
53 static KVMRouteChange vfio_route_change;
54
55 static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
56 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
57 static void vfio_msi_disable_common(VFIOPCIDevice *vdev);
58
59 /*
60 * Disabling BAR mmapping can be slow, but toggling it around INTx can
61 * also be a huge overhead. We try to get the best of both worlds by
62 * waiting until an interrupt to disable mmaps (subsequent transitions
63 * to the same state are effectively no overhead). If the interrupt has
64 * been serviced and the time gap is long enough, we re-enable mmaps for
65 * performance. This works well for things like graphics cards, which
66 * may not use their interrupt at all and are penalized to an unusable
67 * level by read/write BAR traps. Other devices, like NICs, have more
68 * regular interrupts and see much better latency by staying in non-mmap
69 * mode. We therefore set the default mmap_timeout such that a ping
70 * is just enough to keep the mmap disabled. Users can experiment with
71 * other options with the x-intx-mmap-timeout-ms parameter (a value of
72 * zero disables the timer).
73 */
74 static void vfio_intx_mmap_enable(void *opaque)
75 {
76 VFIOPCIDevice *vdev = opaque;
77
78 if (vdev->intx.pending) {
79 timer_mod(vdev->intx.mmap_timer,
80 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
81 return;
82 }
83
84 vfio_mmap_set_enabled(vdev, true);
85 }
86
87 static void vfio_intx_interrupt(void *opaque)
88 {
89 VFIOPCIDevice *vdev = opaque;
90
91 if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
92 return;
93 }
94
95 trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
96
97 vdev->intx.pending = true;
98 pci_irq_assert(&vdev->pdev);
99 vfio_mmap_set_enabled(vdev, false);
100 if (vdev->intx.mmap_timeout) {
101 timer_mod(vdev->intx.mmap_timer,
102 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
103 }
104 }
105
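/*
 * EOI from the guest: clear the pending state, de-assert the virtual IRQ
 * and unmask INTx on the host so the device can signal again.
 */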
106 static void vfio_intx_eoi(VFIODevice *vbasedev)
107 {
108 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
109
110 if (!vdev->intx.pending) {
111 return;
112 }
113
114 trace_vfio_intx_eoi(vbasedev->name);
115
116 vdev->intx.pending = false;
117 pci_irq_deassert(&vdev->pdev);
118 vfio_device_irq_unmask(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
119 }
120
121 static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
122 {
123 #ifdef CONFIG_KVM
124 int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);
125
126 if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
127 vdev->intx.route.mode != PCI_INTX_ENABLED ||
128 !kvm_resamplefds_enabled()) {
129 return true;
130 }
131
132 /* Get to a known interrupt state */
133 qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
134 vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
135 vdev->intx.pending = false;
136 pci_irq_deassert(&vdev->pdev);
137
138 /* Get an eventfd for resample/unmask */
139 if (event_notifier_init(&vdev->intx.unmask, 0)) {
140 error_setg(errp, "event_notifier_init failed eoi");
141 goto fail;
142 }
143
144 if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
145 &vdev->intx.interrupt,
146 &vdev->intx.unmask,
147 vdev->intx.route.irq)) {
148 error_setg_errno(errp, errno, "failed to setup resample irqfd");
149 goto fail_irqfd;
150 }
151
152 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
153 VFIO_IRQ_SET_ACTION_UNMASK,
154 event_notifier_get_fd(&vdev->intx.unmask),
155 errp)) {
156 goto fail_vfio;
157 }
158
159 /* Let'em rip */
160 vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
161
162 vdev->intx.kvm_accel = true;
163
164 trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
165
166 return true;
167
168 fail_vfio:
169 kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
170 vdev->intx.route.irq);
171 fail_irqfd:
172 event_notifier_cleanup(&vdev->intx.unmask);
173 fail:
174 qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
175 vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
176 return false;
177 #else
178 return true;
179 #endif
180 }
181
182 static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
183 {
184 #ifdef CONFIG_KVM
185 if (!vdev->intx.kvm_accel) {
186 return;
187 }
188
189 /*
190 * Get to a known state, hardware masked, QEMU ready to accept new
191 * interrupts, QEMU IRQ de-asserted.
192 */
193 vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
194 vdev->intx.pending = false;
195 pci_irq_deassert(&vdev->pdev);
196
197 /* Tell KVM to stop listening for an INTx irqfd */
198 if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
199 vdev->intx.route.irq)) {
200 error_report("vfio: Error: Failed to disable INTx irqfd: %m");
201 }
202
203 /* We only need to close the eventfd for VFIO to cleanup the kernel side */
204 event_notifier_cleanup(&vdev->intx.unmask);
205
206 /* QEMU starts listening for interrupt events. */
207 qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
208 vfio_intx_interrupt, NULL, vdev);
209
210 vdev->intx.kvm_accel = false;
211
212 /* If we've missed an event, let it re-fire through QEMU */
213 vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
214
215 trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
216 #endif
217 }
218
219 static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
220 {
221 Error *err = NULL;
222
223 trace_vfio_intx_update(vdev->vbasedev.name,
224 vdev->intx.route.irq, route->irq);
225
226 vfio_intx_disable_kvm(vdev);
227
228 vdev->intx.route = *route;
229
230 if (route->mode != PCI_INTX_ENABLED) {
231 return;
232 }
233
234 if (!vfio_intx_enable_kvm(vdev, &err)) {
235 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
236 }
237
238 /* Re-enable the interrupt in case we missed an EOI */
239 vfio_intx_eoi(&vdev->vbasedev);
240 }
241
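/*
 * Called when guest PCI programming may have changed the INTx-to-IRQ
 * routing; recompute the route and rebuild the KVM bypass if it changed.
 */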
242 static void vfio_intx_routing_notifier(PCIDevice *pdev)
243 {
244 VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
245 PCIINTxRoute route;
246
247 if (vdev->interrupt != VFIO_INT_INTx) {
248 return;
249 }
250
251 route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
252
253 if (pci_intx_route_changed(&vdev->intx.route, &route)) {
254 vfio_intx_update(vdev, &route);
255 }
256 }
257
258 static void vfio_irqchip_change(Notifier *notify, void *data)
259 {
260 VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
261 irqchip_change_notifier);
262
263 vfio_intx_update(vdev, &vdev->intx.route);
264 }
265
266 static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
267 {
268 uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
269 Error *err = NULL;
270 int32_t fd;
271 int ret;
272
273
274 if (!pin) {
275 return true;
276 }
277
278 vfio_disable_interrupts(vdev);
279
280 vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
281 pci_config_set_interrupt_pin(vdev->pdev.config, pin);
282
283 #ifdef CONFIG_KVM
284 /*
285 * Only conditional to avoid generating error messages on platforms
286 * where we won't actually use the result anyway.
287 */
288 if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
289 vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
290 vdev->intx.pin);
291 }
292 #endif
293
294 ret = event_notifier_init(&vdev->intx.interrupt, 0);
295 if (ret) {
296 error_setg_errno(errp, -ret, "event_notifier_init failed");
297 return false;
298 }
299 fd = event_notifier_get_fd(&vdev->intx.interrupt);
300 qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);
301
302 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
303 VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
304 qemu_set_fd_handler(fd, NULL, NULL, vdev);
305 event_notifier_cleanup(&vdev->intx.interrupt);
306 return false;
307 }
308
309 if (!vfio_intx_enable_kvm(vdev, &err)) {
310 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
311 }
312
313 vdev->interrupt = VFIO_INT_INTx;
314
315 trace_vfio_intx_enable(vdev->vbasedev.name);
316 return true;
317 }
318
319 static void vfio_intx_disable(VFIOPCIDevice *vdev)
320 {
321 int fd;
322
323 timer_del(vdev->intx.mmap_timer);
324 vfio_intx_disable_kvm(vdev);
325 vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
326 vdev->intx.pending = false;
327 pci_irq_deassert(&vdev->pdev);
328 vfio_mmap_set_enabled(vdev, true);
329
330 fd = event_notifier_get_fd(&vdev->intx.interrupt);
331 qemu_set_fd_handler(fd, NULL, NULL, vdev);
332 event_notifier_cleanup(&vdev->intx.interrupt);
333
334 vdev->interrupt = VFIO_INT_NONE;
335
336 trace_vfio_intx_disable(vdev->vbasedev.name);
337 }
338
339 /*
340 * MSI/X
341 */
342 static void vfio_msi_interrupt(void *opaque)
343 {
344 VFIOMSIVector *vector = opaque;
345 VFIOPCIDevice *vdev = vector->vdev;
346 MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
347 void (*notify)(PCIDevice *dev, unsigned vector);
348 MSIMessage msg;
349 int nr = vector - vdev->msi_vectors;
350
351 if (!event_notifier_test_and_clear(&vector->interrupt)) {
352 return;
353 }
354
355 if (vdev->interrupt == VFIO_INT_MSIX) {
356 get_msg = msix_get_message;
357 notify = msix_notify;
358
359 /* A masked vector firing needs to use the PBA, enable it */
360 if (msix_is_masked(&vdev->pdev, nr)) {
361 set_bit(nr, vdev->msix->pending);
362 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
363 trace_vfio_msix_pba_enable(vdev->vbasedev.name);
364 }
365 } else if (vdev->interrupt == VFIO_INT_MSI) {
366 get_msg = msi_get_message;
367 notify = msi_notify;
368 } else {
369 abort();
370 }
371
372 msg = get_msg(&vdev->pdev, nr);
373 trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
374 notify(&vdev->pdev, nr);
375 }
376
377 /*
378 * Get MSI-X enabled, but no vector enabled, by setting vector 0 with an invalid
379 * fd to kernel.
380 */
381 static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev)
382 {
383 g_autofree struct vfio_irq_set *irq_set = NULL;
384 int argsz;
385 int32_t *fd;
386
387 argsz = sizeof(*irq_set) + sizeof(*fd);
388
389 irq_set = g_malloc0(argsz);
390 irq_set->argsz = argsz;
391 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
392 VFIO_IRQ_SET_ACTION_TRIGGER;
393 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
394 irq_set->start = 0;
395 irq_set->count = 1;
396 fd = (int32_t *)&irq_set->data;
397 *fd = -1;
398
399 return vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set);
400 }
401
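/*
 * Program the per-vector eventfds into the kernel with a single
 * VFIO_DEVICE_SET_IRQS call, choosing the KVM irqfd or the QEMU-handled
 * eventfd for each vector as appropriate.
 */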
402 static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
403 {
404 struct vfio_irq_set *irq_set;
405 int ret = 0, i, argsz;
406 int32_t *fds;
407
408 /*
409 * If dynamic MSI-X allocation is supported, the vectors to be allocated
410 * and enabled can be scattered. Before the kernel enables MSI-X, setting
411 * nr_vectors causes all of these vectors to be allocated on the host.
412 *
413 * To keep allocation as needed, use vector 0 with an invalid fd to get
414 * MSI-X enabled first, then set vectors with a potentially sparse set of
415 * eventfds to enable interrupts only when enabled in guest.
416 */
417 if (msix && !vdev->msix->noresize) {
418 ret = vfio_enable_msix_no_vec(vdev);
419
420 if (ret) {
421 return ret;
422 }
423 }
424
425 argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
426
427 irq_set = g_malloc0(argsz);
428 irq_set->argsz = argsz;
429 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
430 irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
431 irq_set->start = 0;
432 irq_set->count = vdev->nr_vectors;
433 fds = (int32_t *)&irq_set->data;
434
435 for (i = 0; i < vdev->nr_vectors; i++) {
436 int fd = -1;
437
438 /*
439 * MSI vs MSI-X - The guest has direct access to MSI mask and pending
440 * bits, therefore we always use the KVM signaling path when it is set up.
441 * MSI-X mask and pending bits are emulated, so we want to use the
442 * KVM signaling path only when configured and unmasked.
443 */
444 if (vdev->msi_vectors[i].use) {
445 if (vdev->msi_vectors[i].virq < 0 ||
446 (msix && msix_is_masked(&vdev->pdev, i))) {
447 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
448 } else {
449 fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
450 }
451 }
452
453 fds[i] = fd;
454 }
455
456 ret = vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set);
457
458 g_free(irq_set);
459
460 return ret;
461 }
462
463 static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
464 int vector_n, bool msix)
465 {
466 if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
467 return;
468 }
469
470 vector->virq = kvm_irqchip_add_msi_route(&vfio_route_change,
471 vector_n, &vdev->pdev);
472 }
473
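/*
 * Bind the vector's KVM eventfd to the allocated route so interrupts
 * bypass QEMU; fall back to the QEMU eventfd (virq = -1) on failure.
 */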
474 static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector)
475 {
476 if (vector->virq < 0) {
477 return;
478 }
479
480 if (event_notifier_init(&vector->kvm_interrupt, 0)) {
481 goto fail_notifier;
482 }
483
484 if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
485 NULL, vector->virq) < 0) {
486 goto fail_kvm;
487 }
488
489 return;
490
491 fail_kvm:
492 event_notifier_cleanup(&vector->kvm_interrupt);
493 fail_notifier:
494 kvm_irqchip_release_virq(kvm_state, vector->virq);
495 vector->virq = -1;
496 }
497
498 static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
499 {
500 kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
501 vector->virq);
502 kvm_irqchip_release_virq(kvm_state, vector->virq);
503 vector->virq = -1;
504 event_notifier_cleanup(&vector->kvm_interrupt);
505 }
506
507 static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
508 PCIDevice *pdev)
509 {
510 kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
511 kvm_irqchip_commit_routes(kvm_state);
512 }
513
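/*
 * Tell VFIO which eventfd to signal for this MSI-X vector: the KVM
 * irqfd when a route exists, otherwise the QEMU-handled eventfd.
 */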
514 static void set_irq_signalling(VFIODevice *vbasedev, VFIOMSIVector *vector,
515 unsigned int nr)
516 {
517 Error *err = NULL;
518 int32_t fd;
519
520 if (vector->virq >= 0) {
521 fd = event_notifier_get_fd(&vector->kvm_interrupt);
522 } else {
523 fd = event_notifier_get_fd(&vector->interrupt);
524 }
525
526 if (!vfio_device_irq_set_signaling(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
527 VFIO_IRQ_SET_ACTION_TRIGGER,
528 fd, &err)) {
529 error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
530 }
531 }
532
533 static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
534 MSIMessage *msg, IOHandler *handler)
535 {
536 VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
537 VFIOMSIVector *vector;
538 int ret;
539 bool resizing = !!(vdev->nr_vectors < nr + 1);
540
541 trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);
542
543 vector = &vdev->msi_vectors[nr];
544
545 if (!vector->use) {
546 vector->vdev = vdev;
547 vector->virq = -1;
548 if (event_notifier_init(&vector->interrupt, 0)) {
549 error_report("vfio: Error: event_notifier_init failed");
550 }
551 vector->use = true;
552 msix_vector_use(pdev, nr);
553 }
554
555 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
556 handler, NULL, vector);
557
558 /*
559 * Attempt to enable route through KVM irqchip,
560 * default to userspace handling if unavailable.
561 */
562 if (vector->virq >= 0) {
563 if (!msg) {
564 vfio_remove_kvm_msi_virq(vector);
565 } else {
566 vfio_update_kvm_msi_virq(vector, *msg, pdev);
567 }
568 } else {
569 if (msg) {
570 if (vdev->defer_kvm_irq_routing) {
571 vfio_add_kvm_msi_virq(vdev, vector, nr, true);
572 } else {
573 vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state);
574 vfio_add_kvm_msi_virq(vdev, vector, nr, true);
575 kvm_irqchip_commit_route_changes(&vfio_route_change);
576 vfio_connect_kvm_msi_virq(vector);
577 }
578 }
579 }
580
581 /*
582 * When dynamic allocation is not supported, we don't want to have the
583 * host allocate all possible MSI vectors for a device if they're not
584 * in use, so we shut them down and incrementally increase them as needed.
585 * nr_vectors represents the total number of vectors allocated.
586 *
587 * When dynamic allocation is supported, let the host only allocate
588 * and enable a vector when it is in use in guest. nr_vectors represents
589 * the upper bound of vectors being enabled (but not all of the range
590 * is allocated or enabled).
591 */
592 if (resizing) {
593 vdev->nr_vectors = nr + 1;
594 }
595
596 if (!vdev->defer_kvm_irq_routing) {
597 if (vdev->msix->noresize && resizing) {
598 vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
599 ret = vfio_enable_vectors(vdev, true);
600 if (ret) {
601 error_report("vfio: failed to enable vectors, %s",
602 strerror(-ret));
603 }
604 } else {
605 set_irq_signalling(&vdev->vbasedev, vector, nr);
606 }
607 }
608
609 /* Disable PBA emulation when nothing more is pending. */
610 clear_bit(nr, vdev->msix->pending);
611 if (find_first_bit(vdev->msix->pending,
612 vdev->nr_vectors) == vdev->nr_vectors) {
613 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
614 trace_vfio_msix_pba_disable(vdev->vbasedev.name);
615 }
616
617 return 0;
618 }
619
620 static int vfio_msix_vector_use(PCIDevice *pdev,
621 unsigned int nr, MSIMessage msg)
622 {
623 return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
624 }
625
626 static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
627 {
628 VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
629 VFIOMSIVector *vector = &vdev->msi_vectors[nr];
630
631 trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
632
633 /*
634 * There are still old guests that mask and unmask vectors on every
635 * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of
636 * the KVM setup in place, simply switch VFIO to use the non-bypass
637 * eventfd. We'll then fire the interrupt through QEMU and the MSI-X
638 * core will mask the interrupt and set pending bits, allowing it to
639 * be re-asserted on unmask. Nothing to do if already using QEMU mode.
640 */
641 if (vector->virq >= 0) {
642 int32_t fd = event_notifier_get_fd(&vector->interrupt);
643 Error *err = NULL;
644
645 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
646 nr, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
647 &err)) {
648 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
649 }
650 }
651 }
652
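/*
 * Start batching KVM MSI route changes; routes added while deferred are
 * committed and connected in vfio_commit_kvm_msi_virq_batch().
 */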
653 static void vfio_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
654 {
655 assert(!vdev->defer_kvm_irq_routing);
656 vdev->defer_kvm_irq_routing = true;
657 vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state);
658 }
659
660 static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
661 {
662 int i;
663
664 assert(vdev->defer_kvm_irq_routing);
665 vdev->defer_kvm_irq_routing = false;
666
667 kvm_irqchip_commit_route_changes(&vfio_route_change);
668
669 for (i = 0; i < vdev->nr_vectors; i++) {
670 vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i]);
671 }
672 }
673
674 static void vfio_msix_enable(VFIOPCIDevice *vdev)
675 {
676 int ret;
677
678 vfio_disable_interrupts(vdev);
679
680 vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);
681
682 vdev->interrupt = VFIO_INT_MSIX;
683
684 /*
685 * Setting vector notifiers triggers synchronous vector-use
686 * callbacks for each active vector. Deferring to commit the KVM
687 * routes once rather than per vector provides a substantial
688 * performance improvement.
689 */
690 vfio_prepare_kvm_msi_virq_batch(vdev);
691
692 if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
693 vfio_msix_vector_release, NULL)) {
694 error_report("vfio: msix_set_vector_notifiers failed");
695 }
696
697 vfio_commit_kvm_msi_virq_batch(vdev);
698
699 if (vdev->nr_vectors) {
700 ret = vfio_enable_vectors(vdev, true);
701 if (ret) {
702 error_report("vfio: failed to enable vectors, %s",
703 strerror(-ret));
704 }
705 } else {
706 /*
707 * Some communication channels between VF & PF or PF & fw rely on the
708 * physical state of the device and expect that enabling MSI-X from the
709 * guest enables the same on the host. When our guest is Linux, the
710 * guest driver call to pci_enable_msix() sets the enabling bit in the
711 * MSI-X capability, but leaves the vector table masked. We therefore
712 * can't rely on a vector_use callback (from request_irq() in the guest)
713 * to switch the physical device into MSI-X mode because that may come a
714 * long time after pci_enable_msix(). This code sets vector 0 with an
715 * invalid fd to make the physical device MSI-X enabled, but with no
716 * vectors enabled, just like the guest view.
717 */
718 ret = vfio_enable_msix_no_vec(vdev);
719 if (ret) {
720 error_report("vfio: failed to enable MSI-X, %s",
721 strerror(-ret));
722 }
723 }
724
725 trace_vfio_msix_enable(vdev->vbasedev.name);
726 }
727
728 static void vfio_msi_enable(VFIOPCIDevice *vdev)
729 {
730 int ret, i;
731
732 vfio_disable_interrupts(vdev);
733
734 vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
735 retry:
736 /*
737 * Setting vector notifiers needs to enable route for each vector.
738 * Deferring to commit the KVM routes once rather than per vector
739 * provides a substantial performance improvement.
740 */
741 vfio_prepare_kvm_msi_virq_batch(vdev);
742
743 vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
744
745 for (i = 0; i < vdev->nr_vectors; i++) {
746 VFIOMSIVector *vector = &vdev->msi_vectors[i];
747
748 vector->vdev = vdev;
749 vector->virq = -1;
750 vector->use = true;
751
752 if (event_notifier_init(&vector->interrupt, 0)) {
753 error_report("vfio: Error: event_notifier_init failed");
754 }
755
756 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
757 vfio_msi_interrupt, NULL, vector);
758
759 /*
760 * Attempt to enable route through KVM irqchip,
761 * default to userspace handling if unavailable.
762 */
763 vfio_add_kvm_msi_virq(vdev, vector, i, false);
764 }
765
766 vfio_commit_kvm_msi_virq_batch(vdev);
767
768 /* Set interrupt type prior to possible interrupts */
769 vdev->interrupt = VFIO_INT_MSI;
770
771 ret = vfio_enable_vectors(vdev, false);
772 if (ret) {
773 if (ret < 0) {
774 error_report("vfio: Error: Failed to setup MSI fds: %s",
775 strerror(-ret));
776 } else {
777 error_report("vfio: Error: Failed to enable %d "
778 "MSI vectors, retry with %d", vdev->nr_vectors, ret);
779 }
780
781 vfio_msi_disable_common(vdev);
782
783 if (ret > 0) {
784 vdev->nr_vectors = ret;
785 goto retry;
786 }
787
788 /*
789 * Failing to setup MSI doesn't really fall within any specification.
790 * Let's try leaving interrupts disabled and hope the guest figures
791 * out to fall back to INTx for this device.
792 */
793 error_report("vfio: Error: Failed to enable MSI");
794
795 return;
796 }
797
798 trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
799 }
800
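/*
 * Common MSI/MSI-X teardown: remove KVM routes, drop the per-vector
 * eventfd handlers and free the vector array.
 */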
801 static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
802 {
803 int i;
804
805 for (i = 0; i < vdev->nr_vectors; i++) {
806 VFIOMSIVector *vector = &vdev->msi_vectors[i];
807 if (vdev->msi_vectors[i].use) {
808 if (vector->virq >= 0) {
809 vfio_remove_kvm_msi_virq(vector);
810 }
811 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
812 NULL, NULL, NULL);
813 event_notifier_cleanup(&vector->interrupt);
814 }
815 }
816
817 g_free(vdev->msi_vectors);
818 vdev->msi_vectors = NULL;
819 vdev->nr_vectors = 0;
820 vdev->interrupt = VFIO_INT_NONE;
821 }
822
823 static void vfio_msix_disable(VFIOPCIDevice *vdev)
824 {
825 Error *err = NULL;
826 int i;
827
828 msix_unset_vector_notifiers(&vdev->pdev);
829
830 /*
831 * MSI-X will only release vectors if MSI-X is still enabled on the
832 * device; check through the rest and release them ourselves if necessary.
833 */
834 for (i = 0; i < vdev->nr_vectors; i++) {
835 if (vdev->msi_vectors[i].use) {
836 vfio_msix_vector_release(&vdev->pdev, i);
837 msix_vector_unuse(&vdev->pdev, i);
838 }
839 }
840
841 /*
842 * Always clear MSI-X IRQ index. A PF device could have enabled
843 * MSI-X with no vectors. See vfio_msix_enable().
844 */
845 vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
846
847 vfio_msi_disable_common(vdev);
848 if (!vfio_intx_enable(vdev, &err)) {
849 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
850 }
851
852 memset(vdev->msix->pending, 0,
853 BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));
854
855 trace_vfio_msix_disable(vdev->vbasedev.name);
856 }
857
858 static void vfio_msi_disable(VFIOPCIDevice *vdev)
859 {
860 Error *err = NULL;
861
862 vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
863 vfio_msi_disable_common(vdev);
864 vfio_intx_enable(vdev, &err);
865 if (err) {
866 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
867 }
868
869 trace_vfio_msi_disable(vdev->vbasedev.name);
870 }
871
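/*
 * Refresh the KVM MSI routes after the guest rewrites the MSI
 * address/data registers while MSI remains enabled.
 */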
872 static void vfio_update_msi(VFIOPCIDevice *vdev)
873 {
874 int i;
875
876 for (i = 0; i < vdev->nr_vectors; i++) {
877 VFIOMSIVector *vector = &vdev->msi_vectors[i];
878 MSIMessage msg;
879
880 if (!vector->use || vector->virq < 0) {
881 continue;
882 }
883
884 msg = msi_get_message(&vdev->pdev, i);
885 vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
886 }
887 }
888
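/*
 * Read the device option ROM through the VFIO ROM region into a local
 * buffer that is later exposed to the guest via vfio_rom_ops.
 */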
889 static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
890 {
891 VFIODevice *vbasedev = &vdev->vbasedev;
892 struct vfio_region_info *reg_info = NULL;
893 uint64_t size;
894 off_t off = 0;
895 ssize_t bytes;
896 int ret;
897
898 ret = vfio_device_get_region_info(vbasedev, VFIO_PCI_ROM_REGION_INDEX,
899 &reg_info);
900
901 if (ret != 0) {
902 error_report("vfio: Error getting ROM info: %s", strerror(-ret));
903 return;
904 }
905
906 trace_vfio_pci_load_rom(vbasedev->name, (unsigned long)reg_info->size,
907 (unsigned long)reg_info->offset,
908 (unsigned long)reg_info->flags);
909
910 vdev->rom_size = size = reg_info->size;
911 vdev->rom_offset = reg_info->offset;
912
913 if (!vdev->rom_size) {
914 vdev->rom_read_failed = true;
915 error_report("vfio-pci: Cannot read device rom at %s", vbasedev->name);
916 error_printf("Device option ROM contents are probably invalid "
917 "(check dmesg).\nSkip option ROM probe with rombar=0, "
918 "or load from file with romfile=\n");
919 return;
920 }
921
922 vdev->rom = g_malloc(size);
923 memset(vdev->rom, 0xff, size);
924
925 while (size) {
926 bytes = vbasedev->io_ops->region_read(vbasedev,
927 VFIO_PCI_ROM_REGION_INDEX,
928 off, size, vdev->rom + off);
929
930 if (bytes == 0) {
931 break;
932 } else if (bytes > 0) {
933 off += bytes;
934 size -= bytes;
935 } else {
936 if (bytes == -EINTR || bytes == -EAGAIN) {
937 continue;
938 }
939 error_report("vfio: Error reading device ROM: %s",
940 strreaderror(bytes));
941
942 break;
943 }
944 }
945
946 /*
947 * Test the ROM signature against our device, if the vendor is correct
948 * but the device ID doesn't match, store the correct device ID and
949 * recompute the checksum. Intel IGD devices need this and are known
950 * to have bogus checksums so we can't simply adjust the checksum.
951 */
952 if (pci_get_word(vdev->rom) == 0xaa55 &&
953 pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
954 !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
955 uint16_t vid, did;
956
957 vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
958 did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);
959
960 if (vid == vdev->vendor_id && did != vdev->device_id) {
961 int i;
962 uint8_t csum, *data = vdev->rom;
963
964 pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
965 vdev->device_id);
966 data[6] = 0;
967
968 for (csum = 0, i = 0; i < vdev->rom_size; i++) {
969 csum += data[i];
970 }
971
972 data[6] = -csum;
973 }
974 }
975 }
976
977 /* "Raw" read of underlying config space. */
978 static int vfio_pci_config_space_read(VFIOPCIDevice *vdev, off_t offset,
979 uint32_t size, void *data)
980 {
981 return vdev->vbasedev.io_ops->region_read(&vdev->vbasedev,
982 VFIO_PCI_CONFIG_REGION_INDEX,
983 offset, size, data);
984 }
985
986 /* "Raw" write of underlying config space. */
987 static int vfio_pci_config_space_write(VFIOPCIDevice *vdev, off_t offset,
988 uint32_t size, void *data)
989 {
990 return vdev->vbasedev.io_ops->region_write(&vdev->vbasedev,
991 VFIO_PCI_CONFIG_REGION_INDEX,
992 offset, size, data);
993 }
994
995 static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
996 {
997 VFIOPCIDevice *vdev = opaque;
998 union {
999 uint8_t byte;
1000 uint16_t word;
1001 uint32_t dword;
1002 uint64_t qword;
1003 } val;
1004 uint64_t data = 0;
1005
1006 /* Load the ROM lazily when the guest tries to read it */
1007 if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
1008 vfio_pci_load_rom(vdev);
1009 }
1010
1011 memcpy(&val, vdev->rom + addr,
1012 (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);
1013
1014 switch (size) {
1015 case 1:
1016 data = val.byte;
1017 break;
1018 case 2:
1019 data = le16_to_cpu(val.word);
1020 break;
1021 case 4:
1022 data = le32_to_cpu(val.dword);
1023 break;
1024 default:
1025 hw_error("vfio: unsupported read size, %d bytes\n", size);
1026 break;
1027 }
1028
1029 trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);
1030
1031 return data;
1032 }
1033
1034 static void vfio_rom_write(void *opaque, hwaddr addr,
1035 uint64_t data, unsigned size)
1036 {
1037 }
1038
1039 static const MemoryRegionOps vfio_rom_ops = {
1040 .read = vfio_rom_read,
1041 .write = vfio_rom_write,
1042 .endianness = DEVICE_LITTLE_ENDIAN,
1043 };
1044
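/*
 * Size the ROM BAR with the standard probe: save the original value,
 * write all ones, read back the size mask, then restore the original.
 */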
1045 static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
1046 {
1047 VFIODevice *vbasedev = &vdev->vbasedev;
1048 uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
1049 char *name;
1050
1051 if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
1052 /* Since pci handles romfile, just print a message and return */
1053 if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) {
1054 warn_report("Device at %s is known to cause system instability"
1055 " issues during option rom execution",
1056 vdev->vbasedev.name);
1057 error_printf("Proceeding anyway since user specified romfile\n");
1058 }
1059 return;
1060 }
1061
1062 /*
1063 * Use the same size ROM BAR as the physical device. The contents
1064 * will get filled in later when the guest tries to read it.
1065 */
1066 if (vfio_pci_config_space_read(vdev, PCI_ROM_ADDRESS, 4, &orig) != 4 ||
1067 vfio_pci_config_space_write(vdev, PCI_ROM_ADDRESS, 4, &size) != 4 ||
1068 vfio_pci_config_space_read(vdev, PCI_ROM_ADDRESS, 4, &size) != 4 ||
1069 vfio_pci_config_space_write(vdev, PCI_ROM_ADDRESS, 4, &orig) != 4) {
1070
1071 error_report("%s(%s) ROM access failed", __func__, vbasedev->name);
1072 return;
1073 }
1074
1075 size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
1076
1077 if (!size) {
1078 return;
1079 }
1080
1081 if (vfio_opt_rom_in_denylist(vdev)) {
1082 if (vdev->pdev.rom_bar > 0) {
1083 warn_report("Device at %s is known to cause system instability"
1084 " issues during option rom execution",
1085 vdev->vbasedev.name);
1086 error_printf("Proceeding anyway since user specified"
1087 " positive value for rombar\n");
1088 } else {
1089 warn_report("Rom loading for device at %s has been disabled"
1090 " due to system instability issues",
1091 vdev->vbasedev.name);
1092 error_printf("Specify rombar=1 or romfile to force\n");
1093 return;
1094 }
1095 }
1096
1097 trace_vfio_pci_size_rom(vdev->vbasedev.name, size);
1098
1099 name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);
1100
1101 memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
1102 &vfio_rom_ops, vdev, name, size);
1103 g_free(name);
1104
1105 pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
1106 PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);
1107
1108 vdev->rom_read_failed = false;
1109 }
1110
1111 void vfio_vga_write(void *opaque, hwaddr addr,
1112 uint64_t data, unsigned size)
1113 {
1114 VFIOVGARegion *region = opaque;
1115 VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1116 union {
1117 uint8_t byte;
1118 uint16_t word;
1119 uint32_t dword;
1120 uint64_t qword;
1121 } buf;
1122 off_t offset = vga->fd_offset + region->offset + addr;
1123
1124 switch (size) {
1125 case 1:
1126 buf.byte = data;
1127 break;
1128 case 2:
1129 buf.word = cpu_to_le16(data);
1130 break;
1131 case 4:
1132 buf.dword = cpu_to_le32(data);
1133 break;
1134 default:
1135 hw_error("vfio: unsupported write size, %d bytes", size);
1136 break;
1137 }
1138
1139 if (pwrite(vga->fd, &buf, size, offset) != size) {
1140 error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
1141 __func__, region->offset + addr, data, size);
1142 }
1143
1144 trace_vfio_vga_write(region->offset + addr, data, size);
1145 }
1146
1147 uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
1148 {
1149 VFIOVGARegion *region = opaque;
1150 VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1151 union {
1152 uint8_t byte;
1153 uint16_t word;
1154 uint32_t dword;
1155 uint64_t qword;
1156 } buf;
1157 uint64_t data = 0;
1158 off_t offset = vga->fd_offset + region->offset + addr;
1159
1160 if (pread(vga->fd, &buf, size, offset) != size) {
1161 error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
1162 __func__, region->offset + addr, size);
1163 return (uint64_t)-1;
1164 }
1165
1166 switch (size) {
1167 case 1:
1168 data = buf.byte;
1169 break;
1170 case 2:
1171 data = le16_to_cpu(buf.word);
1172 break;
1173 case 4:
1174 data = le32_to_cpu(buf.dword);
1175 break;
1176 default:
1177 hw_error("vfio: unsupported read size, %d bytes", size);
1178 break;
1179 }
1180
1181 trace_vfio_vga_read(region->offset + addr, size, data);
1182
1183 return data;
1184 }
1185
1186 static const MemoryRegionOps vfio_vga_ops = {
1187 .read = vfio_vga_read,
1188 .write = vfio_vga_write,
1189 .endianness = DEVICE_LITTLE_ENDIAN,
1190 };
1191
1192 /*
1193 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to page
1194 * size if the BAR occupies an exclusive page on the host so that we can map
1195 * this BAR to the guest. But this sub-page BAR may not occupy an exclusive
1196 * page in guest. So we should set the priority of the expanded memory
1197 * region to zero in case of overlap with BARs which share the same page
1198 * with the sub-page BAR in guest. Besides, we should also recover the
1199 * size of this sub-page BAR when its base address is changed in guest
1200 * and not page aligned any more.
1201 */
1202 static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
1203 {
1204 VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
1205 VFIORegion *region = &vdev->bars[bar].region;
1206 MemoryRegion *mmap_mr, *region_mr, *base_mr;
1207 PCIIORegion *r;
1208 pcibus_t bar_addr;
1209 uint64_t size = region->size;
1210
1211 /* Make sure that the whole region is allowed to be mmapped */
1212 if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
1213 region->mmaps[0].size != region->size) {
1214 return;
1215 }
1216
1217 r = &pdev->io_regions[bar];
1218 bar_addr = r->addr;
1219 base_mr = vdev->bars[bar].mr;
1220 region_mr = region->mem;
1221 mmap_mr = &region->mmaps[0].mem;
1222
1223 /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
1224 if (bar_addr != PCI_BAR_UNMAPPED &&
1225 !(bar_addr & ~qemu_real_host_page_mask())) {
1226 size = qemu_real_host_page_size();
1227 }
1228
1229 memory_region_transaction_begin();
1230
1231 if (vdev->bars[bar].size < size) {
1232 memory_region_set_size(base_mr, size);
1233 }
1234 memory_region_set_size(region_mr, size);
1235 memory_region_set_size(mmap_mr, size);
1236 if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
1237 memory_region_del_subregion(r->address_space, base_mr);
1238 memory_region_add_subregion_overlap(r->address_space,
1239 bar_addr, base_mr, 0);
1240 }
1241
1242 memory_region_transaction_commit();
1243 }
1244
1245 /*
1246 * PCI config space
1247 */
1248 uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
1249 {
1250 VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
1251 VFIODevice *vbasedev = &vdev->vbasedev;
1252 uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
1253
1254 memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
1255 emu_bits = le32_to_cpu(emu_bits);
1256
1257 if (emu_bits) {
1258 emu_val = pci_default_read_config(pdev, addr, len);
1259 }
1260
1261 if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
1262 ssize_t ret;
1263
1264 ret = vfio_pci_config_space_read(vdev, addr, len, &phys_val);
1265 if (ret != len) {
1266 error_report("%s(%s, 0x%x, 0x%x) failed: %s",
1267 __func__, vbasedev->name, addr, len,
1268 strreaderror(ret));
1269 return -1;
1270 }
1271 phys_val = le32_to_cpu(phys_val);
1272 }
1273
1274 val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
1275
1276 trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
1277
1278 return val;
1279 }
1280
1281 void vfio_pci_write_config(PCIDevice *pdev,
1282 uint32_t addr, uint32_t val, int len)
1283 {
1284 VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
1285 VFIODevice *vbasedev = &vdev->vbasedev;
1286 uint32_t val_le = cpu_to_le32(val);
1287 int ret;
1288
1289 trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
1290
1291 /* Write everything to VFIO, let it filter out what we can't write */
1292 ret = vfio_pci_config_space_write(vdev, addr, len, &val_le);
1293 if (ret != len) {
1294 error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %s",
1295 __func__, vbasedev->name, addr, val, len,
1296 strwriteerror(ret));
1297 }
1298
1299 /* MSI/MSI-X Enabling/Disabling */
1300 if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
1301 ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
1302 int is_enabled, was_enabled = msi_enabled(pdev);
1303
1304 pci_default_write_config(pdev, addr, val, len);
1305
1306 is_enabled = msi_enabled(pdev);
1307
1308 if (!was_enabled) {
1309 if (is_enabled) {
1310 vfio_msi_enable(vdev);
1311 }
1312 } else {
1313 if (!is_enabled) {
1314 vfio_msi_disable(vdev);
1315 } else {
1316 vfio_update_msi(vdev);
1317 }
1318 }
1319 } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
1320 ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
1321 int is_enabled, was_enabled = msix_enabled(pdev);
1322
1323 pci_default_write_config(pdev, addr, val, len);
1324
1325 is_enabled = msix_enabled(pdev);
1326
1327 if (!was_enabled && is_enabled) {
1328 vfio_msix_enable(vdev);
1329 } else if (was_enabled && !is_enabled) {
1330 vfio_msix_disable(vdev);
1331 }
1332 } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
1333 range_covers_byte(addr, len, PCI_COMMAND)) {
1334 pcibus_t old_addr[PCI_NUM_REGIONS - 1];
1335 int bar;
1336
1337 for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
1338 old_addr[bar] = pdev->io_regions[bar].addr;
1339 }
1340
1341 pci_default_write_config(pdev, addr, val, len);
1342
1343 for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
1344 if (old_addr[bar] != pdev->io_regions[bar].addr &&
1345 vdev->bars[bar].region.size > 0 &&
1346 vdev->bars[bar].region.size < qemu_real_host_page_size()) {
1347 vfio_sub_page_bar_update_mapping(pdev, bar);
1348 }
1349 }
1350 } else {
1351 /* Write everything to QEMU to keep emulated bits correct */
1352 pci_default_write_config(pdev, addr, val, len);
1353 }
1354 }
1355
1356 /*
1357 * Interrupt setup
1358 */
1359 static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
1360 {
1361 /*
1362 * More complicated than it looks. Disabling MSI/X transitions the
1363 * device to INTx mode (if supported). Therefore we need to first
1364 * disable MSI/X and then cleanup by disabling INTx.
1365 */
1366 if (vdev->interrupt == VFIO_INT_MSIX) {
1367 vfio_msix_disable(vdev);
1368 } else if (vdev->interrupt == VFIO_INT_MSI) {
1369 vfio_msi_disable(vdev);
1370 }
1371
1372 if (vdev->interrupt == VFIO_INT_INTx) {
1373 vfio_intx_disable(vdev);
1374 }
1375 }
1376
1377 static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
1378 {
1379 uint16_t ctrl;
1380 bool msi_64bit, msi_maskbit;
1381 int ret, entries;
1382 Error *err = NULL;
1383
1384 ret = vfio_pci_config_space_read(vdev, pos + PCI_CAP_FLAGS,
1385 sizeof(ctrl), &ctrl);
1386 if (ret != sizeof(ctrl)) {
1387 error_setg(errp, "failed reading MSI PCI_CAP_FLAGS: %s",
1388 strreaderror(ret));
1389 return false;
1390 }
1391 ctrl = le16_to_cpu(ctrl);
1392
1393 msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
1394 msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
1395 entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
1396
1397 trace_vfio_msi_setup(vdev->vbasedev.name, pos);
1398
1399 ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
1400 if (ret < 0) {
1401 if (ret == -ENOTSUP) {
1402 return true;
1403 }
1404 error_propagate_prepend(errp, err, "msi_init failed: ");
1405 return false;
1406 }
1407 vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
1408
1409 return true;
1410 }
1411
1412 static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
1413 {
1414 off_t start, end;
1415 VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;
1416
1417 /*
1418 * If the host driver allows mapping of the MSI-X data, we are going to
1419 * map the entire BAR and emulate the MSI-X table on top of that.
1420 */
1421 if (vfio_device_has_region_cap(&vdev->vbasedev, region->nr,
1422 VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
1423 return;
1424 }
1425
1426 /*
1427 * We expect to find a single mmap covering the whole BAR, anything else
1428 * means it's either unsupported or already setup.
1429 */
1430 if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
1431 region->size != region->mmaps[0].size) {
1432 return;
1433 }
1434
1435 /* MSI-X table start and end aligned to host page size */
1436 start = vdev->msix->table_offset & qemu_real_host_page_mask();
1437 end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
1438 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
1439
1440 /*
1441 * Does the MSI-X table cover the beginning of the BAR? The whole BAR?
1442 * NB - Host page size is necessarily a power of two and so is the PCI
1443 * BAR (not counting EA yet), therefore if we have host page aligned
1444 * @start and @end, then any remainder of the BAR before or after those
1445 * must be at least host page sized and therefore mmap'able.
1446 */
1447 if (!start) {
1448 if (end >= region->size) {
1449 region->nr_mmaps = 0;
1450 g_free(region->mmaps);
1451 region->mmaps = NULL;
1452 trace_vfio_msix_fixup(vdev->vbasedev.name,
1453 vdev->msix->table_bar, 0, 0);
1454 } else {
1455 region->mmaps[0].offset = end;
1456 region->mmaps[0].size = region->size - end;
1457 trace_vfio_msix_fixup(vdev->vbasedev.name,
1458 vdev->msix->table_bar, region->mmaps[0].offset,
1459 region->mmaps[0].offset + region->mmaps[0].size);
1460 }
1461
1462 /* Maybe it's aligned at the end of the BAR */
1463 } else if (end >= region->size) {
1464 region->mmaps[0].size = start;
1465 trace_vfio_msix_fixup(vdev->vbasedev.name,
1466 vdev->msix->table_bar, region->mmaps[0].offset,
1467 region->mmaps[0].offset + region->mmaps[0].size);
1468
1469 /* Otherwise it must split the BAR */
1470 } else {
1471 region->nr_mmaps = 2;
1472 region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);
1473
1474 memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));
1475
1476 region->mmaps[0].size = start;
1477 trace_vfio_msix_fixup(vdev->vbasedev.name,
1478 vdev->msix->table_bar, region->mmaps[0].offset,
1479 region->mmaps[0].offset + region->mmaps[0].size);
1480
1481 region->mmaps[1].offset = end;
1482 region->mmaps[1].size = region->size - end;
1483 trace_vfio_msix_fixup(vdev->vbasedev.name,
1484 vdev->msix->table_bar, region->mmaps[1].offset,
1485 region->mmaps[1].offset + region->mmaps[1].size);
1486 }
1487 }
1488
1489 static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
1490 {
1491 int target_bar = -1;
1492 size_t msix_sz;
1493
1494 if (!vdev->msix || vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) {
1495 return true;
1496 }
1497
1498 /* The actual minimum size of MSI-X structures */
1499 msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
1500 (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
1501 /* Round up to host pages, we don't want to share a page */
1502 msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
1503 /* PCI BARs must be a power of 2 */
1504 msix_sz = pow2ceil(msix_sz);
1505
1506 if (vdev->msix_relo == OFF_AUTO_PCIBAR_AUTO) {
1507 /*
1508 * TODO: Lookup table for known devices.
1509 *
1510 * Logically we might use an algorithm here to select the BAR adding
1511 * the least additional MMIO space, but we cannot programmatically
1512 * predict the driver dependency on BAR ordering or sizing, therefore
1513 * 'auto' becomes a lookup for combinations reported to work.
1514 */
1515 if (target_bar < 0) {
1516 error_setg(errp, "No automatic MSI-X relocation available for "
1517 "device %04x:%04x", vdev->vendor_id, vdev->device_id);
1518 return false;
1519 }
1520 } else {
1521 target_bar = (int)(vdev->msix_relo - OFF_AUTO_PCIBAR_BAR0);
1522 }
1523
1524 /* I/O port BARs cannot host MSI-X structures */
1525 if (vdev->bars[target_bar].ioport) {
1526 error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1527 "I/O port BAR", target_bar);
1528 return false;
1529 }
1530
1531 /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
1532 if (!vdev->bars[target_bar].size &&
1533 target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
1534 error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1535 "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
1536 return false;
1537 }
1538
1539 /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
1540 if (vdev->bars[target_bar].size > 1 * GiB &&
1541 !vdev->bars[target_bar].mem64) {
1542 error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1543 "no space to extend 32-bit BAR", target_bar);
1544 return false;
1545 }
1546
1547 /*
1548 * If adding a new BAR, test if we can make it 64bit. We make it
1549 * prefetchable since QEMU MSI-X emulation has no read side effects
1550 * and doing so makes mapping more flexible.
1551 */
1552 if (!vdev->bars[target_bar].size) {
1553 if (target_bar < (PCI_ROM_SLOT - 1) &&
1554 !vdev->bars[target_bar + 1].size) {
1555 vdev->bars[target_bar].mem64 = true;
1556 vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
1557 }
1558 vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
1559 vdev->bars[target_bar].size = msix_sz;
1560 vdev->msix->table_offset = 0;
1561 } else {
1562 vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
1563 msix_sz * 2);
1564 /*
1565 * Due to above size calc, MSI-X always starts halfway into the BAR,
1566 * which will always be a separate host page.
1567 */
1568 vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
1569 }
1570
1571 vdev->msix->table_bar = target_bar;
1572 vdev->msix->pba_bar = target_bar;
1573 /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
1574 vdev->msix->pba_offset = vdev->msix->table_offset +
1575 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);
1576
1577 trace_vfio_msix_relo(vdev->vbasedev.name,
1578 vdev->msix->table_bar, vdev->msix->table_offset);
1579 return true;
1580 }
1581
1582 /*
1583 * We don't have any control over how pci_add_capability() inserts
1584 * capabilities into the chain. In order to set up MSI-X we need a
1585 * MemoryRegion for the BAR. In order to set up the BAR and not
1586 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
1587 * need to first look for where the MSI-X table lives. So we
1588 * unfortunately split MSI-X setup across two functions.
1589 */
1590 static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
1591 {
1592 uint8_t pos;
1593 uint16_t ctrl;
1594 uint32_t table, pba;
1595 struct vfio_irq_info irq_info;
1596 VFIOMSIXInfo *msix;
1597 int ret;
1598
1599 pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
1600 if (!pos) {
1601 return true;
1602 }
1603
1604 ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_FLAGS,
1605 sizeof(ctrl), &ctrl);
1606 if (ret != sizeof(ctrl)) {
1607 error_setg(errp, "failed to read PCI MSIX FLAGS: %s",
1608 strreaderror(ret));
1609 return false;
1610 }
1611
1612 ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_TABLE,
1613 sizeof(table), &table);
1614 if (ret != sizeof(table)) {
1615 error_setg(errp, "failed to read PCI MSIX TABLE: %s",
1616 strreaderror(ret));
1617 return false;
1618 }
1619
1620 ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_PBA,
1621 sizeof(pba), &pba);
1622 if (ret != sizeof(pba)) {
1623 error_setg(errp, "failed to read PCI MSIX PBA: %s", strreaderror(ret));
1624 return false;
1625 }
1626
1627 ctrl = le16_to_cpu(ctrl);
1628 table = le32_to_cpu(table);
1629 pba = le32_to_cpu(pba);
1630
1631 msix = g_malloc0(sizeof(*msix));
1632 msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
1633 msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
1634 msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
1635 msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
1636 msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
1637
1638 ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
1639 &irq_info);
1640 if (ret < 0) {
1641 error_setg_errno(errp, -ret, "failed to get MSI-X irq info");
1642 g_free(msix);
1643 return false;
1644 }
1645
1646 msix->noresize = !!(irq_info.flags & VFIO_IRQ_INFO_NORESIZE);
1647
1648 /*
1649 * Test the size of the pba_offset variable and catch if it extends outside
1650 * of the specified BAR. If it is the case, we need to apply a hardware
1651 * specific quirk if the device is known or we have a broken configuration.
1652 */
1653 if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
1654 /*
1655 * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
1656 * adapters. The T5 hardware returns an incorrect value of 0x8000 for
1657 * the VF PBA offset while the BAR itself is only 8k. The correct value
1658 * is 0x1000, so we hard code that here.
1659 */
1660 if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
1661 (vdev->device_id & 0xff00) == 0x5800) {
1662 msix->pba_offset = 0x1000;
1663 /*
1664 * BAIDU KUNLUN Virtual Function devices for KUNLUN AI processor
1665 * return an incorrect value of 0x460000 for the VF PBA offset while
1666 * the BAR itself is only 0x10000. The correct value is 0xb400.
1667 */
1668 } else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU,
1669 PCI_DEVICE_ID_KUNLUN_VF)) {
1670 msix->pba_offset = 0xb400;
1671 } else if (vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) {
1672 error_setg(errp, "hardware reports invalid configuration, "
1673 "MSIX PBA outside of specified BAR");
1674 g_free(msix);
1675 return false;
1676 }
1677 }
1678
1679 trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
1680 msix->table_offset, msix->entries,
1681 msix->noresize);
1682 vdev->msix = msix;
1683
1684 vfio_pci_fixup_msix_region(vdev);
1685
1686 return vfio_pci_relocate_msix(vdev, errp);
1687 }
1688
1689 static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
1690 {
1691 int ret;
1692 Error *err = NULL;
1693
1694 vdev->msix->pending = g_new0(unsigned long,
1695 BITS_TO_LONGS(vdev->msix->entries));
1696 ret = msix_init(&vdev->pdev, vdev->msix->entries,
1697 vdev->bars[vdev->msix->table_bar].mr,
1698 vdev->msix->table_bar, vdev->msix->table_offset,
1699 vdev->bars[vdev->msix->pba_bar].mr,
1700 vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
1701 &err);
1702 if (ret < 0) {
1703 if (ret == -ENOTSUP) {
1704 warn_report_err(err);
1705 return true;
1706 }
1707
1708 error_propagate(errp, err);
1709 return false;
1710 }
1711
1712 /*
1713 * The PCI spec suggests that devices provide additional alignment for
1714 * MSI-X structures and avoid overlapping non-MSI-X related registers.
1715 * For an assigned device, this hopefully means that emulation of MSI-X
1716 * structures does not affect the performance of the device. If devices
1717 * fail to provide that alignment, a significant performance penalty may
1718 * result, for instance Mellanox MT27500 VFs:
1719 * http://www.spinics.net/lists/kvm/msg125881.html
1720 *
1721 * The PBA is simply not that important for such a serious regression and
1722 * most drivers do not appear to look at it. The solution for this is to
1723 * disable the PBA MemoryRegion unless it's being used. We disable it
1724 * here and only enable it if a masked vector fires through QEMU. As the
1725 * vector-use notifier is called, which occurs on unmask, we test whether
1726 * PBA emulation is needed and again disable if not.
1727 */
1728 memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
1729
1730 /*
1731 * The emulated machine may provide a paravirt interface for MSIX setup
1732 * so it is not strictly necessary to emulate MSIX here. This becomes
1733 * helpful when frequently accessed MMIO registers are located in
1734 * subpages adjacent to the MSIX table but the MSIX data containing page
1735 * cannot be mapped because of a host page size bigger than the MSIX table
1736 * alignment.
1737 */
1738 if (object_property_get_bool(OBJECT(qdev_get_machine()),
1739 "vfio-no-msix-emulation", NULL)) {
1740 memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
1741 }
1742
1743 return true;
1744 }
1745
1746 static void vfio_teardown_msi(VFIOPCIDevice *vdev)
1747 {
1748 msi_uninit(&vdev->pdev);
1749
1750 if (vdev->msix) {
1751 msix_uninit(&vdev->pdev,
1752 vdev->bars[vdev->msix->table_bar].mr,
1753 vdev->bars[vdev->msix->pba_bar].mr);
1754 g_free(vdev->msix->pending);
1755 }
1756 }
1757
1758 /*
1759 * Resource setup
1760 */
1761 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
1762 {
1763 int i;
1764
1765 for (i = 0; i < PCI_ROM_SLOT; i++) {
1766 vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
1767 }
1768 }
1769
1770 static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
1771 {
1772 VFIOBAR *bar = &vdev->bars[nr];
1773
1774 uint32_t pci_bar;
1775 int ret;
1776
1777 /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
1778 if (!bar->region.size) {
1779 return;
1780 }
1781
1782 /* Determine what type of BAR this is for registration */
1783 ret = vfio_pci_config_space_read(vdev, PCI_BASE_ADDRESS_0 + (4 * nr),
1784 sizeof(pci_bar), &pci_bar);
1785 if (ret != sizeof(pci_bar)) {
1786 error_report("vfio: Failed to read BAR %d: %s", nr, strreaderror(ret));
1787 return;
1788 }
1789
1790 pci_bar = le32_to_cpu(pci_bar);
1791 bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
1792 bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
1793 bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
1794 ~PCI_BASE_ADDRESS_MEM_MASK);
1795 bar->size = bar->region.size;
1796 }
1797
1798 static void vfio_bars_prepare(VFIOPCIDevice *vdev)
1799 {
1800 int i;
1801
1802 for (i = 0; i < PCI_ROM_SLOT; i++) {
1803 vfio_bar_prepare(vdev, i);
1804 }
1805 }
1806
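/*
 * Wrap the vfio BAR region in a container MemoryRegion sized to the BAR and
 * register it with the PCI core.  Failure to mmap the region is not fatal,
 * it only means slower, trapped access to the BAR.
 */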
1807 static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
1808 {
1809 VFIOBAR *bar = &vdev->bars[nr];
1810 char *name;
1811
1812 if (!bar->size) {
1813 return;
1814 }
1815
1816 bar->mr = g_new0(MemoryRegion, 1);
1817 name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
1818 memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
1819 g_free(name);
1820
1821 if (bar->region.size) {
1822 memory_region_add_subregion(bar->mr, 0, bar->region.mem);
1823
1824 if (vfio_region_mmap(&bar->region)) {
1825 error_report("Failed to mmap %s BAR %d. Performance may be slow",
1826 vdev->vbasedev.name, nr);
1827 }
1828 }
1829
1830 pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
1831 }
1832
1833 static void vfio_bars_register(VFIOPCIDevice *vdev)
1834 {
1835 int i;
1836
1837 for (i = 0; i < PCI_ROM_SLOT; i++) {
1838 vfio_bar_register(vdev, i);
1839 }
1840 }
1841
1842 static void vfio_bars_exit(VFIOPCIDevice *vdev)
1843 {
1844 int i;
1845
1846 for (i = 0; i < PCI_ROM_SLOT; i++) {
1847 VFIOBAR *bar = &vdev->bars[i];
1848
1849 vfio_bar_quirk_exit(vdev, i);
1850 vfio_region_exit(&bar->region);
1851 if (bar->region.size) {
1852 memory_region_del_subregion(bar->mr, bar->region.mem);
1853 }
1854 }
1855
1856 if (vdev->vga) {
1857 pci_unregister_vga(&vdev->pdev);
1858 vfio_vga_quirk_exit(vdev);
1859 }
1860 }
1861
1862 static void vfio_bars_finalize(VFIOPCIDevice *vdev)
1863 {
1864 int i;
1865
1866 for (i = 0; i < PCI_ROM_SLOT; i++) {
1867 VFIOBAR *bar = &vdev->bars[i];
1868
1869 vfio_bar_quirk_finalize(vdev, i);
1870 vfio_region_finalize(&bar->region);
1871 if (bar->mr) {
1872 assert(bar->size);
1873 object_unparent(OBJECT(bar->mr));
1874 g_free(bar->mr);
1875 bar->mr = NULL;
1876 }
1877 }
1878
1879 if (vdev->vga) {
1880 vfio_vga_quirk_finalize(vdev);
1881 for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1882 object_unparent(OBJECT(&vdev->vga->region[i].mem));
1883 }
1884 g_free(vdev->vga);
1885 }
1886 }
1887
1888 /*
1889 * General setup
1890 */
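/*
 * Standard capability headers do not encode a length, so approximate the
 * size of the capability at @pos as the distance to the next capability in
 * the list (or to the end of standard config space).  vfio_ext_cap_max_size()
 * below does the same for extended capabilities.
 */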
1891 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1892 {
1893 uint8_t tmp;
1894 uint16_t next = PCI_CONFIG_SPACE_SIZE;
1895
1896 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
1897 tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
1898 if (tmp > pos && tmp < next) {
1899 next = tmp;
1900 }
1901 }
1902
1903 return next - pos;
1904 }
1905
1906
1907 static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
1908 {
1909 uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
1910
1911 for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
1912 tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
1913 if (tmp > pos && tmp < next) {
1914 next = tmp;
1915 }
1916 }
1917
1918 return next - pos;
1919 }
1920
1921 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1922 {
1923 pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1924 }
1925
1926 static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
1927 uint16_t val, uint16_t mask)
1928 {
1929 vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1930 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1931 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1932 }
1933
1934 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1935 {
1936 pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1937 }
1938
1939 static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
1940 uint32_t val, uint32_t mask)
1941 {
1942 vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1943 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1944 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
1945 }
1946
1947 static void vfio_pci_enable_rp_atomics(VFIOPCIDevice *vdev)
1948 {
1949 struct vfio_device_info_cap_pci_atomic_comp *cap;
1950 g_autofree struct vfio_device_info *info = NULL;
1951 PCIBus *bus = pci_get_bus(&vdev->pdev);
1952 PCIDevice *parent = bus->parent_dev;
1953 struct vfio_info_cap_header *hdr;
1954 uint32_t mask = 0;
1955 uint8_t *pos;
1956
1957 /*
1958 * PCIe Atomic Ops completer support is only added automatically for single
1959 * function devices downstream of a root port supporting DEVCAP2. Support
1960 * is added during realize and, if added, removed during device exit. The
1961 * single function requirement avoids conflicting requirements should a
1962 * slot be composed of multiple devices with differing capabilities.
1963 */
1964 if (pci_bus_is_root(bus) || !parent || !parent->exp.exp_cap ||
1965 pcie_cap_get_type(parent) != PCI_EXP_TYPE_ROOT_PORT ||
1966 pcie_cap_get_version(parent) != PCI_EXP_FLAGS_VER2 ||
1967 vdev->pdev.devfn ||
1968 vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
1969 return;
1970 }
1971
1972 pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2;
1973
1974 /* Abort if there's already an Atomic Ops configuration on the root port */
1975 if (pci_get_long(pos) & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
1976 PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
1977 PCI_EXP_DEVCAP2_ATOMIC_COMP128)) {
1978 return;
1979 }
1980
1981 info = vfio_get_device_info(vdev->vbasedev.fd);
1982 if (!info) {
1983 return;
1984 }
1985
1986 hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP);
1987 if (!hdr) {
1988 return;
1989 }
1990
1991 cap = (void *)hdr;
1992 if (cap->flags & VFIO_PCI_ATOMIC_COMP32) {
1993 mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP32;
1994 }
1995 if (cap->flags & VFIO_PCI_ATOMIC_COMP64) {
1996 mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP64;
1997 }
1998 if (cap->flags & VFIO_PCI_ATOMIC_COMP128) {
1999 mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP128;
2000 }
2001
2002 if (!mask) {
2003 return;
2004 }
2005
2006 pci_long_test_and_set_mask(pos, mask);
2007 vdev->clear_parent_atomics_on_exit = true;
2008 }
2009
2010 static void vfio_pci_disable_rp_atomics(VFIOPCIDevice *vdev)
2011 {
2012 if (vdev->clear_parent_atomics_on_exit) {
2013 PCIDevice *parent = pci_get_bus(&vdev->pdev)->parent_dev;
2014 uint8_t *pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2;
2015
2016 pci_long_test_and_clear_mask(pos, PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2017 PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
2018 PCI_EXP_DEVCAP2_ATOMIC_COMP128);
2019 }
2020 }
2021
2022 static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
2023 Error **errp)
2024 {
2025 uint16_t flags;
2026 uint8_t type;
2027
2028 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
2029 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
2030
2031 if (type != PCI_EXP_TYPE_ENDPOINT &&
2032 type != PCI_EXP_TYPE_LEG_END &&
2033 type != PCI_EXP_TYPE_RC_END) {
2034
2035 error_setg(errp, "assignment of PCIe type 0x%x "
2036 "devices is not currently supported", type);
2037 return false;
2038 }
2039
2040 if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
2041 PCIBus *bus = pci_get_bus(&vdev->pdev);
2042 PCIDevice *bridge;
2043
2044 /*
2045 * Traditionally PCI device assignment exposes the PCIe capability
2046 * as-is on non-express buses. The reason being that some drivers
2047 * simply assume that it's there, for example tg3. However when
2048 * we're running on a native PCIe machine type, like Q35, we need
2049 * to hide the PCIe capability. The reason for this is twofold;
2050 * first Windows guests get a Code 10 error when the PCIe capability
2051 * is exposed in this configuration. Therefore express devices won't
2052 * work at all unless they're attached to express buses in the VM.
2053 * Second, a native PCIe machine introduces the possibility of fine
2054 * granularity IOMMUs supporting both translation and isolation.
2055 * Guest code to discover the IOMMU visibility of a device, such as
2056 * IOMMU grouping code on Linux, is very aware of device types and
2057 * valid transitions between bus types. An express device on a non-
2058 * express bus is not a valid combination on bare metal systems.
2059 *
2060 * Drivers that require a PCIe capability to make the device
2061 * functional are simply going to need to have their devices placed
2062 * on a PCIe bus in the VM.
2063 */
2064 while (!pci_bus_is_root(bus)) {
2065 bridge = pci_bridge_get_device(bus);
2066 bus = pci_get_bus(bridge);
2067 }
2068
2069 if (pci_bus_is_express(bus)) {
2070 return true;
2071 }
2072
2073 } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
2074 /*
2075 * On a Root Complex bus Endpoints become Root Complex Integrated
2076 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
2077 */
2078 if (type == PCI_EXP_TYPE_ENDPOINT) {
2079 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
2080 PCI_EXP_TYPE_RC_END << 4,
2081 PCI_EXP_FLAGS_TYPE);
2082
2083 /* Link Capabilities, Status, and Control go away */
2084 if (size > PCI_EXP_LNKCTL) {
2085 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
2086 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
2087 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
2088
2089 #ifndef PCI_EXP_LNKCAP2
2090 #define PCI_EXP_LNKCAP2 44
2091 #endif
2092 #ifndef PCI_EXP_LNKSTA2
2093 #define PCI_EXP_LNKSTA2 50
2094 #endif
2095 /* Link 2 Capabilities, Status, and Control go away */
2096 if (size > PCI_EXP_LNKCAP2) {
2097 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
2098 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
2099 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
2100 }
2101 }
2102
2103 } else if (type == PCI_EXP_TYPE_LEG_END) {
2104 /*
2105 * Legacy endpoints don't belong on the root complex. Windows
2106 * seems to be happier with devices if we skip the capability.
2107 */
2108 return true;
2109 }
2110
2111 } else {
2112 /*
2113 * Convert Root Complex Integrated Endpoints to regular endpoints.
2114 * These devices don't support LNK/LNK2 capabilities, so make them up.
2115 */
2116 if (type == PCI_EXP_TYPE_RC_END) {
2117 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
2118 PCI_EXP_TYPE_ENDPOINT << 4,
2119 PCI_EXP_FLAGS_TYPE);
2120 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
2121 QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
2122 QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
2123 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
2124 }
2125
2126 vfio_pci_enable_rp_atomics(vdev);
2127 }
2128
2129 /*
2130 * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
2131 * (Niantic errata #35) causing Windows to error with a Code 10 for the
2132 * device on Q35. Fixup any such devices to report version 1. If we
2133 * were to remove the capability entirely the guest would lose extended
2134 * config space.
2135 */
2136 if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
2137 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
2138 1, PCI_EXP_FLAGS_VERS);
2139 }
2140
2141 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
2142 errp);
2143 if (pos < 0) {
2144 return false;
2145 }
2146
2147 vdev->pdev.exp.exp_cap = pos;
2148
2149 return true;
2150 }
2151
2152 static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
2153 {
2154 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
2155
2156 if (cap & PCI_EXP_DEVCAP_FLR) {
2157 trace_vfio_check_pcie_flr(vdev->vbasedev.name);
2158 vdev->has_flr = true;
2159 }
2160 }
2161
2162 static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
2163 {
2164 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
2165
2166 if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
2167 trace_vfio_check_pm_reset(vdev->vbasedev.name);
2168 vdev->has_pm_reset = true;
2169 }
2170 }
2171
2172 static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
2173 {
2174 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
2175
2176 if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
2177 trace_vfio_check_af_flr(vdev->vbasedev.name);
2178 vdev->has_flr = true;
2179 }
2180 }
2181
2182 static bool vfio_add_vendor_specific_cap(VFIOPCIDevice *vdev, int pos,
2183 uint8_t size, Error **errp)
2184 {
2185 PCIDevice *pdev = &vdev->pdev;
2186
2187 pos = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, size, errp);
2188 if (pos < 0) {
2189 return false;
2190 }
2191
2192 /*
2193 * Exempt config space check for Vendor Specific Information during
2194 * restore/load.
2195 * The config space check is still enforced for the 3-byte VSC header.
2196 */
2197 if (vdev->skip_vsc_check && size > 3) {
2198 memset(pdev->cmask + pos + 3, 0, size - 3);
2199 }
2200
2201 return true;
2202 }
2203
2204 static bool vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
2205 {
2206 ERRP_GUARD();
2207 PCIDevice *pdev = &vdev->pdev;
2208 uint8_t cap_id, next, size;
2209 bool ret;
2210
2211 cap_id = pdev->config[pos];
2212 next = pdev->config[pos + PCI_CAP_LIST_NEXT];
2213
2214 /*
2215 * If it becomes important to configure capabilities to their actual
2216 * size, use this as the default when it's something we don't recognize.
2217 * Since QEMU doesn't actually handle many of the config accesses,
2218 * exact size doesn't seem worthwhile.
2219 */
2220 size = vfio_std_cap_max_size(pdev, pos);
2221
2222 /*
2223 * pci_add_capability always inserts the new capability at the head
2224 * of the chain. Therefore to end up with a chain that matches the
2225 * physical device, we insert from the end by making this recursive.
2226 * This is also why we pre-calculate size above as cached config space
2227 * will be changed as we unwind the stack.
2228 */
2229 if (next) {
2230 if (!vfio_add_std_cap(vdev, next, errp)) {
2231 return false;
2232 }
2233 } else {
2234 /* Begin the rebuild, use QEMU emulated list bits */
2235 pdev->config[PCI_CAPABILITY_LIST] = 0;
2236 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
2237 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
2238
2239 if (!vfio_add_virt_caps(vdev, errp)) {
2240 return false;
2241 }
2242 }
2243
2244 /* Scale down size, esp in case virt caps were added above */
2245 size = MIN(size, vfio_std_cap_max_size(pdev, pos));
2246
2247 /* Use emulated next pointer to allow dropping caps */
2248 pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
2249
2250 switch (cap_id) {
2251 case PCI_CAP_ID_MSI:
2252 ret = vfio_msi_setup(vdev, pos, errp);
2253 break;
2254 case PCI_CAP_ID_EXP:
2255 vfio_check_pcie_flr(vdev, pos);
2256 ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
2257 break;
2258 case PCI_CAP_ID_MSIX:
2259 ret = vfio_msix_setup(vdev, pos, errp);
2260 break;
2261 case PCI_CAP_ID_PM:
2262 vfio_check_pm_reset(vdev, pos);
2263 ret = pci_pm_init(pdev, pos, errp) >= 0;
2264 /*
2265 * PCI-core config space emulation needs write access to the power
2266 * state enabled for tracking BAR mapping relative to PM state.
2267 */
2268 pci_set_word(pdev->wmask + pos + PCI_PM_CTRL, PCI_PM_CTRL_STATE_MASK);
2269 break;
2270 case PCI_CAP_ID_AF:
2271 vfio_check_af_flr(vdev, pos);
2272 ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
2273 break;
2274 case PCI_CAP_ID_VNDR:
2275 ret = vfio_add_vendor_specific_cap(vdev, pos, size, errp);
2276 break;
2277 default:
2278 ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
2279 break;
2280 }
2281
2282 if (!ret) {
2283 error_prepend(errp,
2284 "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
2285 cap_id, size, pos);
2286 }
2287
2288 return ret;
2289 }
2290
2291 static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos)
2292 {
2293 uint32_t ctrl;
2294 int i, nbar;
2295
2296 ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL);
2297 nbar = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
2298
2299 for (i = 0; i < nbar; i++) {
2300 uint32_t cap;
2301 int size;
2302
2303 ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL + (i * 8));
2304 size = (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
2305
2306 /* The cap register reports sizes 1MB to 128TB, with 4 reserved bits */
2307 cap = size <= 27 ? 1U << (size + 4) : 0;
2308
2309 /*
2310 * The PCIe spec (v6.0.1, 7.8.6) requires HW to support at least one
2311 * size in the range 1MB to 512GB. We intend to mask all sizes except
2312 * the one currently enabled in the size field, therefore if it's
2313 * outside the range, hide the whole capability as this virtualization
2314 * trick won't work. If >512GB resizable BARs start to appear, we
2315 * might need an opt-in or reservation scheme in the kernel.
2316 */
2317 if (!(cap & PCI_REBAR_CAP_SIZES)) {
2318 return -EINVAL;
2319 }
2320
2321 /* Hide all sizes reported in the ctrl reg per above requirement. */
2322 ctrl &= (PCI_REBAR_CTRL_BAR_SIZE |
2323 PCI_REBAR_CTRL_NBAR_MASK |
2324 PCI_REBAR_CTRL_BAR_IDX);
2325
2326 /*
2327 * The BAR size field is RW, however we've mangled the capability
2328 * register such that we only report a single size, ie. the current
2329 * BAR size. A write of an unsupported value is undefined, therefore
2330 * the register field is essentially RO.
2331 */
2332 vfio_add_emulated_long(vdev, pos + PCI_REBAR_CAP + (i * 8), cap, ~0);
2333 vfio_add_emulated_long(vdev, pos + PCI_REBAR_CTRL + (i * 8), ctrl, ~0);
2334 }
2335
2336 return 0;
2337 }
2338
2339 static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
2340 {
2341 PCIDevice *pdev = &vdev->pdev;
2342 uint32_t header;
2343 uint16_t cap_id, next, size;
2344 uint8_t cap_ver;
2345 uint8_t *config;
2346
2347 /* Only add extended caps if we have them and the guest can see them */
2348 if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
2349 !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
2350 return;
2351 }
2352
2353 /*
2354 * pcie_add_capability always inserts the new capability at the tail
2355 * of the chain. Therefore to end up with a chain that matches the
2356 * physical device, we cache the config space to avoid overwriting
2357 * the original config space when we parse the extended capabilities.
2358 */
2359 config = g_memdup(pdev->config, vdev->config_size);
2360
2361 /*
2362 * Extended capabilities are chained with each pointing to the next, so we
2363 * can drop anything other than the head of the chain simply by modifying
2364 * the previous next pointer. Seed the head of the chain here such that
2365 * we can simply skip any capabilities we want to drop below, regardless
2366 * of their position in the chain. If this stub capability still exists
2367 * after we add the capabilities we want to expose, update the capability
2368 * ID to zero. Note that we cannot seed with the capability header being
2369 * zero as this conflicts with definition of an absent capability chain
2370 * and prevents capabilities beyond the head of the list from being added.
2371 * By replacing the dummy capability ID with zero after walking the device
2372 * chain, we also transparently mark extended capabilities as absent if
2373 * no capabilities were added. Note that the PCIe spec defines an absence
2374 * of extended capabilities to be determined by a value of zero for the
2375 * capability ID, version, AND next pointer. A non-zero next pointer
2376 * should be sufficient to indicate additional capabilities are present,
2377 * which will occur if we call pcie_add_capability() below. The entire
2378 * first dword is emulated to support this.
2379 *
2380 * NB. The kernel side does similar masking, so be prepared that our
2381 * view of the device may also contain a capability ID zero in the head
2382 * of the chain. Skip it for the same reason that we cannot seed the
2383 * chain with a zero capability.
2384 */
2385 pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
2386 PCI_EXT_CAP(0xFFFF, 0, 0));
2387 pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
2388 pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
2389
2390 for (next = PCI_CONFIG_SPACE_SIZE; next;
2391 next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
2392 header = pci_get_long(config + next);
2393 cap_id = PCI_EXT_CAP_ID(header);
2394 cap_ver = PCI_EXT_CAP_VER(header);
2395
2396 /*
2397 * If it becomes important to configure extended capabilities to their
2398 * actual size, use this as the default when it's something we don't
2399 * recognize. Since QEMU doesn't actually handle many of the config
2400 * accesses, exact size doesn't seem worthwhile.
2401 */
2402 size = vfio_ext_cap_max_size(config, next);
2403
2404 /* Use emulated next pointer to allow dropping extended caps */
2405 pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
2406 PCI_EXT_CAP_NEXT_MASK);
2407
2408 switch (cap_id) {
2409 case 0: /* kernel masked capability */
2410 case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
2411 case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
2412 trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
2413 break;
2414 case PCI_EXT_CAP_ID_REBAR:
2415 if (!vfio_setup_rebar_ecap(vdev, next)) {
2416 pcie_add_capability(pdev, cap_id, cap_ver, next, size);
2417 }
2418 break;
2419 default:
2420 pcie_add_capability(pdev, cap_id, cap_ver, next, size);
2421 }
2422
2423 }
2424
2425 /* Cleanup chain head ID if necessary */
2426 if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
2427 pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
2428 }
2429
2430 g_free(config);
2431 }
2432
2433 static bool vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
2434 {
2435 PCIDevice *pdev = &vdev->pdev;
2436
2437 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
2438 !pdev->config[PCI_CAPABILITY_LIST]) {
2439 return true; /* Nothing to add */
2440 }
2441
2442 if (!vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp)) {
2443 return false;
2444 }
2445
2446 vfio_add_ext_cap(vdev);
2447 return true;
2448 }
2449
2450 void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
2451 {
2452 PCIDevice *pdev = &vdev->pdev;
2453 uint16_t cmd;
2454
2455 vfio_disable_interrupts(vdev);
2456
2457 /*
2458 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
2459 * Also put INTx Disable in known state.
2460 */
2461 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
2462 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
2463 PCI_COMMAND_INTX_DISABLE);
2464 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
2465
2466 /* Make sure the device is in D0 */
2467 if (pdev->pm_cap) {
2468 uint16_t pmcsr;
2469 uint8_t state;
2470
2471 pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2);
2472 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2473 if (state) {
2474 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2475 vfio_pci_write_config(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
2476 /* vfio handles the necessary delay here */
2477 pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2);
2478 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2479 if (state) {
2480 error_report("vfio: Unable to power on device, stuck in D%d",
2481 state);
2482 }
2483 }
2484 }
2485 }
2486
2487 void vfio_pci_post_reset(VFIOPCIDevice *vdev)
2488 {
2489 VFIODevice *vbasedev = &vdev->vbasedev;
2490 Error *err = NULL;
2491 int ret, nr;
2492
2493 if (!vfio_intx_enable(vdev, &err)) {
2494 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2495 }
2496
2497 for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
2498 off_t addr = PCI_BASE_ADDRESS_0 + (4 * nr);
2499 uint32_t val = 0;
2500 uint32_t len = sizeof(val);
2501
2502 ret = vfio_pci_config_space_write(vdev, addr, len, &val);
2503 if (ret != len) {
2504 error_report("%s(%s) reset bar %d failed: %s", __func__,
2505 vbasedev->name, nr, strwriteerror(ret));
2506 }
2507 }
2508
2509 vfio_quirk_reset(vdev);
2510 }
2511
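/*
 * Match a PCI host address against a device name in the canonical
 * "DDDD:BB:DD.F" form, e.g. "0000:06:0d.1".
 */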
2512 bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
2513 {
2514 char tmp[13];
2515
2516 sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
2517 addr->bus, addr->slot, addr->function);
2518
2519 return (strcmp(tmp, name) == 0);
2520 }
2521
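/*
 * Retrieve the hot reset dependency list for the device.  The first
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO call only sizes the header, so the
 * kernel reports the number of affected devices (an ENOSPC error is expected
 * and ignored); the buffer is then resized and the ioctl repeated to fetch
 * the full device list.
 */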
2522 int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
2523 struct vfio_pci_hot_reset_info **info_p)
2524 {
2525 struct vfio_pci_hot_reset_info *info;
2526 int ret, count;
2527
2528 assert(info_p && !*info_p);
2529
2530 info = g_malloc0(sizeof(*info));
2531 info->argsz = sizeof(*info);
2532
2533 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2534 if (ret && errno != ENOSPC) {
2535 ret = -errno;
2536 g_free(info);
2537 if (!vdev->has_pm_reset) {
2538 error_report("vfio: Cannot reset device %s, "
2539 "no available reset mechanism.", vdev->vbasedev.name);
2540 }
2541 return ret;
2542 }
2543
2544 count = info->count;
2545 info = g_realloc(info, sizeof(*info) + (count * sizeof(info->devices[0])));
2546 info->argsz = sizeof(*info) + (count * sizeof(info->devices[0]));
2547
2548 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2549 if (ret) {
2550 ret = -errno;
2551 g_free(info);
2552 error_report("vfio: hot reset info failed: %m");
2553 return ret;
2554 }
2555
2556 *info_p = info;
2557 return 0;
2558 }
2559
2560 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
2561 {
2562 VFIODevice *vbasedev = &vdev->vbasedev;
2563 const VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer);
2564
2565 return vioc->pci_hot_reset(vbasedev, single);
2566 }
2567
2568 /*
2569 * We want to differentiate hot reset of multiple in-use devices vs hot reset
2570 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
2571 * of doing hot resets when there is only a single device per bus. The in-use
2572 * here refers to how many VFIODevices are affected. A hot reset that affects
2573 * multiple devices, but only a single in-use device, means that we can call
2574 * it from our bus ->reset() callback since the extent is effectively a single
2575 * device. This allows us to make use of it in the hotplug path. When there
2576 * are multiple in-use devices, we can only trigger the hot reset during a
2577 * system reset and thus from our reset handler. We separate _one vs _multi
2578 * here so that we don't overlap and do a double reset on the system reset
2579 * path where both our reset handler and ->reset() callback are used. Calling
2580 * _one() will only do a hot reset for the one in-use devices case, calling
2581 * _multi() will do nothing if a _one() would have been sufficient.
2582 */
2583 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
2584 {
2585 return vfio_pci_hot_reset(vdev, true);
2586 }
2587
2588 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
2589 {
2590 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2591 return vfio_pci_hot_reset(vdev, false);
2592 }
2593
2594 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2595 {
2596 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2597 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2598 vbasedev->needs_reset = true;
2599 }
2600 }
2601
2602 static Object *vfio_pci_get_object(VFIODevice *vbasedev)
2603 {
2604 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2605
2606 return OBJECT(vdev);
2607 }
2608
2609 static bool vfio_msix_present(void *opaque, int version_id)
2610 {
2611 PCIDevice *pdev = opaque;
2612
2613 return msix_present(pdev);
2614 }
2615
2616 static bool vfio_display_migration_needed(void *opaque)
2617 {
2618 VFIOPCIDevice *vdev = opaque;
2619
2620 /*
2621 * We need to migrate the VFIODisplay object if ramfb *migration* was
2622 * explicitly requested (in which case we enforced both ramfb=on and
2623 * display=on), or ramfb migration was left at the default "auto"
2624 * setting, and *ramfb* was explicitly requested (in which case we
2625 * enforced display=on).
2626 */
2627 return vdev->ramfb_migrate == ON_OFF_AUTO_ON ||
2628 (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO && vdev->enable_ramfb);
2629 }
2630
2631 static const VMStateDescription vmstate_vfio_display = {
2632 .name = "VFIOPCIDevice/VFIODisplay",
2633 .version_id = 1,
2634 .minimum_version_id = 1,
2635 .needed = vfio_display_migration_needed,
2636 .fields = (const VMStateField[]){
2637 VMSTATE_STRUCT_POINTER(dpy, VFIOPCIDevice, vfio_display_vmstate,
2638 VFIODisplay),
2639 VMSTATE_END_OF_LIST()
2640 }
2641 };
2642
2643 static const VMStateDescription vmstate_vfio_pci_config = {
2644 .name = "VFIOPCIDevice",
2645 .version_id = 1,
2646 .minimum_version_id = 1,
2647 .fields = (const VMStateField[]) {
2648 VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice),
2649 VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present),
2650 VMSTATE_END_OF_LIST()
2651 },
2652 .subsections = (const VMStateDescription * const []) {
2653 &vmstate_vfio_display,
2654 NULL
2655 }
2656 };
2657
2658 static int vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f, Error **errp)
2659 {
2660 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2661
2662 return vmstate_save_state_with_err(f, &vmstate_vfio_pci_config, vdev, NULL,
2663 errp);
2664 }
2665
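/*
 * Restore config space from the migration stream, replay the COMMAND
 * register through vfio_pci_write_config(), remap any sub-page BARs whose
 * guest address changed, and re-enable MSI/MSI-X if the restored config
 * shows it enabled.
 */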
2666 static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
2667 {
2668 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2669 PCIDevice *pdev = &vdev->pdev;
2670 pcibus_t old_addr[PCI_NUM_REGIONS - 1];
2671 int bar, ret;
2672
2673 for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
2674 old_addr[bar] = pdev->io_regions[bar].addr;
2675 }
2676
2677 ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1);
2678 if (ret) {
2679 return ret;
2680 }
2681
2682 vfio_pci_write_config(pdev, PCI_COMMAND,
2683 pci_get_word(pdev->config + PCI_COMMAND), 2);
2684
2685 for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
2686 /*
2687 * The address may not be changed in some scenarios
2688 * (e.g. the VF driver isn't loaded in VM).
2689 */
2690 if (old_addr[bar] != pdev->io_regions[bar].addr &&
2691 vdev->bars[bar].region.size > 0 &&
2692 vdev->bars[bar].region.size < qemu_real_host_page_size()) {
2693 vfio_sub_page_bar_update_mapping(pdev, bar);
2694 }
2695 }
2696
2697 if (msi_enabled(pdev)) {
2698 vfio_msi_enable(vdev);
2699 } else if (msix_enabled(pdev)) {
2700 vfio_msix_enable(vdev);
2701 }
2702
2703 return ret;
2704 }
2705
2706 static VFIODeviceOps vfio_pci_ops = {
2707 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2708 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
2709 .vfio_eoi = vfio_intx_eoi,
2710 .vfio_get_object = vfio_pci_get_object,
2711 .vfio_save_config = vfio_pci_save_config,
2712 .vfio_load_config = vfio_pci_load_config,
2713 };
2714
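/*
 * Probe the VFIO VGA region: require read/write access and a size covering
 * the legacy VGA ranges, then create the 0xa0000 MMIO and 0x3b0/0x3c0 I/O
 * port windows and register them with the PCI core.
 */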
2715 bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
2716 {
2717 VFIODevice *vbasedev = &vdev->vbasedev;
2718 struct vfio_region_info *reg_info = NULL;
2719 int ret;
2720
2721 ret = vfio_device_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2722 if (ret) {
2723 error_setg_errno(errp, -ret,
2724 "failed getting region info for VGA region index %d",
2725 VFIO_PCI_VGA_REGION_INDEX);
2726 return false;
2727 }
2728
2729 if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2730 !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2731 reg_info->size < 0xbffff + 1) {
2732 error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
2733 (unsigned long)reg_info->flags,
2734 (unsigned long)reg_info->size);
2735 return false;
2736 }
2737
2738 vdev->vga = g_new0(VFIOVGA, 1);
2739
2740 vdev->vga->fd_offset = reg_info->offset;
2741 vdev->vga->fd = vdev->vbasedev.fd;
2742
2743 vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2744 vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2745 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
2746
2747 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2748 OBJECT(vdev), &vfio_vga_ops,
2749 &vdev->vga->region[QEMU_PCI_VGA_MEM],
2750 "vfio-vga-mmio@0xa0000",
2751 QEMU_PCI_VGA_MEM_SIZE);
2752
2753 vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2754 vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2755 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
2756
2757 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2758 OBJECT(vdev), &vfio_vga_ops,
2759 &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2760 "vfio-vga-io@0x3b0",
2761 QEMU_PCI_VGA_IO_LO_SIZE);
2762
2763 vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2764 vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2765 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
2766
2767 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2768 OBJECT(vdev), &vfio_vga_ops,
2769 &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2770 "vfio-vga-io@0x3c0",
2771 QEMU_PCI_VGA_IO_HI_SIZE);
2772
2773 pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2774 &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2775 &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2776
2777 return true;
2778 }
2779
2780 static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
2781 {
2782 VFIODevice *vbasedev = &vdev->vbasedev;
2783 struct vfio_region_info *reg_info = NULL;
2784 struct vfio_irq_info irq_info;
2785 int i, ret = -1;
2786
2787 /* Sanity check device */
2788 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2789 error_setg(errp, "this isn't a PCI device");
2790 return false;
2791 }
2792
2793 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2794 error_setg(errp, "unexpected number of io regions %u",
2795 vbasedev->num_regions);
2796 return false;
2797 }
2798
2799 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2800 error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
2801 return false;
2802 }
2803
2804 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
2805 char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2806
2807 ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2808 &vdev->bars[i].region, i, name);
2809 g_free(name);
2810
2811 if (ret) {
2812 error_setg_errno(errp, -ret, "failed to get region %d info", i);
2813 return false;
2814 }
2815
2816 QLIST_INIT(&vdev->bars[i].quirks);
2817 }
2818
2819 ret = vfio_device_get_region_info(vbasedev,
2820 VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
2821 if (ret) {
2822 error_setg_errno(errp, -ret, "failed to get config info");
2823 return false;
2824 }
2825
2826 trace_vfio_populate_device_config(vdev->vbasedev.name,
2827 (unsigned long)reg_info->size,
2828 (unsigned long)reg_info->offset,
2829 (unsigned long)reg_info->flags);
2830
2831 vdev->config_size = reg_info->size;
2832 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2833 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2834 }
2835 vdev->config_offset = reg_info->offset;
2836
2837 if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2838 if (!vfio_populate_vga(vdev, errp)) {
2839 error_append_hint(errp, "device does not support "
2840 "requested feature x-vga\n");
2841 return false;
2842 }
2843 }
2844
2845 ret = vfio_device_get_irq_info(vbasedev, VFIO_PCI_ERR_IRQ_INDEX, &irq_info);
2846 if (ret) {
2847 /* This can fail for an old kernel or legacy PCI dev */
2848 trace_vfio_populate_device_get_irq_info_failure(strerror(-ret));
2849 } else if (irq_info.count == 1) {
2850 vdev->pci_aer = true;
2851 } else {
2852 warn_report(VFIO_MSG_PREFIX
2853 "Could not enable error recovery for the device",
2854 vbasedev->name);
2855 }
2856
2857 return true;
2858 }
2859
2860 static void vfio_pci_put_device(VFIOPCIDevice *vdev)
2861 {
2862 vfio_display_finalize(vdev);
2863 vfio_bars_finalize(vdev);
2864 g_free(vdev->emulated_config_bits);
2865 g_free(vdev->rom);
2866 /*
2867 * XXX Leaking igd_opregion is not an oversight, we can't remove the
2868 * fw_cfg entry therefore leaking this allocation seems like the safest
2869 * option.
2870 *
2871 * g_free(vdev->igd_opregion);
2872 */
2873
2874 vfio_device_detach(&vdev->vbasedev);
2875
2876 g_free(vdev->vbasedev.name);
2877 g_free(vdev->msix);
2878 }
2879
2880 static void vfio_err_notifier_handler(void *opaque)
2881 {
2882 VFIOPCIDevice *vdev = opaque;
2883
2884 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2885 return;
2886 }
2887
2888 /*
2889 * TBD. Retrieve the error details and decide what action
2890 * needs to be taken. One of the actions could be to pass
2891 * the error to the guest and have the guest driver recover
2892 * from the error. This requires that PCIe capabilities be
2893 * exposed to the guest. For now, we just terminate the
2894 * guest to contain the error.
2895 */
2896
2897 error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);
2898
2899 vm_stop(RUN_STATE_INTERNAL_ERROR);
2900 }
2901
2902 /*
2903 * Registers error notifier for devices supporting error recovery.
2904 * If we encounter a failure in this function, we report an error
2905 * and continue after disabling error recovery support for the
2906 * device.
2907 */
2908 static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
2909 {
2910 Error *err = NULL;
2911 int32_t fd;
2912
2913 if (!vdev->pci_aer) {
2914 return;
2915 }
2916
2917 if (event_notifier_init(&vdev->err_notifier, 0)) {
2918 error_report("vfio: Unable to init event notifier for error detection");
2919 vdev->pci_aer = false;
2920 return;
2921 }
2922
2923 fd = event_notifier_get_fd(&vdev->err_notifier);
2924 qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);
2925
2926 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
2927 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
2928 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2929 qemu_set_fd_handler(fd, NULL, NULL, vdev);
2930 event_notifier_cleanup(&vdev->err_notifier);
2931 vdev->pci_aer = false;
2932 }
2933 }
2934
2935 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
2936 {
2937 Error *err = NULL;
2938
2939 if (!vdev->pci_aer) {
2940 return;
2941 }
2942
2943 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
2944 VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
2945 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2946 }
2947 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2948 NULL, NULL, vdev);
2949 event_notifier_cleanup(&vdev->err_notifier);
2950 }
2951
2952 static void vfio_req_notifier_handler(void *opaque)
2953 {
2954 VFIOPCIDevice *vdev = opaque;
2955 Error *err = NULL;
2956
2957 if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2958 return;
2959 }
2960
2961 qdev_unplug(DEVICE(vdev), &err);
2962 if (err) {
2963 warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2964 }
2965 }
2966
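/*
 * If the kernel exposes a device request IRQ, register an eventfd for it so
 * a host request to release the device results in a guest hot-unplug via
 * qdev_unplug() (see vfio_req_notifier_handler() above).
 */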
2967 static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2968 {
2969 struct vfio_irq_info irq_info;
2970 Error *err = NULL;
2971 int32_t fd;
2972 int ret;
2973
2974 if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2975 return;
2976 }
2977
2978 ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX,
2979 &irq_info);
2980 if (ret < 0 || irq_info.count < 1) {
2981 return;
2982 }
2983
2984 if (event_notifier_init(&vdev->req_notifier, 0)) {
2985 error_report("vfio: Unable to init event notifier for device request");
2986 return;
2987 }
2988
2989 fd = event_notifier_get_fd(&vdev->req_notifier);
2990 qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);
2991
2992 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
2993 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
2994 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
2995 qemu_set_fd_handler(fd, NULL, NULL, vdev);
2996 event_notifier_cleanup(&vdev->req_notifier);
2997 } else {
2998 vdev->req_enabled = true;
2999 }
3000 }
3001
3002 static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
3003 {
3004 Error *err = NULL;
3005
3006 if (!vdev->req_enabled) {
3007 return;
3008 }
3009
3010 if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
3011 VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
3012 error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
3013 }
3014 qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
3015 NULL, NULL, vdev);
3016 event_notifier_cleanup(&vdev->req_notifier);
3017
3018 vdev->req_enabled = false;
3019 }
3020
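/*
 * Take a copy of the device's config space and set up the emulated config
 * bits QEMU owns (ROM address, BARs, optional vendor/device/subsystem ID
 * overrides, multifunction bit), clear residual host BAR addresses, then
 * size the ROM, prepare the BARs, and perform MSI-X early setup before
 * registering the BARs.
 */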
3021 static bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp)
3022 {
3023 PCIDevice *pdev = &vdev->pdev;
3024 VFIODevice *vbasedev = &vdev->vbasedev;
3025 uint32_t config_space_size;
3026 int ret;
3027
3028 config_space_size = MIN(pci_config_size(&vdev->pdev), vdev->config_size);
3029
3030 /* Get a copy of config space */
3031 ret = vfio_pci_config_space_read(vdev, 0, config_space_size,
3032 vdev->pdev.config);
3033 if (ret < (int)config_space_size) {
3034 ret = ret < 0 ? -ret : EFAULT;
3035 error_setg_errno(errp, ret, "failed to read device config space");
3036 return false;
3037 }
3038
3039 /* vfio emulates a lot for us, but some bits need extra love */
3040 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
3041
3042 /* QEMU can choose to expose the ROM or not */
3043 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
3044 /* QEMU can also add or extend BARs */
3045 memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);
3046
3047 /*
3048 * The PCI spec reserves vendor ID 0xffff as an invalid value. The
3049 * device ID is managed by the vendor and need only be a 16-bit value.
3050 * Allow any 16-bit value for subsystem so they can be hidden or changed.
3051 */
3052 if (vdev->vendor_id != PCI_ANY_ID) {
3053 if (vdev->vendor_id >= 0xffff) {
3054 error_setg(errp, "invalid PCI vendor ID provided");
3055 return false;
3056 }
3057 vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
3058 trace_vfio_pci_emulated_vendor_id(vbasedev->name, vdev->vendor_id);
3059 } else {
3060 vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
3061 }
3062
3063 if (vdev->device_id != PCI_ANY_ID) {
3064 if (vdev->device_id > 0xffff) {
3065 error_setg(errp, "invalid PCI device ID provided");
3066 return false;
3067 }
3068 vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
3069 trace_vfio_pci_emulated_device_id(vbasedev->name, vdev->device_id);
3070 } else {
3071 vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
3072 }
3073
3074 if (vdev->sub_vendor_id != PCI_ANY_ID) {
3075 if (vdev->sub_vendor_id > 0xffff) {
3076 error_setg(errp, "invalid PCI subsystem vendor ID provided");
3077 return false;
3078 }
3079 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
3080 vdev->sub_vendor_id, ~0);
3081 trace_vfio_pci_emulated_sub_vendor_id(vbasedev->name,
3082 vdev->sub_vendor_id);
3083 }
3084
3085 if (vdev->sub_device_id != PCI_ANY_ID) {
3086 if (vdev->sub_device_id > 0xffff) {
3087 error_setg(errp, "invalid PCI subsystem device ID provided");
3088 return false;
3089 }
3090 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
3091 trace_vfio_pci_emulated_sub_device_id(vbasedev->name,
3092 vdev->sub_device_id);
3093 }
3094
3095 /* QEMU can change multi-function devices to single function, or reverse */
3096 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
3097 PCI_HEADER_TYPE_MULTI_FUNCTION;
3098
3099 /* Restore or clear multifunction, this is always controlled by QEMU */
3100 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
3101 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
3102 } else {
3103 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
3104 }
3105
3106 /*
3107 * Clear host resource mapping info. If we choose not to register a
3108 * BAR, such as might be the case with the option ROM, we can get
3109 * confusing, unwritable, residual addresses from the host here.
3110 */
3111 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
3112 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
3113
3114 vfio_pci_size_rom(vdev);
3115
3116 vfio_bars_prepare(vdev);
3117
3118 if (!vfio_msix_early_setup(vdev, errp)) {
3119 return false;
3120 }
3121
3122 vfio_bars_register(vdev);
3123
3124 return true;
3125 }
3126
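/*
 * Mark the MSI and MSI-X capability ranges as fully emulated and, if the
 * device reports an interrupt pin, set up the INTx mmap timer, routing and
 * irqchip change notifiers, and enable INTx.
 */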
3127 static bool vfio_interrupt_setup(VFIOPCIDevice *vdev, Error **errp)
3128 {
3129 PCIDevice *pdev = &vdev->pdev;
3130
3131 /* QEMU emulates all of MSI & MSIX */
3132 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
3133 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
3134 MSIX_CAP_LENGTH);
3135 }
3136
3137 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
3138 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
3139 vdev->msi_cap_size);
3140 }
3141
3142 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
3143 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
3144 vfio_intx_mmap_enable, vdev);
3145 pci_device_set_intx_routing_notifier(&vdev->pdev,
3146 vfio_intx_routing_notifier);
3147 vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
3148 kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
3149 if (!vfio_intx_enable(vdev, errp)) {
3150 timer_free(vdev->intx.mmap_timer);
3151 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3152 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3153 return false;
3154 }
3155 }
3156 return true;
3157 }
3158
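/*
 * Realize path: resolve the sysfs device from the host address if needed,
 * attach to the VFIO device, populate regions and config space, wire up the
 * vIOMMU and capabilities, apply quirks, set up interrupts, display and
 * migration support, and finally register the error/request notifiers.
 */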
3159 static void vfio_pci_realize(PCIDevice *pdev, Error **errp)
3160 {
3161 ERRP_GUARD();
3162 VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
3163 VFIODevice *vbasedev = &vdev->vbasedev;
3164 int i;
3165 char uuid[UUID_STR_LEN];
3166 g_autofree char *name = NULL;
3167
3168 if (vbasedev->fd < 0 && !vbasedev->sysfsdev) {
3169 if (!(~vdev->host.domain || ~vdev->host.bus ||
3170 ~vdev->host.slot || ~vdev->host.function)) {
3171 error_setg(errp, "No provided host device");
3172 error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
3173 #ifdef CONFIG_IOMMUFD
3174 "or -device vfio-pci,fd=DEVICE_FD "
3175 #endif
3176 "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
3177 return;
3178 }
3179 vbasedev->sysfsdev =
3180 g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
3181 vdev->host.domain, vdev->host.bus,
3182 vdev->host.slot, vdev->host.function);
3183 }
3184
3185 if (!vfio_device_get_name(vbasedev, errp)) {
3186 return;
3187 }
3188
3189 /*
3190 * Mediated devices *might* operate compatibly with discarding of RAM, but
3191 * we cannot know for certain, it depends on whether the mdev vendor driver
3192 * stays in sync with the active working set of the guest driver. Prevent
3193 * the x-balloon-allowed option unless this is minimally an mdev device.
3194 */
3195 vbasedev->mdev = vfio_device_is_mdev(vbasedev);
3196
3197 trace_vfio_mdev(vbasedev->name, vbasedev->mdev);
3198
3199 if (vbasedev->ram_block_discard_allowed && !vbasedev->mdev) {
3200 error_setg(errp, "x-balloon-allowed only potentially compatible "
3201 "with mdev devices");
3202 goto error;
3203 }
3204
3205 if (!qemu_uuid_is_null(&vdev->vf_token)) {
3206 qemu_uuid_unparse(&vdev->vf_token, uuid);
3207 name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid);
3208 } else {
3209 name = g_strdup(vbasedev->name);
3210 }
3211
3212 if (!vfio_device_attach(name, vbasedev,
3213 pci_device_iommu_address_space(pdev), errp)) {
3214 goto error;
3215 }
3216
3217 if (!vfio_populate_device(vdev, errp)) {
3218 goto error;
3219 }
3220
3221 if (!vfio_pci_config_setup(vdev, errp)) {
3222 goto error;
3223 }
3224
3225 if (!vbasedev->mdev &&
3226 !pci_device_set_iommu_device(pdev, vbasedev->hiod, errp)) {
3227 error_prepend(errp, "Failed to set vIOMMU: ");
3228 goto out_teardown;
3229 }
3230
3231 if (!vfio_add_capabilities(vdev, errp)) {
3232 goto out_unset_idev;
3233 }
3234
3235 if (!vfio_config_quirk_setup(vdev, errp)) {
3236 goto out_unset_idev;
3237 }
3238
3239 if (vdev->vga) {
3240 vfio_vga_quirk_setup(vdev);
3241 }
3242
3243 for (i = 0; i < PCI_ROM_SLOT; i++) {
3244 vfio_bar_quirk_setup(vdev, i);
3245 }
3246
3247 if (!vfio_interrupt_setup(vdev, errp)) {
3248 goto out_unset_idev;
3249 }
3250
3251 if (vdev->display != ON_OFF_AUTO_OFF) {
3252 if (!vfio_display_probe(vdev, errp)) {
3253 goto out_deregister;
3254 }
3255 }
3256 if (vdev->enable_ramfb && vdev->dpy == NULL) {
3257 error_setg(errp, "ramfb=on requires display=on");
3258 goto out_deregister;
3259 }
3260 if (vdev->display_xres || vdev->display_yres) {
3261 if (vdev->dpy == NULL) {
3262 error_setg(errp, "xres and yres properties require display=on");
3263 goto out_deregister;
3264 }
3265 if (vdev->dpy->edid_regs == NULL) {
3266 error_setg(errp, "xres and yres properties need edid support");
3267 goto out_deregister;
3268 }
3269 }
3270
3271 if (vdev->ramfb_migrate == ON_OFF_AUTO_ON && !vdev->enable_ramfb) {
3272 warn_report("x-ramfb-migrate=on but ramfb=off. "
3273 "Forcing x-ramfb-migrate to off.");
3274 vdev->ramfb_migrate = ON_OFF_AUTO_OFF;
3275 }
3276 if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
3277 if (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO) {
3278 vdev->ramfb_migrate = ON_OFF_AUTO_OFF;
3279 } else if (vdev->ramfb_migrate == ON_OFF_AUTO_ON) {
3280 error_setg(errp, "x-ramfb-migrate requires enable-migration");
3281 goto out_deregister;
3282 }
3283 }
3284
3285 if (!pdev->failover_pair_id) {
3286 if (!vfio_migration_realize(vbasedev, errp)) {
3287 goto out_deregister;
3288 }
3289 }
3290
3291 vfio_register_err_notifier(vdev);
3292 vfio_register_req_notifier(vdev);
3293 vfio_setup_resetfn_quirk(vdev);
3294
3295 return;
3296
3297 out_deregister:
3298 if (vdev->interrupt == VFIO_INT_INTx) {
3299 vfio_intx_disable(vdev);
3300 }
3301 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3302 if (vdev->irqchip_change_notifier.notify) {
3303 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3304 }
3305 if (vdev->intx.mmap_timer) {
3306 timer_free(vdev->intx.mmap_timer);
3307 }
3308 out_unset_idev:
3309 if (!vbasedev->mdev) {
3310 pci_device_unset_iommu_device(pdev);
3311 }
3312 out_teardown:
3313 vfio_teardown_msi(vdev);
3314 vfio_bars_exit(vdev);
3315 error:
3316 error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name);
3317 }
3318
3319 static void vfio_instance_finalize(Object *obj)
3320 {
3321 VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
3322
3323 vfio_pci_put_device(vdev);
3324 }
3325
3326 static void vfio_exitfn(PCIDevice *pdev)
3327 {
3328 VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev);
3329 VFIODevice *vbasedev = &vdev->vbasedev;
3330
3331 vfio_unregister_req_notifier(vdev);
3332 vfio_unregister_err_notifier(vdev);
3333 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3334 if (vdev->irqchip_change_notifier.notify) {
3335 kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
3336 }
3337 vfio_disable_interrupts(vdev);
3338 if (vdev->intx.mmap_timer) {
3339 timer_free(vdev->intx.mmap_timer);
3340 }
3341 vfio_teardown_msi(vdev);
3342 vfio_pci_disable_rp_atomics(vdev);
3343 vfio_bars_exit(vdev);
3344 vfio_migration_exit(vbasedev);
3345 if (!vbasedev->mdev) {
3346 pci_device_unset_iommu_device(pdev);
3347 }
3348 }
3349
3350 static void vfio_pci_reset(DeviceState *dev)
3351 {
3352 VFIOPCIDevice *vdev = VFIO_PCI_BASE(dev);
3353
3354 trace_vfio_pci_reset(vdev->vbasedev.name);
3355
3356 vfio_pci_pre_reset(vdev);
3357
3358 if (vdev->display != ON_OFF_AUTO_OFF) {
3359 vfio_display_reset(vdev);
3360 }
3361
3362 if (vdev->resetfn && !vdev->resetfn(vdev)) {
3363 goto post_reset;
3364 }
3365
3366 if (vdev->vbasedev.reset_works &&
3367 (vdev->has_flr || !vdev->has_pm_reset) &&
3368 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
3369 trace_vfio_pci_reset_flr(vdev->vbasedev.name);
3370 goto post_reset;
3371 }
3372
3373 /* See if we can do our own bus reset */
3374 if (!vfio_pci_hot_reset_one(vdev)) {
3375 goto post_reset;
3376 }
3377
3378 /* If nothing else works and the device supports PM reset, use it */
3379 if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
3380 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
3381 trace_vfio_pci_reset_pm(vdev->vbasedev.name);
3382 goto post_reset;
3383 }
3384
3385 post_reset:
3386 vfio_pci_post_reset(vdev);
3387 }
3388
3389 static void vfio_instance_init(Object *obj)
3390 {
3391 PCIDevice *pci_dev = PCI_DEVICE(obj);
3392 VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
3393 VFIODevice *vbasedev = &vdev->vbasedev;
3394
3395 device_add_bootindex_property(obj, &vdev->bootindex,
3396 "bootindex", NULL,
3397 &pci_dev->qdev);
3398 vdev->host.domain = ~0U;
3399 vdev->host.bus = ~0U;
3400 vdev->host.slot = ~0U;
3401 vdev->host.function = ~0U;
3402
3403 vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_pci_ops,
3404 DEVICE(vdev), false);
3405
3406 vdev->nv_gpudirect_clique = 0xFF;
3407
3408 /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
3409 * line, therefore, no need to wait to realize like other devices */
3410 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
3411 }
3412
3413 static void vfio_pci_base_dev_class_init(ObjectClass *klass, const void *data)
3414 {
3415 DeviceClass *dc = DEVICE_CLASS(klass);
3416 PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
3417
3418 dc->desc = "VFIO PCI base device";
3419 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
3420 pdc->exit = vfio_exitfn;
3421 pdc->config_read = vfio_pci_read_config;
3422 pdc->config_write = vfio_pci_write_config;
3423 }
3424
3425 static const TypeInfo vfio_pci_base_dev_info = {
3426 .name = TYPE_VFIO_PCI_BASE,
3427 .parent = TYPE_PCI_DEVICE,
3428 .instance_size = 0,
3429 .abstract = true,
3430 .class_init = vfio_pci_base_dev_class_init,
3431 .interfaces = (const InterfaceInfo[]) {
3432 { INTERFACE_PCIE_DEVICE },
3433 { INTERFACE_CONVENTIONAL_PCI_DEVICE },
3434 { }
3435 },
3436 };
3437
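/*
 * OnOffAuto property type for "x-migration-multifd-transfer"; a copy of
 * qdev_prop_on_off_auto that is adjusted in register_vfio_pci_dev_type() so
 * the property remains settable after the device is realized.
 */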
static PropertyInfo vfio_pci_migration_multifd_transfer_prop;

static const Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice,
                            vbasedev.pre_copy_dirty_page_tracking,
                            ON_OFF_AUTO_ON),
    DEFINE_PROP_ON_OFF_AUTO("x-device-dirty-page-tracking", VFIOPCIDevice,
                            vbasedev.device_dirty_page_tracking,
                            ON_OFF_AUTO_ON),
    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
                            display, ON_OFF_AUTO_OFF),
    DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
    DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, true),
    DEFINE_PROP_BIT("x-igd-lpc", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_LPC_BIT, false),
    DEFINE_PROP_ON_OFF_AUTO("x-igd-legacy-mode", VFIOPCIDevice,
                            igd_legacy_mode, ON_OFF_AUTO_AUTO),
    DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
                            vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
    DEFINE_PROP("x-migration-multifd-transfer", VFIOPCIDevice,
                vbasedev.migration_multifd_transfer,
                vfio_pci_migration_multifd_transfer_prop, OnOffAuto,
                .set_default = true, .defval.i = ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("migration-events", VFIOPCIDevice,
                     vbasedev.migration_events, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.ram_block_discard_allowed, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
                     no_geforce_quirks, false),
    DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
                     false),
    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
                     false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
                                   nv_gpudirect_clique,
                                   qdev_prop_nv_gpudirect_clique, uint8_t),
    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
                                OFF_AUTO_PCIBAR_OFF),
#ifdef CONFIG_IOMMUFD
    DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
    DEFINE_PROP_BOOL("skip-vsc-check", VFIOPCIDevice, skip_vsc_check, true),
};
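
/*
 * Typical command-line usage of these properties (host address and sysfs
 * path below are illustrative, not taken from this file):
 *   -device vfio-pci,host=0000:01:00.0
 *   -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:01:00.0,x-vga=on
 */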

#ifdef CONFIG_IOMMUFD
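/*
 * Setter for the "fd" property: lets a management application pass an
 * already-open VFIO device file descriptor instead of having QEMU open the
 * device from sysfs itself.
 */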
static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp)
{
    VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj);
    vfio_device_set_fd(&vdev->vbasedev, str, errp);
}
#endif

static void vfio_pci_dev_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, vfio_pci_reset);
    device_class_set_props(dc, vfio_pci_dev_properties);
#ifdef CONFIG_IOMMUFD
    object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd);
#endif
    dc->desc = "VFIO-based PCI device assignment";
    pdc->realize = vfio_pci_realize;

    object_class_property_set_description(klass, /* 1.3 */
                                          "host",
                                          "Host PCI address [domain:]<bus:slot.function> of assigned device");
    object_class_property_set_description(klass, /* 1.3 */
                                          "x-intx-mmap-timeout-ms",
                                          "When EOI is not provided by KVM/QEMU, wait time "
                                          "(milliseconds) to re-enable device direct access "
                                          "after INTx (DEBUG)");
    object_class_property_set_description(klass, /* 1.5 */
                                          "x-vga",
                                          "Expose VGA address spaces for device");
    object_class_property_set_description(klass, /* 2.3 */
                                          "x-req",
                                          "Disable device request notification support (DEBUG)");
    object_class_property_set_description(klass, /* 2.4 and 2.5 */
                                          "x-no-mmap",
                                          "Disable MMAP for device. Allows tracing of MMIO "
                                          "accesses (DEBUG)");
    object_class_property_set_description(klass, /* 2.5 */
                                          "x-no-kvm-intx",
                                          "Disable direct VFIO->KVM INTx injection. Allows "
                                          "tracing of INTx interrupts (DEBUG)");
    object_class_property_set_description(klass, /* 2.5 */
                                          "x-no-kvm-msi",
                                          "Disable direct VFIO->KVM MSI injection. Allows "
                                          "tracing of MSI interrupts (DEBUG)");
    object_class_property_set_description(klass, /* 2.5 */
                                          "x-no-kvm-msix",
                                          "Disable direct VFIO->KVM MSI-X injection. Allows "
                                          "tracing of MSI-X interrupts (DEBUG)");
    object_class_property_set_description(klass, /* 2.5 */
                                          "x-pci-vendor-id",
                                          "Override PCI Vendor ID with provided value (DEBUG)");
    object_class_property_set_description(klass, /* 2.5 */
                                          "x-pci-device-id",
                                          "Override PCI device ID with provided value (DEBUG)");
    object_class_property_set_description(klass, /* 2.5 */
                                          "x-pci-sub-vendor-id",
                                          "Override PCI Subsystem Vendor ID with provided value "
                                          "(DEBUG)");
    object_class_property_set_description(klass, /* 2.5 */
                                          "x-pci-sub-device-id",
                                          "Override PCI Subsystem Device ID with provided value "
                                          "(DEBUG)");
    object_class_property_set_description(klass, /* 2.6 */
                                          "sysfsdev",
                                          "Host sysfs path of assigned device");
    object_class_property_set_description(klass, /* 2.7 */
                                          "x-igd-opregion",
                                          "Expose host IGD OpRegion to guest");
    object_class_property_set_description(klass, /* 2.7 (See c4c45e943e51) */
                                          "x-igd-gms",
                                          "Override IGD data stolen memory size (32MiB units)");
    object_class_property_set_description(klass, /* 2.11 */
                                          "x-nv-gpudirect-clique",
                                          "Add NVIDIA GPUDirect capability indicating P2P DMA "
                                          "clique for device [0-15]");
    object_class_property_set_description(klass, /* 2.12 */
                                          "x-no-geforce-quirks",
                                          "Disable GeForce quirks (for NVIDIA Quadro/GRID/Tesla). "
                                          "Improves performance");
    object_class_property_set_description(klass, /* 2.12 */
                                          "display",
                                          "Enable display support for device, e.g. vGPU");
    object_class_property_set_description(klass, /* 2.12 */
                                          "x-msix-relocation",
                                          "Specify MSI-X MMIO relocation to the end of specified "
                                          "existing BAR or new BAR to avoid virtualization overhead "
                                          "due to adjacent device registers");
    object_class_property_set_description(klass, /* 3.0 */
                                          "x-no-kvm-ioeventfd",
                                          "Disable registration of ioeventfds with KVM (DEBUG)");
    object_class_property_set_description(klass, /* 3.0 */
                                          "x-no-vfio-ioeventfd",
                                          "Disable linking of KVM ioeventfds to VFIO ioeventfds "
                                          "(DEBUG)");
    object_class_property_set_description(klass, /* 3.1 */
                                          "x-balloon-allowed",
                                          "Override allowing ballooning with device (DEBUG, DANGER)");
    object_class_property_set_description(klass, /* 3.2 */
                                          "xres",
                                          "Set X display resolution the vGPU should use");
    object_class_property_set_description(klass, /* 3.2 */
                                          "yres",
                                          "Set Y display resolution the vGPU should use");
    object_class_property_set_description(klass, /* 5.2 */
                                          "x-pre-copy-dirty-page-tracking",
                                          "Disable dirty pages tracking during iterative phase "
                                          "(DEBUG)");
    object_class_property_set_description(klass, /* 5.2, 8.0 non-experimental */
                                          "enable-migration",
                                          "Enable device migration. Also requires a host VFIO PCI "
                                          "variant or mdev driver with migration support enabled");
    object_class_property_set_description(klass, /* 8.1 */
                                          "vf-token",
                                          "Specify UUID VF token. Required for VF when PF is owned "
                                          "by another VFIO driver");
#ifdef CONFIG_IOMMUFD
    object_class_property_set_description(klass, /* 9.0 */
                                          "iommufd",
                                          "Set host IOMMUFD backend device");
#endif
    object_class_property_set_description(klass, /* 9.1 */
                                          "x-device-dirty-page-tracking",
                                          "Disable device dirty page tracking and use "
                                          "container-based dirty page tracking");
    object_class_property_set_description(klass, /* 9.1 */
                                          "migration-events",
                                          "Emit VFIO migration QAPI event when a VFIO device "
                                          "changes its migration state. For management applications");
    object_class_property_set_description(klass, /* 9.1 */
                                          "skip-vsc-check",
                                          "Skip config space check for Vendor Specific Capability. "
                                          "Setting to false will enforce strict checking of VSC content "
                                          "(DEBUG)");
    object_class_property_set_description(klass, /* 10.0 */
                                          "x-migration-multifd-transfer",
                                          "Transfer this device state via "
                                          "multifd channels when live migrating it");
}

static const TypeInfo vfio_pci_dev_info = {
    .name = TYPE_VFIO_PCI,
    .parent = TYPE_VFIO_PCI_BASE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
};

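/*
 * "vfio-pci-nohotplug": a variant of vfio-pci that cannot be hot-plugged and
 * additionally provides the ramfb boot-display properties below.
 */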
static const Property vfio_pci_dev_nohotplug_properties[] = {
    DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
    DEFINE_PROP_ON_OFF_AUTO("x-ramfb-migrate", VFIOPCIDevice, ramfb_migrate,
                            ON_OFF_AUTO_AUTO),
};

static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass,
                                              const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
    dc->hotpluggable = false;

    object_class_property_set_description(klass, /* 3.1 */
                                          "ramfb",
                                          "Enable ramfb to provide pre-boot graphics for devices "
                                          "enabling display option");
    object_class_property_set_description(klass, /* 8.2 */
                                          "x-ramfb-migrate",
                                          "Override default migration support for ramfb support "
                                          "(DEBUG)");
}

static const TypeInfo vfio_pci_nohotplug_dev_info = {
    .name = TYPE_VFIO_PCI_NOHOTPLUG,
    .parent = TYPE_VFIO_PCI,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_nohotplug_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    /*
     * An ordinary ON_OFF_AUTO property isn't runtime-mutable, but the source
     * VM can run for a long time before being migrated, so it is desirable
     * to have a fallback mechanism to the old way of transferring VFIO
     * device state if it turns out to be necessary.
     * The following gives this type of property the same mutability level
     * as ordinary migration parameters.
     */
    vfio_pci_migration_multifd_transfer_prop = qdev_prop_on_off_auto;
    vfio_pci_migration_multifd_transfer_prop.realized_set_allowed = true;

    type_register_static(&vfio_pci_base_dev_info);
    type_register_static(&vfio_pci_dev_info);
    type_register_static(&vfio_pci_nohotplug_dev_info);
}

type_init(register_vfio_pci_dev_type)