Lines matching "non-masked" (full-text search of drivers/vfio/pci/vfio_pci_intrs.c)

1 // SPDX-License-Identifier: GPL-2.0-only
30 bool masked; /* struct vfio_pci_irq_ctx member */
36 return vdev->irq_type == type; in irq_is()
41 return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX; in is_intx()
46 return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX || in is_irq_none()
47 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX || in is_irq_none()
48 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX); in is_irq_none()
55 return xa_load(&vdev->ctx, index); in vfio_irq_ctx_get()
61 xa_erase(&vdev->ctx, index); in vfio_irq_ctx_free()
75 ret = xa_insert(&vdev->ctx, index, ctx, GFP_KERNEL_ACCOUNT); in vfio_irq_ctx_alloc()
91 if (likely(is_intx(vdev) && !vdev->virq_disabled)) { in vfio_send_intx_eventfd()
97 eventfd_signal(ctx->trigger); in vfio_send_intx_eventfd()
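
The eventfd signalled here is supplied by userspace through VFIO_DEVICE_SET_IRQS. A minimal sketch of the registering side, assuming an already-open VFIO device fd (the "device" parameter is hypothetical and error handling is elided):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: register an eventfd as the INTx trigger, then block on it. */
static int register_intx_trigger(int device)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
	int efd = eventfd(0, EFD_CLOEXEC);

	memset(buf, 0, sizeof(buf));
	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_INTX_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(int32_t));

	if (ioctl(device, VFIO_DEVICE_SET_IRQS, set))
		return -1;
	return efd;	/* read(efd, ...) blocks until the kernel signals INTx */
}
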
101 /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
104 struct pci_dev *pdev = vdev->pdev; in vfio_pci_intx_mask()
109 spin_lock_irqsave(&vdev->irqlock, flags); in vfio_pci_intx_mask()
118 if (vdev->pci_2_3) in vfio_pci_intx_mask()
127 if (!ctx->masked) { in vfio_pci_intx_mask()
132 if (vdev->pci_2_3) in vfio_pci_intx_mask()
135 disable_irq_nosync(pdev->irq); in vfio_pci_intx_mask()
137 ctx->masked = true; in vfio_pci_intx_mask()
142 spin_unlock_irqrestore(&vdev->irqlock, flags); in vfio_pci_intx_mask()
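
For DisINTx-capable (PCI 2.3) devices the mask lands on the device itself. A simplified sketch of what the pci_intx()-based path amounts to (the real helper also does bookkeeping for managed devices):

#include <linux/pci.h>

/* Set the Interrupt Disable bit in the PCI command register. */
static void intx_mask_hw(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INTX_DISABLE))
		pci_write_config_word(pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_INTX_DISABLE);
}

Non-PCI-2.3 devices have no such bit, hence the disable_irq_nosync() fallback on the Linux IRQ line, which is only safe because those devices never request the line with IRQF_SHARED.
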
155 struct pci_dev *pdev = vdev->pdev; in vfio_pci_intx_unmask_handler()
160 spin_lock_irqsave(&vdev->irqlock, flags); in vfio_pci_intx_unmask_handler()
167 if (vdev->pci_2_3) in vfio_pci_intx_unmask_handler()
176 if (ctx->masked && !vdev->virq_disabled) { in vfio_pci_intx_unmask_handler()
179 * but we can avoid that overhead by just re-sending the interrupt to the user. in vfio_pci_intx_unmask_handler()
182 if (vdev->pci_2_3) { in vfio_pci_intx_unmask_handler()
186 enable_irq(pdev->irq); in vfio_pci_intx_unmask_handler()
188 ctx->masked = (ret > 0); in vfio_pci_intx_unmask_handler()
192 spin_unlock_irqrestore(&vdev->irqlock, flags); in vfio_pci_intx_unmask_handler()
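
This handler runs on behalf of the userspace unmask request; a sketch of the corresponding ioctl, reusing the headers and the assumed "device" fd from the INTx sketch above:

/* Ask the kernel to unmask INTx once userspace has serviced it. */
struct vfio_irq_set unmask = {
	.argsz = sizeof(unmask),
	.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
	.index = VFIO_PCI_INTX_IRQ_INDEX,
	.start = 0,
	.count = 1,
};

ioctl(device, VFIO_DEVICE_SET_IRQS, &unmask);
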
214 spin_lock_irqsave(&vdev->irqlock, flags); in vfio_intx_handler()
216 if (!vdev->pci_2_3) { in vfio_intx_handler()
217 disable_irq_nosync(vdev->pdev->irq); in vfio_intx_handler()
218 ctx->masked = true; in vfio_intx_handler()
220 } else if (!ctx->masked && /* may be shared */ in vfio_intx_handler()
221 pci_check_and_mask_intx(vdev->pdev)) { in vfio_intx_handler()
222 ctx->masked = true; in vfio_intx_handler()
226 spin_unlock_irqrestore(&vdev->irqlock, flags); in vfio_intx_handler()
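
Joining the fragments above, the interrupt handler is shaped roughly like this (a sketch reconstructed from the hits, not a verbatim copy of the file):

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		/* Exclusive line: mask at the irqchip level. */
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		ret = IRQ_HANDLED;
	} else if (!ctx->masked && /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		/* Shared line: claim it only if our device asserted it. */
		ctx->masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
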
239 return -EINVAL; in vfio_intx_enable()
241 if (!vdev->pdev->irq) in vfio_intx_enable()
242 return -ENODEV; in vfio_intx_enable()
246 return -ENOMEM; in vfio_intx_enable()
249 * If the virtual interrupt is masked, restore it. Devices in vfio_intx_enable()
250 * supporting DisINTx can be masked at the hardware level in vfio_intx_enable()
251 * here, non-PCI-2.3 devices will have to wait until the interrupt is enabled. in vfio_intx_enable()
254 ctx->masked = vdev->virq_disabled; in vfio_intx_enable()
255 if (vdev->pci_2_3) in vfio_intx_enable()
256 pci_intx(vdev->pdev, !ctx->masked); in vfio_intx_enable()
258 vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; in vfio_intx_enable()
265 struct pci_dev *pdev = vdev->pdev; in vfio_intx_set_signal()
274 return -EINVAL; in vfio_intx_set_signal()
276 if (ctx->trigger) { in vfio_intx_set_signal()
277 free_irq(pdev->irq, vdev); in vfio_intx_set_signal()
278 kfree(ctx->name); in vfio_intx_set_signal()
279 eventfd_ctx_put(ctx->trigger); in vfio_intx_set_signal()
280 ctx->trigger = NULL; in vfio_intx_set_signal()
286 ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", in vfio_intx_set_signal()
288 if (!ctx->name) in vfio_intx_set_signal()
289 return -ENOMEM; in vfio_intx_set_signal()
293 kfree(ctx->name); in vfio_intx_set_signal()
297 ctx->trigger = trigger; in vfio_intx_set_signal()
299 if (!vdev->pci_2_3) in vfio_intx_set_signal()
302 ret = request_irq(pdev->irq, vfio_intx_handler, in vfio_intx_set_signal()
303 irqflags, ctx->name, vdev); in vfio_intx_set_signal()
305 ctx->trigger = NULL; in vfio_intx_set_signal()
306 kfree(ctx->name); in vfio_intx_set_signal()
315 spin_lock_irqsave(&vdev->irqlock, flags); in vfio_intx_set_signal()
316 if (!vdev->pci_2_3 && ctx->masked) in vfio_intx_set_signal()
317 disable_irq_nosync(pdev->irq); in vfio_intx_set_signal()
318 spin_unlock_irqrestore(&vdev->irqlock, flags); in vfio_intx_set_signal()
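
The flag selection implied by the pci_2_3 test above: a shared INTx line is only safe when the handler can silence this one device (DisINTx); otherwise the IRQ must be exclusive so that disable_irq_nosync() affects nobody else. A one-line sketch of the choice:

unsigned long irqflags = vdev->pci_2_3 ? IRQF_SHARED : 0;
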
330 vfio_virqfd_disable(&ctx->unmask); in vfio_intx_disable()
331 vfio_virqfd_disable(&ctx->mask); in vfio_intx_disable()
333 vfio_intx_set_signal(vdev, -1); in vfio_intx_disable()
334 vdev->irq_type = VFIO_PCI_NUM_IRQS; in vfio_intx_disable()
339 * MSI/MSI-X
351 struct pci_dev *pdev = vdev->pdev; in vfio_msi_enable()
357 return -EINVAL; in vfio_msi_enable()
370 vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : in vfio_msi_enable()
375 * Compute the virtual hardware field for max msi vectors - it is the log base 2 of the number of vectors. in vfio_msi_enable()
378 vdev->msi_qmax = fls(nvec * 2 - 1) - 1; in vfio_msi_enable()
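
The qmax value is just the base-2 logarithm of nvec rounded up, since the MSI Multiple Message Capable field encodes a power-of-two vector count. A worked sketch:

#include <linux/bitops.h>

/* fls(nvec * 2 - 1) - 1 == ceil(log2(nvec)):
 *   nvec = 1: fls(1)  - 1 = 0  -> advertises 2^0 = 1 vector
 *   nvec = 3: fls(5)  - 1 = 2  -> advertises 2^2 = 4 vectors
 *   nvec = 8: fls(15) - 1 = 3  -> advertises 2^3 = 8 vectors
 */
static unsigned int msi_qmax(unsigned int nvec)
{
	return fls(nvec * 2 - 1) - 1;
}
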
385 * vfio_msi_alloc_irq() returns the Linux IRQ number of an MSI or MSI-X device
386 * interrupt vector. If a Linux IRQ number is not available then a new
387 * interrupt is allocated if dynamic MSI-X is supported.
391 * Interrupts are freed using pci_free_irq_vectors() when MSI/MSI-X is
392 * disabled.
397 struct pci_dev *pdev = vdev->pdev; in vfio_msi_alloc_irq()
404 return -EINVAL; in vfio_msi_alloc_irq()
405 if (irq > 0 || !msix || !vdev->has_dyn_msix) in vfio_msi_alloc_irq()
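
When pci_irq_vector() reports no Linux IRQ for the vector and the device supports dynamic MSI-X, one can be allocated at exactly that index. A sketch of that path, assuming the pci_msix_alloc_irq_at() API (kernel 6.2+):

/* .index is negative on failure; .virq is the new Linux IRQ number. */
struct msi_map map = pci_msix_alloc_irq_at(pdev, vector, NULL);

return map.index >= 0 ? map.virq : map.index;
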
418 struct pci_dev *pdev = vdev->pdev; in vfio_msi_set_vector_signal()
421 int irq = -EINVAL, ret; in vfio_msi_set_vector_signal()
427 irq_bypass_unregister_producer(&ctx->producer); in vfio_msi_set_vector_signal()
430 free_irq(irq, ctx->trigger); in vfio_msi_set_vector_signal()
432 /* Interrupt stays allocated, will be freed at MSI-X disable. */ in vfio_msi_set_vector_signal()
433 kfree(ctx->name); in vfio_msi_set_vector_signal()
434 eventfd_ctx_put(ctx->trigger); in vfio_msi_set_vector_signal()
441 if (irq == -EINVAL) { in vfio_msi_set_vector_signal()
442 /* Interrupt stays allocated, will be freed at MSI-X disable. */ in vfio_msi_set_vector_signal()
450 return -ENOMEM; in vfio_msi_set_vector_signal()
452 ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)", in vfio_msi_set_vector_signal()
454 if (!ctx->name) { in vfio_msi_set_vector_signal()
455 ret = -ENOMEM; in vfio_msi_set_vector_signal()
466 * If the vector was previously allocated, refresh the on-device message data before enabling in case it had been cleared or corrupted (e.g. due to backdoor resets) since writing. in vfio_msi_set_vector_signal()
478 ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger); in vfio_msi_set_vector_signal()
483 ctx->producer.token = trigger; in vfio_msi_set_vector_signal()
484 ctx->producer.irq = irq; in vfio_msi_set_vector_signal()
485 ret = irq_bypass_register_producer(&ctx->producer); in vfio_msi_set_vector_signal()
487 dev_info(&pdev->dev, in vfio_msi_set_vector_signal()
489 ctx->producer.token, ret); in vfio_msi_set_vector_signal()
491 ctx->producer.token = NULL; in vfio_msi_set_vector_signal()
493 ctx->trigger = trigger; in vfio_msi_set_vector_signal()
500 kfree(ctx->name); in vfio_msi_set_vector_signal()
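
The "refresh the on-device message" step mentioned above can be expressed with the cached-message helpers from <linux/msi.h>; a sketch, assuming MSI-X and a valid irq:

/* Rewrite the routing last programmed by the kernel, in case the
 * vector table entry was cleared (e.g. by a backdoor reset) between
 * allocation and request_irq(). */
if (msix) {
	struct msi_msg msg;

	get_cached_msi_msg(irq, &msg);
	pci_write_msi_msg(irq, &msg);
}
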
513 int fd = fds ? fds[i] : -1; in vfio_msi_set_block()
519 vfio_msi_set_vector_signal(vdev, i, -1, msix); in vfio_msi_set_block()
527 struct pci_dev *pdev = vdev->pdev; in vfio_msi_disable()
532 xa_for_each(&vdev->ctx, i, ctx) { in vfio_msi_disable()
533 vfio_virqfd_disable(&ctx->unmask); in vfio_msi_disable()
534 vfio_virqfd_disable(&ctx->mask); in vfio_msi_disable()
535 vfio_msi_set_vector_signal(vdev, i, -1, msix); in vfio_msi_disable()
546 if (vdev->nointx) in vfio_msi_disable()
549 vdev->irq_type = VFIO_PCI_NUM_IRQS; in vfio_msi_disable()
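
From userspace, an entire index is torn down by writing a count of zero, which funnels into vfio_msi_disable(). A sketch, with the same assumed "device" fd as above:

/* Disable all MSI-X vectors for the device at once. */
struct vfio_irq_set off = {
	.argsz = sizeof(off),
	.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
	.index = VFIO_PCI_MSIX_IRQ_INDEX,
	.start = 0,
	.count = 0,	/* count == 0 disables the whole index */
};

ioctl(device, VFIO_DEVICE_SET_IRQS, &off);
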
560 return -EINVAL; in vfio_pci_set_intx_unmask()
573 return -EINVAL; in vfio_pci_set_intx_unmask()
578 &ctx->unmask, fd); in vfio_pci_set_intx_unmask()
580 vfio_virqfd_disable(&ctx->unmask); in vfio_pci_set_intx_unmask()
591 return -EINVAL; in vfio_pci_set_intx_mask()
600 return -ENOTTY; /* XXX implement me */ in vfio_pci_set_intx_mask()
616 return -EINVAL; in vfio_pci_set_intx_trigger()
637 return -EINVAL; in vfio_pci_set_intx_trigger()
663 return -EINVAL; in vfio_pci_set_msi_trigger()
669 if (vdev->irq_type == index) in vfio_pci_set_msi_trigger()
685 return -EINVAL; in vfio_pci_set_msi_trigger()
692 eventfd_signal(ctx->trigger); in vfio_pci_set_msi_trigger()
695 if (bools[i - start]) in vfio_pci_set_msi_trigger()
696 eventfd_signal(ctx->trigger); in vfio_pci_set_msi_trigger()
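
The DATA_NONE/DATA_BOOL branches above let userspace soft-trigger already-configured vectors, which is handy for testing a route end to end. A sketch firing MSI-X vector 2 (same headers and "device" fd as earlier):

char buf[sizeof(struct vfio_irq_set) + sizeof(uint8_t)];
struct vfio_irq_set *set = (struct vfio_irq_set *)buf;

set->argsz = sizeof(buf);
set->flags = VFIO_IRQ_SET_DATA_BOOL | VFIO_IRQ_SET_ACTION_TRIGGER;
set->index = VFIO_PCI_MSIX_IRQ_INDEX;
set->start = 2;
set->count = 1;
*(uint8_t *)set->data = 1;	/* non-zero -> signal that vector's eventfd */

ioctl(device, VFIO_DEVICE_SET_IRQS, set);
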
721 return -EINVAL; in vfio_pci_set_ctx_trigger_single()
732 return -EINVAL; in vfio_pci_set_ctx_trigger_single()
735 if (fd == -1) { in vfio_pci_set_ctx_trigger_single()
754 return -EINVAL; in vfio_pci_set_ctx_trigger_single()
762 return -EINVAL; in vfio_pci_set_err_trigger()
764 return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, in vfio_pci_set_err_trigger()
773 return -EINVAL; in vfio_pci_set_req_trigger()
775 return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, in vfio_pci_set_req_trigger()
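
The err and req indexes each take a single eventfd through the same ioctl; a sketch registering an error notifier, again with the headers and "device" fd from the INTx sketch (VFIO_PCI_REQ_IRQ_INDEX works identically):

char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
int efd = eventfd(0, EFD_CLOEXEC);

set->argsz = sizeof(buf);
set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
set->index = VFIO_PCI_ERR_IRQ_INDEX;
set->start = 0;
set->count = 1;
memcpy(set->data, &efd, sizeof(int32_t));

ioctl(device, VFIO_DEVICE_SET_IRQS, set);
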
816 if (pci_is_pcie(vdev->pdev)) in vfio_pci_set_irqs_ioctl()
831 return -ENOTTY; in vfio_pci_set_irqs_ioctl()