
Searched full:viommu (Results 1 – 19 of 19) sorted by relevance

/linux-6.15/drivers/iommu/iommufd/
viommu.c
8 struct iommufd_viommu *viommu = in iommufd_viommu_destroy() local
11 if (viommu->ops && viommu->ops->destroy) in iommufd_viommu_destroy()
12 viommu->ops->destroy(viommu); in iommufd_viommu_destroy()
13 refcount_dec(&viommu->hwpt->common.obj.users); in iommufd_viommu_destroy()
14 xa_destroy(&viommu->vdevs); in iommufd_viommu_destroy()
21 struct iommufd_viommu *viommu; in iommufd_viommu_alloc_ioctl() local
50 viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain, in iommufd_viommu_alloc_ioctl()
52 if (IS_ERR(viommu)) { in iommufd_viommu_alloc_ioctl()
53 rc = PTR_ERR(viommu); in iommufd_viommu_alloc_ioctl()
57 xa_init(&viommu->vdevs); in iommufd_viommu_alloc_ioctl()
[all …]
hw_pagetable.c
60 if (hwpt_nested->viommu) in iommufd_hwpt_nested_destroy()
61 refcount_dec(&hwpt_nested->viommu->obj.users); in iommufd_hwpt_nested_destroy()
278 * iommufd_viommu_alloc_hwpt_nested() - Get a hwpt_nested for a vIOMMU
279 @viommu: vIOMMU object to associate the hwpt_nested/domain with
283 * Allocate a new IOMMU_DOMAIN_NESTED for a vIOMMU and return it as a NESTED
287 iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags, in iommufd_viommu_alloc_hwpt_nested() argument
298 if (!viommu->ops || !viommu->ops->alloc_domain_nested) in iommufd_viommu_alloc_hwpt_nested()
302 viommu->ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj); in iommufd_viommu_alloc_hwpt_nested()
308 hwpt_nested->viommu = viommu; in iommufd_viommu_alloc_hwpt_nested()
309 refcount_inc(&viommu->obj.users); in iommufd_viommu_alloc_hwpt_nested()
[all …]
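
The doc comment above describes how a nested HWPT is allocated on top of a vIOMMU. From userspace the same path is reached through IOMMU_HWPT_ALLOC by passing the vIOMMU ID as @pt_id. A minimal sketch follows, assuming the struct iommu_hwpt_alloc layout in this tree's include/uapi/linux/iommufd.h; the driver-specific @data payload is left to the caller and error handling is elided.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alloc_nested_hwpt(int iommufd, __u32 viommu_id, __u32 dev_id,
			     __u32 data_type, void *data, __u32 data_len,
			     __u32 *out_hwpt_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.pt_id = viommu_id,	/* a vIOMMU is a valid parent here */
		.data_type = data_type,	/* driver-specific nesting data type */
		.data_len = data_len,
		.data_uptr = (__u64)(uintptr_t)data,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
		return -1;
	*out_hwpt_id = cmd.out_hwpt_id;
	return 0;
}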
driver.c
39 /* Caller should xa_lock(&viommu->vdevs) to protect the return value */
40 struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu, in iommufd_viommu_find_dev() argument
45 lockdep_assert_held(&viommu->vdevs.xa_lock); in iommufd_viommu_find_dev()
47 vdev = xa_load(&viommu->vdevs, vdev_id); in iommufd_viommu_find_dev()
52 /* Return -ENOENT if the device is not associated with the vIOMMU */
53 int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu, in iommufd_viommu_get_vdev_id() argument
63 xa_lock(&viommu->vdevs); in iommufd_viommu_get_vdev_id()
64 xa_for_each(&viommu->vdevs, index, vdev) { in iommufd_viommu_get_vdev_id()
71 xa_unlock(&viommu->vdevs); in iommufd_viommu_get_vdev_id()
80 int iommufd_viommu_report_event(struct iommufd_viommu *viommu, in iommufd_viommu_report_event() argument
[all …]
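
The comment at driver.c line 39 pins down a locking contract: the device pointer returned by iommufd_viommu_find_dev() is only stable while the vdevs xa_lock is held. A short kernel-side sketch of a conforming caller; the function name is hypothetical:

static int example_log_vdev(struct iommufd_viommu *viommu,
			    unsigned long vdev_id)
{
	struct device *dev;
	int rc = -ENOENT;

	xa_lock(&viommu->vdevs);
	dev = iommufd_viommu_find_dev(viommu, vdev_id);
	if (dev) {
		/* dev may be unbound as soon as the lock is dropped */
		dev_info(dev, "bound as virtual device %lu\n", vdev_id);
		rc = 0;
	}
	xa_unlock(&viommu->vdevs);
	return rc;
}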
eventq.c
272 struct iommufd_viommu *viommu = veventq->viommu; in iommufd_veventq_abort() local
275 lockdep_assert_held_write(&viommu->veventqs_rwsem); in iommufd_veventq_abort()
283 refcount_dec(&viommu->obj.users); in iommufd_veventq_abort()
292 down_write(&veventq->viommu->veventqs_rwsem); in iommufd_veventq_destroy()
294 up_write(&veventq->viommu->veventqs_rwsem); in iommufd_veventq_destroy()
535 struct iommufd_viommu *viommu; in iommufd_veventq_alloc() local
545 viommu = iommufd_get_viommu(ucmd, cmd->viommu_id); in iommufd_veventq_alloc()
546 if (IS_ERR(viommu)) in iommufd_veventq_alloc()
547 return PTR_ERR(viommu); in iommufd_veventq_alloc()
549 down_write(&viommu->veventqs_rwsem); in iommufd_veventq_alloc()
[all …]
iommufd_private.h
319 struct iommufd_viommu *viommu; member
515 /* An iommufd_vevent represents a vIOMMU event in an iommufd_veventq */
527 * An iommufd_veventq object represents an interface to deliver vIOMMU events to
529 * a vIOMMU object during the allocations.
533 struct iommufd_viommu *viommu; member
592 iommufd_viommu_find_veventq(struct iommufd_viommu *viommu, u32 type) in iommufd_viommu_find_veventq() argument
596 lockdep_assert_held(&viommu->veventqs_rwsem); in iommufd_viommu_find_veventq()
598 list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) { in iommufd_viommu_find_veventq()
613 struct iommufd_viommu *viommu; member
615 u64 id; /* per-vIOMMU virtual ID */
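
The comments above describe an iommufd_veventq as the channel that delivers vIOMMU events to userspace, bound to a vIOMMU at allocation time. On the producer side, a driver that owns a vIOMMU pushes events through iommufd_viommu_report_event(); a hedged sketch follows, assuming the signature in this tree's include/linux/iommufd.h, with a hypothetical wrapper name and the ARM SMMUv3 queue type used purely for illustration:

static void example_forward_hw_event(struct iommufd_viommu *viommu,
				     void *hw_evt, size_t evt_len)
{
	int rc;

	/* Fails if userspace has not allocated a matching veventq */
	rc = iommufd_viommu_report_event(viommu,
					 IOMMU_VEVENTQ_TYPE_ARM_SMMUV3,
					 hw_evt, evt_len);
	if (rc)
		pr_debug("vIOMMU event not delivered: %d\n", rc);
}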
selftest.c
153 static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu) in to_mock_viommu() argument
155 return container_of(viommu, struct mock_viommu, core); in to_mock_viommu()
164 struct mock_viommu *viommu; member
217 if (new_viommu != mdev->viommu) { in mock_domain_nop_attach()
219 mdev->viommu = new_viommu; in mock_domain_nop_attach()
631 static void mock_viommu_destroy(struct iommufd_viommu *viommu) in mock_viommu_destroy() argument
634 viommu->iommu_dev, struct mock_iommu_device, iommu_dev); in mock_viommu_destroy()
639 /* iommufd core frees mock_viommu and viommu */ in mock_viommu_destroy()
643 mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags, in mock_viommu_alloc_domain_nested() argument
646 struct mock_viommu *mock_viommu = to_mock_viommu(viommu); in mock_viommu_alloc_domain_nested()
[all …]
Makefile
11 viommu.o
iommufd_test.h
235 * struct iommu_viommu_invalidate_selftest - Invalidation data for Mock VIOMMU
main.c
322 struct iommu_viommu_alloc viommu; member
/linux-6.15/drivers/iommu/
virtio-iommu.c
64 struct viommu_dev *viommu; member
65 struct mutex mutex; /* protects viommu pointer */
78 struct viommu_dev *viommu; member
136 static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, in viommu_get_write_desc_offset() argument
143 return len - viommu->probe_size - tail_size; in viommu_get_write_desc_offset()
154 static int __viommu_sync_req(struct viommu_dev *viommu) in __viommu_sync_req() argument
159 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_sync_req()
161 assert_spin_locked(&viommu->request_lock); in __viommu_sync_req()
165 while (!list_empty(&viommu->requests)) { in __viommu_sync_req()
187 static int viommu_sync_req(struct viommu_dev *viommu) in viommu_sync_req() argument
[all …]
/linux-6.15/drivers/acpi/
viot.c
48 struct viot_iommu *viommu; member
77 static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu, in viot_get_pci_iommu_fwnode() argument
103 viommu->fwnode = dev_fwnode(&pdev->dev); in viot_get_pci_iommu_fwnode()
108 static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu, in viot_get_mmio_iommu_fwnode() argument
123 viommu->fwnode = &adev->fwnode; in viot_get_mmio_iommu_fwnode()
130 struct viot_iommu *viommu; in viot_get_iommu() local
138 list_for_each_entry(viommu, &viot_iommus, list) in viot_get_iommu()
139 if (viommu->offset == offset) in viot_get_iommu()
140 return viommu; in viot_get_iommu()
145 viommu = kzalloc(sizeof(*viommu), GFP_KERNEL); in viot_get_iommu()
[all …]
/linux-6.15/Documentation/userspace-api/
iommufd.rst
85 Such a vIOMMU object generally has access to a nesting parent pagetable
86 to support some HW-accelerated virtualization features. So, a vIOMMU object
88 encapsulate that HWPT_PAGING object. Therefore, a vIOMMU object can be used
93 The name "vIOMMU" isn't necessarily identical to a virtualized IOMMU in a
97 vIOMMU objects created for individual slices of different physical IOMMUs.
98 In other words, a vIOMMU object is always a representation of one physical
103 backed by corresponding vIOMMU objects, in which case a guest OS would do
108 information or attributes (related to the vIOMMU) in a VM. An immediate vDATA
109 example can be the virtual ID of the device on a vIOMMU, which is a unique ID
110 that the VMM assigns to the device for a translation channel/port of the vIOMMU,
[all …]
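
The iommufd.rst excerpt above lays out the model: a vIOMMU object represents a slice of one physical IOMMU, and a virtual device carries per-vIOMMU vDATA such as the VMM-assigned virtual ID. A minimal userspace sketch of that flow, assuming the uAPI structs in this tree's include/uapi/linux/iommufd.h and using the ARM SMMUv3 vIOMMU type as an example; error handling is elided:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int setup_viommu(int iommufd, __u32 dev_id, __u32 nest_parent_hwpt_id,
			__u64 virt_id, __u32 *out_viommu_id)
{
	struct iommu_viommu_alloc valloc = {
		.size = sizeof(valloc),
		.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3,	/* matches the physical IOMMU */
		.dev_id = dev_id,
		.hwpt_id = nest_parent_hwpt_id,		/* the nesting parent HWPT */
	};
	struct iommu_vdevice_alloc vdev = { .size = sizeof(vdev) };

	if (ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &valloc))
		return -1;

	vdev.viommu_id = valloc.out_viommu_id;
	vdev.dev_id = dev_id;
	vdev.virt_id = virt_id;	/* e.g. a vSID/vRID the VMM picked */
	if (ioctl(iommufd, IOMMU_VDEVICE_ALLOC, &vdev))
		return -1;

	*out_viommu_id = valloc.out_viommu_id;
	return 0;
}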
/linux-6.15/include/linux/
iommufd.h
108 * struct iommufd_viommu_ops - vIOMMU specific operations
110 * of the vIOMMU will be freed by the iommufd core after calling this op
111 @alloc_domain_nested: Allocate an IOMMU_DOMAIN_NESTED on a vIOMMU that holds a
116 * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
125 void (*destroy)(struct iommufd_viommu *viommu);
127 struct iommufd_viommu *viommu, u32 flags,
129 int (*cache_invalidate)(struct iommufd_viommu *viommu,
195 struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
197 int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
199 int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
[all …]
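
The ops table above is the driver-facing contract. A skeleton of a driver filling it in, modeled on the mock and arm-smmu-v3 implementations that also appear in these results; the "my_" names are placeholders and the bodies only mark where real work goes:

#include <linux/err.h>
#include <linux/iommufd.h>

static void my_viommu_destroy(struct iommufd_viommu *viommu)
{
	/* Undo driver state; the iommufd core frees the viommu object itself */
}

static struct iommu_domain *
my_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
			      const struct iommu_user_data *user_data)
{
	/* Parse user_data and build the IOMMU_DOMAIN_NESTED here */
	return ERR_PTR(-EOPNOTSUPP);
}

static int my_viommu_cache_invalidate(struct iommufd_viommu *viommu,
				      struct iommu_user_data_array *array)
{
	/* Walk the user request array and flush the hardware caches */
	return -EOPNOTSUPP;
}

static const struct iommufd_viommu_ops my_viommu_ops = {
	.destroy = my_viommu_destroy,
	.alloc_domain_nested = my_viommu_alloc_domain_nested,
	.cache_invalidate = my_viommu_cache_invalidate,
};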
/linux-6.15/include/uapi/linux/
iommufd.h
474 * @pt_id: The IOAS or HWPT or vIOMMU to connect this HWPT to
493 * A user-managed nested HWPT will be created from a given vIOMMU (wrapping a
498 via @dev_id and the vIOMMU via @pt_id must be associated with the same IOMMU
799 * Supported command list only when passing in a vIOMMU via @hwpt_id:
818 * @hwpt_id: ID of a nested HWPT or a vIOMMU, for cache invalidation
829 Invalidate the IOMMU cache for a user-managed page table or vIOMMU. Modifications
832 * cache can be flushed if a vIOMMU is passed in via the @hwpt_id field.
972 * to the vIOMMU, such as:
994 * @viommu_id: vIOMMU ID to associate with the virtual device
995 * @dev_id: The physical device to allocate a virtual instance on the vIOMMU
[all …]
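
Tying the @hwpt_id notes above together: cache invalidation through a vIOMMU uses the same IOMMU_HWPT_INVALIDATE ioctl as a nested HWPT, just with the vIOMMU ID in @hwpt_id (this is also what the test_cmd_viommu_invalidate selftest helper further below exercises). A sketch assuming this tree's struct iommu_hwpt_invalidate layout; the request array format is driver-specific:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int viommu_invalidate(int iommufd, __u32 viommu_id, void *reqs,
			     __u32 data_type, __u32 entry_len,
			     __u32 *entry_num)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,	/* a vIOMMU ID is accepted here */
		.data_uptr = (__u64)(uintptr_t)reqs,
		.data_type = data_type,	/* driver-specific request format */
		.entry_len = entry_len,
		.entry_num = *entry_num,
	};
	int rc = ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd);

	/* On return, entry_num holds the number of requests consumed */
	*entry_num = cmd.entry_num;
	return rc;
}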
/linux-6.15/Documentation/devicetree/bindings/virtio/
mmio.yaml
56 iommus = <&viommu 23>;
59 viommu: iommu@3100 {
/linux-6.15/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3-iommufd.c
220 arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags, in arm_vsmmu_alloc_domain_nested() argument
223 struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core); in arm_vsmmu_alloc_domain_nested()
330 static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu, in arm_vsmmu_cache_invalidate() argument
333 struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core); in arm_vsmmu_cache_invalidate()
/linux-6.15/tools/testing/selftests/iommu/
iommufd.c
2666 unsigned int viommu; in FIXTURE_VARIANT() local
2676 if (variant->viommu) { in FIXTURE_SETUP()
2689 /* Allocate a vIOMMU, taking a refcount on the parent hwpt */ in FIXTURE_SETUP()
2709 .viommu = 0, in FIXTURE_VARIANT_ADD()
2714 .viommu = 1, in FIXTURE_VARIANT_ADD()
2738 /* Negative test -- unsupported viommu type */ in TEST_F()
3106 /* Allocate a regular nested hwpt based on viommu */ in TEST_F()
iommufd_utils.h
332 #define test_cmd_viommu_invalidate(viommu, reqs, lreq, nreqs) \ argument
335 _test_cmd_viommu_invalidate(self->fd, viommu, reqs, \
/linux-6.15/drivers/iommu/amd/
iommu.c
1666 * When NpCache is on, we infer that we run in a VM and use a vIOMMU. in amd_iommu_domain_flush_pages()