// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */
#include "iommufd_private.h"

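/*
 * Allocate a zeroed, memcg-accounted object of @size bytes for @ictx, take
 * the initial users and shortterm_users references, and reserve an ID in the
 * objects xarray without publishing the pointer. The caller must finish
 * initializing the object and then call iommufd_object_finalize() to make it
 * visible to other threads.
 */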
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type)
{
	struct iommufd_object *obj;
	int rc;

	obj = kzalloc(size, GFP_KERNEL_ACCOUNT);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	obj->type = type;
	/* Starts out biased by 1 until it is removed from the xarray */
	refcount_set(&obj->shortterm_users, 1);
	refcount_set(&obj->users, 1);

	/*
	 * Reserve an ID in the xarray but do not publish the pointer yet since
	 * the caller hasn't initialized it yet. Once the pointer is published
	 * in the xarray and visible to other threads we can't reliably destroy
	 * it anymore, so the caller must complete all errorable operations
	 * before calling iommufd_object_finalize().
	 */
	rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, xa_limit_31b,
		      GFP_KERNEL_ACCOUNT);
	if (rc)
		goto out_free;
	return obj;
out_free:
	kfree(obj);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(_iommufd_object_alloc, "IOMMUFD");
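
/*
 * A minimal sketch of the expected caller pattern. iommufd_object_finalize()
 * is named by the comment above; iommufd_object_abort(), struct foo,
 * foo_init() and IOMMUFD_OBJ_FOO below are made-up names used only for
 * illustration:
 *
 *	obj = _iommufd_object_alloc(ictx, sizeof(struct foo), IOMMUFD_OBJ_FOO);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	rc = foo_init(container_of(obj, struct foo, obj));
 *	if (rc) {
 *		iommufd_object_abort(ictx, obj);
 *		return rc;
 *	}
 *	iommufd_object_finalize(ictx, obj);
 */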

/* Caller must hold xa_lock(&viommu->vdevs) to protect the returned pointer */
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
				       unsigned long vdev_id)
{
	struct iommufd_vdevice *vdev;

	lockdep_assert_held(&viommu->vdevs.xa_lock);

	vdev = xa_load(&viommu->vdevs, vdev_id);
	return vdev ? vdev->dev : NULL;
}
EXPORT_SYMBOL_NS_GPL(iommufd_viommu_find_dev, "IOMMUFD");

/* Return -ENOENT if the device is not associated with the vIOMMU */
int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
			       struct device *dev, unsigned long *vdev_id)
{
	struct iommufd_vdevice *vdev;
	unsigned long index;
	int rc = -ENOENT;

	if (WARN_ON_ONCE(!vdev_id))
		return -EINVAL;

	xa_lock(&viommu->vdevs);
	xa_for_each(&viommu->vdevs, index, vdev) {
		if (vdev->dev == dev) {
			*vdev_id = vdev->id;
			rc = 0;
			break;
		}
	}
	xa_unlock(&viommu->vdevs);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_viommu_get_vdev_id, "IOMMUFD");

/*
 * Typically called in a driver's threaded IRQ handler.
 * The @type and @event_data must be defined in include/uapi/linux/iommufd.h
 */
int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
				enum iommu_veventq_type type, void *event_data,
				size_t data_len)
{
	struct iommufd_veventq *veventq;
	struct iommufd_vevent *vevent;
	int rc = 0;

	if (WARN_ON_ONCE(!data_len || !event_data))
		return -EINVAL;

	down_read(&viommu->veventqs_rwsem);

	veventq = iommufd_viommu_find_veventq(viommu, type);
	if (!veventq) {
		rc = -EOPNOTSUPP;
		goto out_unlock_veventqs;
	}

	spin_lock(&veventq->common.lock);
	if (veventq->num_events == veventq->depth) {
		vevent = &veventq->lost_events_header;
		goto out_set_header;
	}

	vevent = kzalloc(struct_size(vevent, event_data, data_len), GFP_ATOMIC);
	if (!vevent) {
		rc = -ENOMEM;
		vevent = &veventq->lost_events_header;
		goto out_set_header;
	}
	memcpy(vevent->event_data, event_data, data_len);
	vevent->data_len = data_len;
	veventq->num_events++;

out_set_header:
	iommufd_vevent_handler(veventq, vevent);
	spin_unlock(&veventq->common.lock);
out_unlock_veventqs:
	up_read(&viommu->veventqs_rwsem);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_viommu_report_event, "IOMMUFD");
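
/*
 * A rough sketch of the intended use from a driver's threaded IRQ handler.
 * my_viommu, MY_VEVENTQ_TYPE, struct my_vevent and fill_event_from_hw() are
 * stand-ins for the driver's own vIOMMU pointer and the event layout it
 * defines in include/uapi/linux/iommufd.h:
 *
 *	struct my_vevent ev = {};
 *	int rc;
 *
 *	fill_event_from_hw(&ev);
 *	rc = iommufd_viommu_report_event(my_viommu, MY_VEVENTQ_TYPE,
 *					 &ev, sizeof(ev));
 *
 * -EOPNOTSUPP means no vEVENTQ of @type exists on the vIOMMU, so the event is
 * dropped; the queue-full and allocation-failure cases fall back to the
 * lost_events_header instead of queueing a normal event.
 */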

#ifdef CONFIG_IRQ_MSI_IOMMU
/*
 * Get an iommufd_sw_msi_map for the MSI physical address requested by the irq
 * layer. The mapping to IOVA is global to the iommufd file descriptor; every
 * domain that is attached to a device using the same MSI parameters will use
 * the same IOVA.
 */
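/*
 * For illustration, assuming a 4K PAGE_SIZE and a sw_msi_start of 0x8000000:
 * the first doorbell under that sw_msi_start takes pgoff 0 and is installed
 * at IOVA 0x8000000, a second distinct doorbell takes pgoff 1 and lands at
 * 0x8001000, and so on (iova = sw_msi_start + pgoff * PAGE_SIZE, see
 * iommufd_sw_msi_install()).
 */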
static struct iommufd_sw_msi_map *
iommufd_sw_msi_get_map(struct iommufd_ctx *ictx, phys_addr_t msi_addr,
		       phys_addr_t sw_msi_start)
{
	struct iommufd_sw_msi_map *cur;
	unsigned int max_pgoff = 0;

	lockdep_assert_held(&ictx->sw_msi_lock);

	list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
		if (cur->sw_msi_start != sw_msi_start)
			continue;
		max_pgoff = max(max_pgoff, cur->pgoff + 1);
		if (cur->msi_addr == msi_addr)
			return cur;
	}

	if (ictx->sw_msi_id >=
	    BITS_PER_BYTE * sizeof_field(struct iommufd_sw_msi_maps, bitmap))
		return ERR_PTR(-EOVERFLOW);

	cur = kzalloc(sizeof(*cur), GFP_KERNEL);
	if (!cur)
		return ERR_PTR(-ENOMEM);

	cur->sw_msi_start = sw_msi_start;
	cur->msi_addr = msi_addr;
	cur->pgoff = max_pgoff;
	cur->id = ictx->sw_msi_id++;
	list_add_tail(&cur->sw_msi_item, &ictx->sw_msi_list);
	return cur;
}

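/*
 * Map the MSI doorbell page described by @msi_map into @hwpt_paging's domain
 * at its reserved IOVA, unless this domain already has it mapped, and record
 * the mapping in the domain's present_sw_msi bitmap so it is only done once.
 */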
int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
			   struct iommufd_hwpt_paging *hwpt_paging,
			   struct iommufd_sw_msi_map *msi_map)
{
	unsigned long iova;

	lockdep_assert_held(&ictx->sw_msi_lock);

	iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
	if (!test_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap)) {
		int rc;

		rc = iommu_map(hwpt_paging->common.domain, iova,
			       msi_map->msi_addr, PAGE_SIZE,
			       IOMMU_WRITE | IOMMU_READ | IOMMU_MMIO,
			       GFP_KERNEL_ACCOUNT);
		if (rc)
			return rc;
		__set_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap);
	}
	return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_sw_msi_install, "IOMMUFD_INTERNAL");

/*
 * Called by the irq code if the platform translates the MSI address through
 * the IOMMU. msi_addr is the physical address of the MSI page. iommufd will
 * allocate an FD-global IOVA for the physical page that is the same across
 * all domains and devices.
 */
int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
		   phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommu_attach_handle *raw_handle;
	struct iommufd_attach_handle *handle;
	struct iommufd_sw_msi_map *msi_map;
	struct iommufd_ctx *ictx;
	unsigned long iova;
	int rc;

	/*
	 * It is safe to call iommu_attach_handle_get() here because the iommu
	 * core code invokes this under the group mutex which also prevents any
	 * change of the attach handle for the duration of this function.
	 */
	iommu_group_mutex_assert(dev);

	raw_handle =
		iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
	if (IS_ERR(raw_handle))
		return 0;
	hwpt_paging = find_hwpt_paging(domain->iommufd_hwpt);

	handle = to_iommufd_handle(raw_handle);
	/* No IOMMU_RESV_SW_MSI means no change to the msi_msg */
	if (handle->idev->igroup->sw_msi_start == PHYS_ADDR_MAX)
		return 0;

	ictx = handle->idev->ictx;
	guard(mutex)(&ictx->sw_msi_lock);
	/*
	 * The input msi_addr is the exact byte offset of the MSI doorbell; we
	 * assume the caller has checked that it is contained within an MMIO
	 * region that is safe to map at PAGE_SIZE.
	 */
	msi_map = iommufd_sw_msi_get_map(handle->idev->ictx,
					 msi_addr & PAGE_MASK,
					 handle->idev->igroup->sw_msi_start);
	if (IS_ERR(msi_map))
		return PTR_ERR(msi_map);

	rc = iommufd_sw_msi_install(ictx, hwpt_paging, msi_map);
	if (rc)
		return rc;
	__set_bit(msi_map->id, handle->idev->igroup->required_sw_msi.bitmap);

	iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
	msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_sw_msi, "IOMMUFD");
#endif

MODULE_DESCRIPTION("iommufd code shared with builtin modules");
MODULE_IMPORT_NS("IOMMUFD_INTERNAL");
MODULE_LICENSE("GPL");