1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
4 */
5 #include <linux/vfio.h>
6 #include <linux/iommufd.h>
7
8 #include "vfio.h"
9
10 MODULE_IMPORT_NS("IOMMUFD");
11 MODULE_IMPORT_NS("IOMMUFD_VFIO");
12
vfio_iommufd_device_has_compat_ioas(struct vfio_device * vdev,struct iommufd_ctx * ictx)13 bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
14 struct iommufd_ctx *ictx)
15 {
16 u32 ioas_id;
17
18 return !iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
19 }
20
vfio_df_iommufd_bind(struct vfio_device_file * df)21 int vfio_df_iommufd_bind(struct vfio_device_file *df)
22 {
23 struct vfio_device *vdev = df->device;
24 struct iommufd_ctx *ictx = df->iommufd;
25
26 lockdep_assert_held(&vdev->dev_set->lock);
27
28 return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
29 }
30
vfio_iommufd_compat_attach_ioas(struct vfio_device * vdev,struct iommufd_ctx * ictx)31 int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
32 struct iommufd_ctx *ictx)
33 {
34 u32 ioas_id;
35 int ret;
36
37 lockdep_assert_held(&vdev->dev_set->lock);
38
39 /* compat noiommu does not need to do ioas attach */
40 if (vfio_device_is_noiommu(vdev))
41 return 0;
42
43 ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
44 if (ret)
45 return ret;
46
47 /* The legacy path has no way to return the selected pt_id */
48 return vdev->ops->attach_ioas(vdev, &ioas_id);
49 }
50
vfio_df_iommufd_unbind(struct vfio_device_file * df)51 void vfio_df_iommufd_unbind(struct vfio_device_file *df)
52 {
53 struct vfio_device *vdev = df->device;
54
55 lockdep_assert_held(&vdev->dev_set->lock);
56
57 if (vfio_device_is_noiommu(vdev))
58 return;
59
60 if (vdev->ops->unbind_iommufd)
61 vdev->ops->unbind_iommufd(vdev);
62 }
63
vfio_iommufd_device_ictx(struct vfio_device * vdev)64 struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
65 {
66 if (vdev->iommufd_device)
67 return iommufd_device_to_ictx(vdev->iommufd_device);
68 return NULL;
69 }
70 EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);
71
vfio_iommufd_device_id(struct vfio_device * vdev)72 static int vfio_iommufd_device_id(struct vfio_device *vdev)
73 {
74 if (vdev->iommufd_device)
75 return iommufd_device_to_id(vdev->iommufd_device);
76 return -EINVAL;
77 }
78
/*
 * Return the devid for a device:
 *   valid ID        - the device is owned by @ictx
 *   -ENOENT         - the device is owned (via its group) but has no ID
 *   -ENODEV (other) - the device is not owned by @ictx
 */
vfio_iommufd_get_dev_id(struct vfio_device * vdev,struct iommufd_ctx * ictx)85 int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
86 {
87 struct iommu_group *group;
88 int devid;
89
90 if (vfio_iommufd_device_ictx(vdev) == ictx)
91 return vfio_iommufd_device_id(vdev);
92
93 group = iommu_group_get(vdev->dev);
94 if (!group)
95 return -ENODEV;
96
97 if (iommufd_ctx_has_group(ictx, group))
98 devid = -ENOENT;
99 else
100 devid = -ENODEV;
101
102 iommu_group_put(group);
103
104 return devid;
105 }
106 EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);
107
/*
 * The physical standard ops mean that the iommufd_device is bound to the
 * physical device vdev->dev that was provided to vfio_init_group_dev().
 * Drivers using this ops set should call vfio_register_group_dev().
 */
/*
 * Bind the physical device vdev->dev to @ictx and publish the resulting
 * iommufd device ID through @out_device_id.
 */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *bound;

	bound = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(bound))
		return PTR_ERR(bound);

	vdev->iommufd_device = bound;
	ida_init(&vdev->pasids);
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);
126
vfio_iommufd_physical_unbind(struct vfio_device * vdev)127 void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
128 {
129 int pasid;
130
131 lockdep_assert_held(&vdev->dev_set->lock);
132
133 while ((pasid = ida_find_first(&vdev->pasids)) >= 0) {
134 iommufd_device_detach(vdev->iommufd_device, pasid);
135 ida_free(&vdev->pasids, pasid);
136 }
137
138 if (vdev->iommufd_attached) {
139 iommufd_device_detach(vdev->iommufd_device, IOMMU_NO_PASID);
140 vdev->iommufd_attached = false;
141 }
142 iommufd_device_unbind(vdev->iommufd_device);
143 vdev->iommufd_device = NULL;
144 }
145 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);
146
/*
 * Attach (or, when already attached, replace) the RID translation of
 * @vdev to the page table identified by *@pt_id.
 */
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;

	ret = vdev->iommufd_attached ?
		      iommufd_device_replace(vdev->iommufd_device,
					     IOMMU_NO_PASID, pt_id) :
		      iommufd_device_attach(vdev->iommufd_device,
					    IOMMU_NO_PASID, pt_id);
	if (!ret)
		vdev->iommufd_attached = true;
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
168
vfio_iommufd_physical_detach_ioas(struct vfio_device * vdev)169 void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
170 {
171 lockdep_assert_held(&vdev->dev_set->lock);
172
173 if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
174 return;
175
176 iommufd_device_detach(vdev->iommufd_device, IOMMU_NO_PASID);
177 vdev->iommufd_attached = false;
178 }
179 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);
180
/*
 * Attach the given @pasid of @vdev to the page table *@pt_id. If the
 * PASID is already attached this turns into a replace operation.
 */
int vfio_iommufd_physical_pasid_attach_ioas(struct vfio_device *vdev,
					    u32 pasid, u32 *pt_id)
{
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;

	/* Already tracked: replace the existing translation in place */
	if (ida_exists(&vdev->pasids, pasid))
		return iommufd_device_replace(vdev->iommufd_device, pasid,
					      pt_id);

	/* Reserve the PASID first so unbind/detach can find it later */
	ret = ida_alloc_range(&vdev->pasids, pasid, pasid, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ret = iommufd_device_attach(vdev->iommufd_device, pasid, pt_id);
	if (ret)
		ida_free(&vdev->pasids, pasid);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_pasid_attach_ioas);
206
/* Detach @pasid of @vdev and release its tracking entry, if it exists. */
void vfio_iommufd_physical_pasid_detach_ioas(struct vfio_device *vdev,
					     u32 pasid)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device) || !ida_exists(&vdev->pasids, pasid))
		return;

	iommufd_device_detach(vdev->iommufd_device, pasid);
	ida_free(&vdev->pasids, pasid);
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_pasid_detach_ioas);
222
/*
 * The emulated standard ops mean that vfio_device is going to use the
 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using
 * this ops set should call vfio_register_emulated_iommu_dev(). Drivers
 * that do not call vfio_pin_pages()/vfio_dma_rw() have no need to provide
 * dma_unmap.
 */
229
vfio_emulated_unmap(void * data,unsigned long iova,unsigned long length)230 static void vfio_emulated_unmap(void *data, unsigned long iova,
231 unsigned long length)
232 {
233 struct vfio_device *vdev = data;
234
235 if (vdev->ops->dma_unmap)
236 vdev->ops->dma_unmap(vdev, iova, length);
237 }
238
239 static const struct iommufd_access_ops vfio_user_ops = {
240 .needs_pin_pages = 1,
241 .unmap = vfio_emulated_unmap,
242 };
243
/*
 * Create an iommufd_access for the emulated device @vdev on @ictx and
 * publish its ID through @out_device_id.
 */
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_access *access;

	lockdep_assert_held(&vdev->dev_set->lock);

	access = iommufd_access_create(ictx, &vfio_user_ops, vdev,
				       out_device_id);
	if (IS_ERR(access))
		return PTR_ERR(access);

	vdev->iommufd_access = access;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
258
vfio_iommufd_emulated_unbind(struct vfio_device * vdev)259 void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
260 {
261 lockdep_assert_held(&vdev->dev_set->lock);
262
263 if (vdev->iommufd_access) {
264 iommufd_access_destroy(vdev->iommufd_access);
265 vdev->iommufd_attached = false;
266 vdev->iommufd_access = NULL;
267 }
268 }
269 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);
270
/*
 * Attach (or, when already attached, replace) the access of the emulated
 * device @vdev to the IOAS identified by *@pt_id.
 */
int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	ret = vdev->iommufd_attached ?
		      iommufd_access_replace(vdev->iommufd_access, *pt_id) :
		      iommufd_access_attach(vdev->iommufd_access, *pt_id);
	if (!ret)
		vdev->iommufd_attached = true;
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
287
vfio_iommufd_emulated_detach_ioas(struct vfio_device * vdev)288 void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
289 {
290 lockdep_assert_held(&vdev->dev_set->lock);
291
292 if (WARN_ON(!vdev->iommufd_access) ||
293 !vdev->iommufd_attached)
294 return;
295
296 iommufd_access_detach(vdev->iommufd_access);
297 vdev->iommufd_attached = false;
298 }
299 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);
300