// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Intel Corporation
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 *
 * iommufd provides control over the IOMMU HW objects created by IOMMU kernel
 * drivers. IOMMU HW objects revolve around IO page tables that map incoming DMA
 * addresses (IOVA) to CPU addresses.
 */
#define pr_fmt(fmt) "iommufd: " fmt

#include <linux/bug.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iommufd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"

struct iommufd_object_ops {
	void (*pre_destroy)(struct iommufd_object *obj);
	void (*destroy)(struct iommufd_object *obj);
	void (*abort)(struct iommufd_object *obj);
};
static const struct iommufd_object_ops iommufd_object_ops[];
static struct miscdevice vfio_misc_dev;

struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type)
{
	struct iommufd_object *obj;
	int rc;

	obj = kzalloc(size, GFP_KERNEL_ACCOUNT);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	obj->type = type;
	/* Starts out biased by 1 until it is removed from the xarray */
	refcount_set(&obj->wait_cnt, 1);
	refcount_set(&obj->users, 1);

	/*
	 * Reserve an ID in the xarray but do not publish the pointer yet since
	 * the caller hasn't initialized it yet. Once the pointer is published
	 * in the xarray and visible to other threads we can't reliably destroy
	 * it anymore, so the caller must complete all errorable operations
	 * before calling iommufd_object_finalize().
	 */
	rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, xa_limit_31b,
		      GFP_KERNEL_ACCOUNT);
	if (rc)
		goto out_free;
	return obj;
out_free:
	kfree(obj);
	return ERR_PTR(rc);
}
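
/*
 * A minimal sketch of the allocation lifecycle expected from callers of
 * _iommufd_object_alloc(); the caller-side names here are illustrative, not
 * taken from this file:
 *
 *	obj = _iommufd_object_alloc(ictx, sizeof(*my_obj), IOMMUFD_OBJ_IOAS);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	// ... initialize the object; on any failure call iommufd_object_abort()
 *	// (or iommufd_object_abort_and_destroy() once fully initialized) ...
 *	iommufd_object_finalize(ictx, obj);	// publish obj->id to lookups
 *
 * Until iommufd_object_finalize() runs, the reserved ID resolves to
 * XA_ZERO_ENTRY, so iommufd_get_object() cannot return the half-built object.
 */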

struct iommufd_object *_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd,
						  size_t size,
						  enum iommufd_object_type type)
{
	struct iommufd_object *new_obj;

	/* Something is coded wrong if this is hit */
	if (WARN_ON(ucmd->new_obj))
		return ERR_PTR(-EBUSY);

	/*
	 * An object type with an abort op requires that op to be invoked while
	 * the caller still holds its own lock. That doesn't work with
	 * _iommufd_object_alloc_ucmd(), which invokes the abort op from
	 * iommufd_object_abort_and_destroy(), outside the caller's lock.
	 */
	if (WARN_ON(iommufd_object_ops[type].abort))
		return ERR_PTR(-EOPNOTSUPP);

	new_obj = _iommufd_object_alloc(ucmd->ictx, size, type);
	if (IS_ERR(new_obj))
		return new_obj;

	ucmd->new_obj = new_obj;
	return new_obj;
}
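
/*
 * Sketch of how an ioctl implementation uses the ucmd-based allocator; the
 * handler name below is hypothetical:
 *
 *	static int iommufd_example_alloc(struct iommufd_ucmd *ucmd)
 *	{
 *		struct iommufd_object *obj;
 *
 *		obj = _iommufd_object_alloc_ucmd(ucmd, sizeof(*obj),
 *						 IOMMUFD_OBJ_IOAS);
 *		if (IS_ERR(obj))
 *			return PTR_ERR(obj);
 *		// ... initialize and copy the new ID back to userspace ...
 *		return 0;
 *	}
 *
 * The handler never calls finalize/abort itself: iommufd_fops_ioctl() below
 * finalizes ucmd->new_obj when the handler succeeds and aborts and destroys it
 * when the handler fails.
 */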

/*
 * Allow concurrent access to the object.
 *
 * Once another thread can see the object pointer it can prevent object
 * destruction. Except for special kernel-only objects there is no in-kernel way
 * to reliably destroy a single object. Thus all APIs that are creating objects
 * must use iommufd_object_abort() to handle their errors and only call
 * iommufd_object_finalize() once object creation cannot fail.
 */
void iommufd_object_finalize(struct iommufd_ctx *ictx,
			     struct iommufd_object *obj)
{
	XA_STATE(xas, &ictx->objects, obj->id);
	void *old;

	xa_lock(&ictx->objects);
	old = xas_store(&xas, obj);
	xa_unlock(&ictx->objects);
	/* obj->id was returned from xa_alloc() so the xas_store() cannot fail */
	WARN_ON(old != XA_ZERO_ENTRY);
}

/* Undo _iommufd_object_alloc() if iommufd_object_finalize() was not called */
void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
{
	XA_STATE(xas, &ictx->objects, obj->id);
	void *old;

	xa_lock(&ictx->objects);
	old = xas_store(&xas, NULL);
	xa_unlock(&ictx->objects);
	WARN_ON(old != XA_ZERO_ENTRY);
	kfree(obj);
}

/*
 * Abort an object that has been fully initialized and needs destroy, but has
 * not been finalized.
 */
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj)
{
	if (iommufd_object_ops[obj->type].abort)
		iommufd_object_ops[obj->type].abort(obj);
	else
		iommufd_object_ops[obj->type].destroy(obj);
	iommufd_object_abort(ictx, obj);
}

struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
					  enum iommufd_object_type type)
{
	struct iommufd_object *obj;

	if (iommufd_should_fail())
		return ERR_PTR(-ENOENT);

	xa_lock(&ictx->objects);
	obj = xa_load(&ictx->objects, id);
	if (!obj || (type != IOMMUFD_OBJ_ANY && obj->type != type) ||
	    !iommufd_lock_obj(obj))
		obj = ERR_PTR(-ENOENT);
	xa_unlock(&ictx->objects);
	return obj;
}
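
/*
 * Typical lookup pattern in an ioctl handler, sketched with the
 * iommufd_put_object() helper from iommufd_private.h (the IOAS-flavoured use
 * of container_of() is illustrative):
 *
 *	struct iommufd_object *obj;
 *
 *	obj = iommufd_get_object(ucmd->ictx, cmd->ioas_id, IOMMUFD_OBJ_IOAS);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	// ... use container_of(obj, struct iommufd_ioas, obj) ...
 *	iommufd_put_object(ucmd->ictx, obj);
 *
 * While the reference from iommufd_get_object() is held the users count cannot
 * drop to the xarray-only value of one, so a concurrent iommufd_object_remove()
 * fails with -EBUSY instead of destroying the object underneath the user.
 */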

static int iommufd_object_dec_wait(struct iommufd_ctx *ictx,
				   struct iommufd_object *to_destroy)
{
	if (refcount_dec_and_test(&to_destroy->wait_cnt))
		return 0;

	if (iommufd_object_ops[to_destroy->type].pre_destroy)
		iommufd_object_ops[to_destroy->type].pre_destroy(to_destroy);

	if (wait_event_timeout(ictx->destroy_wait,
			       refcount_read(&to_destroy->wait_cnt) == 0,
			       msecs_to_jiffies(60000)))
		return 0;

	pr_crit("Time out waiting for iommufd object to become free\n");
	refcount_inc(&to_destroy->wait_cnt);
	return -EBUSY;
}

/*
 * Remove the given object id from the xarray if the only reference to the
 * object is held by the xarray.
 */
int iommufd_object_remove(struct iommufd_ctx *ictx,
			  struct iommufd_object *to_destroy, u32 id,
			  unsigned int flags)
{
	struct iommufd_object *obj;
	XA_STATE(xas, &ictx->objects, id);
	bool zerod_wait_cnt = false;
	int ret;

	/*
	 * The purpose of the wait_cnt is to ensure deterministic destruction
	 * of objects used by external drivers and destroyed by this function.
	 * Incrementing this wait_cnt should either be short lived, such as
	 * during ioctl execution, or be revoked and blocked during
	 * pre_destroy(), such as vdev holding the idev's refcount.
	 */
	if (flags & REMOVE_WAIT) {
		ret = iommufd_object_dec_wait(ictx, to_destroy);
		if (ret) {
			/*
			 * We have a bug. Put back the caller's reference and
			 * defer cleaning this object until close.
			 */
			refcount_dec(&to_destroy->users);
			return ret;
		}
		zerod_wait_cnt = true;
	}

	xa_lock(&ictx->objects);
	obj = xas_load(&xas);
	if (to_destroy) {
		/*
		 * If the caller is holding a ref on obj we put it here under
		 * the spinlock.
		 */
		refcount_dec(&obj->users);

		if (WARN_ON(obj != to_destroy)) {
			ret = -ENOENT;
			goto err_xa;
		}
	} else if (xa_is_zero(obj) || !obj) {
		ret = -ENOENT;
		goto err_xa;
	}

	if (!refcount_dec_if_one(&obj->users)) {
		ret = -EBUSY;
		goto err_xa;
	}

	xas_store(&xas, (flags & REMOVE_OBJ_TOMBSTONE) ? XA_ZERO_ENTRY : NULL);
	if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj))
		ictx->vfio_ioas = NULL;
	xa_unlock(&ictx->objects);

	/*
	 * Since users is zero any positive wait_cnt must be racing
	 * iommufd_put_object(), or we have a bug.
	 */
	if (!zerod_wait_cnt) {
		ret = iommufd_object_dec_wait(ictx, obj);
		if (WARN_ON(ret))
			return ret;
	}

	iommufd_object_ops[obj->type].destroy(obj);
	kfree(obj);
	return 0;

err_xa:
	if (zerod_wait_cnt) {
		/* Restore the xarray owned reference */
		refcount_set(&obj->wait_cnt, 1);
	}
	xa_unlock(&ictx->objects);

	/* The returned object reference count is zero */
	return ret;
}

static int iommufd_destroy(struct iommufd_ucmd *ucmd)
{
	struct iommu_destroy *cmd = ucmd->cmd;

	return iommufd_object_remove(ucmd->ictx, NULL, cmd->id, 0);
}
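
/*
 * Userspace reaches this through the IOMMU_DESTROY ioctl. A minimal sketch,
 * assuming an already-open iommufd file descriptor and an object ID previously
 * returned by one of the *_ALLOC ioctls:
 *
 *	struct iommu_destroy destroy = {
 *		.size = sizeof(destroy),
 *		.id = ioas_id,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_DESTROY, &destroy))
 *		// -EBUSY here means something still holds a users reference
 *		err(1, "IOMMU_DESTROY");
 */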

static int iommufd_fops_open(struct inode *inode, struct file *filp)
{
	struct iommufd_ctx *ictx;

	ictx = kzalloc(sizeof(*ictx), GFP_KERNEL_ACCOUNT);
	if (!ictx)
		return -ENOMEM;

	/*
	 * For compatibility with VFIO when /dev/vfio/vfio is opened we default
	 * to the same rlimit accounting as vfio uses.
	 */
	if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER) &&
	    filp->private_data == &vfio_misc_dev) {
		ictx->account_mode = IOPT_PAGES_ACCOUNT_MM;
		pr_info_once("IOMMUFD is providing /dev/vfio/vfio, not VFIO.\n");
	}

	init_rwsem(&ictx->ioas_creation_lock);
	xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
	xa_init(&ictx->groups);
	ictx->file = filp;
	mt_init_flags(&ictx->mt_mmap, MT_FLAGS_ALLOC_RANGE);
	init_waitqueue_head(&ictx->destroy_wait);
	mutex_init(&ictx->sw_msi_lock);
	INIT_LIST_HEAD(&ictx->sw_msi_list);
	filp->private_data = ictx;
	return 0;
}

static int iommufd_fops_release(struct inode *inode, struct file *filp)
{
	struct iommufd_ctx *ictx = filp->private_data;
	struct iommufd_sw_msi_map *next;
	struct iommufd_sw_msi_map *cur;
	struct iommufd_object *obj;

	/*
	 * The objects in the xarray form a graph of "users" counts, and we have
	 * to destroy them in a depth first manner. Leaf objects will reduce the
	 * users count of interior objects when they are destroyed.
	 *
	 * Repeatedly destroying all the "1 users" leaf objects will progress
	 * until the entire list is destroyed. If this can't progress then there
	 * is some bug related to object refcounting.
	 */
	while (!xa_empty(&ictx->objects)) {
		unsigned int destroyed = 0;
		unsigned long index;
		bool empty = true;

		/*
		 * We can't use xa_empty() to end the loop as the tombstones
		 * are stored as XA_ZERO_ENTRY in the xarray. However
		 * xa_for_each() automatically converts them to NULL and skips
		 * them, which keeps xa_empty() false. Thus once xa_for_each()
		 * finds no further !NULL entries the loop is done.
		 */
		xa_for_each(&ictx->objects, index, obj) {
			empty = false;
			if (!refcount_dec_if_one(&obj->users))
				continue;

			destroyed++;
			xa_erase(&ictx->objects, index);
			iommufd_object_ops[obj->type].destroy(obj);
			kfree(obj);
		}

		if (empty)
			break;

		/* Bug related to users refcount */
		if (WARN_ON(!destroyed))
			break;
	}

	/*
	 * There may be some tombstones left over from
	 * iommufd_object_tombstone_user()
	 */
	xa_destroy(&ictx->objects);

	WARN_ON(!xa_empty(&ictx->groups));

	mutex_destroy(&ictx->sw_msi_lock);
	list_for_each_entry_safe(cur, next, &ictx->sw_msi_list, sw_msi_item)
		kfree(cur);

	kfree(ictx);
	return 0;
}

static int iommufd_option(struct iommufd_ucmd *ucmd)
{
	struct iommu_option *cmd = ucmd->cmd;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	switch (cmd->option_id) {
	case IOMMU_OPTION_RLIMIT_MODE:
		rc = iommufd_option_rlimit_mode(cmd, ucmd->ictx);
		break;
	case IOMMU_OPTION_HUGE_PAGES:
		rc = iommufd_ioas_option(ucmd);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (rc)
		return rc;
	if (copy_to_user(&((struct iommu_option __user *)ucmd->ubuffer)->val64,
			 &cmd->val64, sizeof(cmd->val64)))
		return -EFAULT;
	return 0;
}
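
/*
 * A minimal userspace sketch of the IOMMU_OPTION ioctl, here disabling huge
 * page coalescing on one IOAS; field names follow the uapi struct iommu_option
 * and the error handling is illustrative:
 *
 *	struct iommu_option opt = {
 *		.size = sizeof(opt),
 *		.option_id = IOMMU_OPTION_HUGE_PAGES,
 *		.op = IOMMU_OPTION_OP_SET,
 *		.object_id = ioas_id,
 *		.val64 = 0,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_OPTION, &opt))
 *		err(1, "IOMMU_OPTION");
 *
 * On the GET op the kernel reports the current value through val64, which is
 * why this handler copies val64 back to userspace above.
 */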

union ucmd_buffer {
	struct iommu_destroy destroy;
	struct iommu_fault_alloc fault;
	struct iommu_hw_info info;
	struct iommu_hw_queue_alloc hw_queue;
	struct iommu_hwpt_alloc hwpt;
	struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap;
	struct iommu_hwpt_invalidate cache;
	struct iommu_hwpt_set_dirty_tracking set_dirty_tracking;
	struct iommu_ioas_alloc alloc;
	struct iommu_ioas_allow_iovas allow_iovas;
	struct iommu_ioas_copy ioas_copy;
	struct iommu_ioas_iova_ranges iova_ranges;
	struct iommu_ioas_map map;
	struct iommu_ioas_unmap unmap;
	struct iommu_option option;
	struct iommu_vdevice_alloc vdev;
	struct iommu_veventq_alloc veventq;
	struct iommu_vfio_ioas vfio_ioas;
	struct iommu_viommu_alloc viommu;
#ifdef CONFIG_IOMMUFD_TEST
	struct iommu_test_cmd test;
#endif
};

struct iommufd_ioctl_op {
	unsigned int size;
	unsigned int min_size;
	unsigned int ioctl_num;
	int (*execute)(struct iommufd_ucmd *ucmd);
};

#define IOCTL_OP(_ioctl, _fn, _struct, _last)                                  \
	[_IOC_NR(_ioctl) - IOMMUFD_CMD_BASE] = {                               \
		.size = sizeof(_struct) +                                      \
			BUILD_BUG_ON_ZERO(sizeof(union ucmd_buffer) <          \
					  sizeof(_struct)),                    \
		.min_size = offsetofend(_struct, _last),                       \
		.ioctl_num = _ioctl,                                           \
		.execute = _fn,                                                \
	}
static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
	IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
	IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc,
		 struct iommu_fault_alloc, out_fault_fd),
	IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
		 __reserved),
	IOCTL_OP(IOMMU_HW_QUEUE_ALLOC, iommufd_hw_queue_alloc_ioctl,
		 struct iommu_hw_queue_alloc, length),
	IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
		 __reserved),
	IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap,
		 struct iommu_hwpt_get_dirty_bitmap, data),
	IOCTL_OP(IOMMU_HWPT_INVALIDATE, iommufd_hwpt_invalidate,
		 struct iommu_hwpt_invalidate, __reserved),
	IOCTL_OP(IOMMU_HWPT_SET_DIRTY_TRACKING, iommufd_hwpt_set_dirty_tracking,
		 struct iommu_hwpt_set_dirty_tracking, __reserved),
	IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl,
		 struct iommu_ioas_alloc, out_ioas_id),
	IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas,
		 struct iommu_ioas_allow_iovas, allowed_iovas),
	IOCTL_OP(IOMMU_IOAS_CHANGE_PROCESS, iommufd_ioas_change_process,
		 struct iommu_ioas_change_process, __reserved),
	IOCTL_OP(IOMMU_IOAS_COPY, iommufd_ioas_copy, struct iommu_ioas_copy,
		 src_iova),
	IOCTL_OP(IOMMU_IOAS_IOVA_RANGES, iommufd_ioas_iova_ranges,
		 struct iommu_ioas_iova_ranges, out_iova_alignment),
	IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map, iova),
	IOCTL_OP(IOMMU_IOAS_MAP_FILE, iommufd_ioas_map_file,
		 struct iommu_ioas_map_file, iova),
	IOCTL_OP(IOMMU_IOAS_UNMAP, iommufd_ioas_unmap, struct iommu_ioas_unmap,
		 length),
	IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option, val64),
	IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
		 struct iommu_vdevice_alloc, virt_id),
	IOCTL_OP(IOMMU_VEVENTQ_ALLOC, iommufd_veventq_alloc,
		 struct iommu_veventq_alloc, out_veventq_fd),
	IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas,
		 __reserved),
	IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl,
		 struct iommu_viommu_alloc, out_viommu_id),
#ifdef CONFIG_IOMMUFD_TEST
	IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last),
#endif
};
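
/*
 * Adding a new command means adding its struct to union ucmd_buffer above and
 * one IOCTL_OP() line to this table. A hypothetical example (none of these
 * names exist):
 *
 *	IOCTL_OP(IOMMU_WIDGET_ALLOC, iommufd_widget_alloc,
 *		 struct iommu_widget_alloc, out_widget_id),
 *
 * The BUILD_BUG_ON_ZERO() inside IOCTL_OP() then enforces at compile time that
 * the new struct fits in union ucmd_buffer, and .min_size = offsetofend()
 * records the smallest layout this kernel accepts, which is how the
 * extensible-struct scheme stays backward compatible with older userspace.
 */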

static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct iommufd_ctx *ictx = filp->private_data;
	const struct iommufd_ioctl_op *op;
	struct iommufd_ucmd ucmd = {};
	union ucmd_buffer buf;
	unsigned int nr;
	int ret;

	nr = _IOC_NR(cmd);
	if (nr < IOMMUFD_CMD_BASE ||
	    (nr - IOMMUFD_CMD_BASE) >= ARRAY_SIZE(iommufd_ioctl_ops))
		return iommufd_vfio_ioctl(ictx, cmd, arg);

	ucmd.ictx = ictx;
	ucmd.ubuffer = (void __user *)arg;
	ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
	if (ret)
		return ret;

	op = &iommufd_ioctl_ops[nr - IOMMUFD_CMD_BASE];
	if (op->ioctl_num != cmd)
		return -ENOIOCTLCMD;
	if (ucmd.user_size < op->min_size)
		return -EINVAL;

	ucmd.cmd = &buf;
	ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer,
				    ucmd.user_size);
	if (ret)
		return ret;
	ret = op->execute(&ucmd);

	if (ucmd.new_obj) {
		if (ret)
			iommufd_object_abort_and_destroy(ictx, ucmd.new_obj);
		else
			iommufd_object_finalize(ictx, ucmd.new_obj);
	}
	return ret;
}
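
/*
 * Every iommufd command struct begins with a size field, which is how
 * copy_struct_from_user() above can accept both older (smaller) and newer
 * (larger) userspace layouts. A minimal userspace sketch using
 * IOMMU_IOAS_ALLOC:
 *
 *	int iommufd = open("/dev/iommu", O_RDWR);
 *	struct iommu_ioas_alloc alloc = { .size = sizeof(alloc) };
 *
 *	if (iommufd < 0 || ioctl(iommufd, IOMMU_IOAS_ALLOC, &alloc))
 *		err(1, "IOMMU_IOAS_ALLOC");
 *	// alloc.out_ioas_id now names the new IOAS object
 *
 * Trailing bytes the kernel does not know about must be zero, otherwise
 * copy_struct_from_user() fails with -E2BIG.
 */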

static void iommufd_fops_vma_open(struct vm_area_struct *vma)
{
	struct iommufd_mmap *immap = vma->vm_private_data;

	refcount_inc(&immap->owner->users);
}

static void iommufd_fops_vma_close(struct vm_area_struct *vma)
{
	struct iommufd_mmap *immap = vma->vm_private_data;

	refcount_dec(&immap->owner->users);
}

static const struct vm_operations_struct iommufd_vma_ops = {
	.open = iommufd_fops_vma_open,
	.close = iommufd_fops_vma_close,
};

/* The vm_pgoff must be pre-allocated from mt_mmap, and given to user space */
static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct iommufd_ctx *ictx = filp->private_data;
	size_t length = vma->vm_end - vma->vm_start;
	struct iommufd_mmap *immap;
	int rc;

	if (!PAGE_ALIGNED(length))
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (vma->vm_flags & VM_EXEC)
		return -EPERM;

	/* vma->vm_pgoff carries a page-shifted start position to an immap */
	immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
	if (!immap)
		return -ENXIO;
	/*
	 * mtree_load() returns the immap for any contained mmio_addr, so only
	 * allow the exact immap thing to be mapped
	 */
	if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
		return -ENXIO;

	vma->vm_pgoff = 0;
	vma->vm_private_data = immap;
	vma->vm_ops = &iommufd_vma_ops;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	rc = io_remap_pfn_range(vma, vma->vm_start,
				immap->mmio_addr >> PAGE_SHIFT, length,
				vma->vm_page_prot);
	if (rc)
		return rc;

	/* vm_ops.open won't be called for mmap itself. */
	refcount_inc(&immap->owner->users);
	return rc;
}
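
/*
 * The mmap offset is not chosen by userspace: an ioctl that creates a mappable
 * region returns the pre-allocated offset and length, which are then passed
 * straight to mmap(). A sketch, assuming illustrative out_offset/out_length
 * style values reported by the allocating ioctl:
 *
 *	void *mmio = mmap(NULL, out_length, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, iommufd, out_offset);
 *	if (mmio == MAP_FAILED)
 *		err(1, "mmap");
 *
 * The mapping must be MAP_SHARED, must not be executable, and must cover the
 * region exactly, matching the checks at the top of iommufd_fops_mmap().
 */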

static const struct file_operations iommufd_fops = {
	.owner = THIS_MODULE,
	.open = iommufd_fops_open,
	.release = iommufd_fops_release,
	.unlocked_ioctl = iommufd_fops_ioctl,
	.mmap = iommufd_fops_mmap,
};

/**
 * iommufd_ctx_get - Get a context reference
 * @ictx: Context to get
 *
 * The caller must already hold a valid reference to ictx.
 */
void iommufd_ctx_get(struct iommufd_ctx *ictx)
{
	get_file(ictx->file);
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_get, "IOMMUFD");

/**
 * iommufd_ctx_from_file - Acquires a reference to the iommufd context
 * @file: File to obtain the reference from
 *
 * Returns a pointer to the iommufd_ctx, otherwise ERR_PTR. The struct file
 * remains owned by the caller and the caller must still do fput. On success
 * the caller is responsible to call iommufd_ctx_put().
 */
struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
{
	struct iommufd_ctx *ictx;

	if (file->f_op != &iommufd_fops)
		return ERR_PTR(-EBADFD);
	ictx = file->private_data;
	iommufd_ctx_get(ictx);
	return ictx;
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_file, "IOMMUFD");

/**
 * iommufd_ctx_from_fd - Acquires a reference to the iommufd context
 * @fd: File descriptor to obtain the reference from
 *
 * Returns a pointer to the iommufd_ctx, otherwise ERR_PTR. On success
 * the caller is responsible to call iommufd_ctx_put().
 */
struct iommufd_ctx *iommufd_ctx_from_fd(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &iommufd_fops) {
		fput(file);
		return ERR_PTR(-EBADFD);
	}
	/* fget is the same as iommufd_ctx_get() */
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_fd, "IOMMUFD");

/**
 * iommufd_ctx_put - Put back a reference
 * @ictx: Context to put back
 */
void iommufd_ctx_put(struct iommufd_ctx *ictx)
{
	fput(ictx->file);
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, "IOMMUFD");

static const struct iommufd_object_ops iommufd_object_ops[] = {
	[IOMMUFD_OBJ_ACCESS] = {
		.destroy = iommufd_access_destroy_object,
	},
	[IOMMUFD_OBJ_DEVICE] = {
		.pre_destroy = iommufd_device_pre_destroy,
		.destroy = iommufd_device_destroy,
	},
	[IOMMUFD_OBJ_FAULT] = {
		.destroy = iommufd_fault_destroy,
	},
	[IOMMUFD_OBJ_HW_QUEUE] = {
		.destroy = iommufd_hw_queue_destroy,
	},
	[IOMMUFD_OBJ_HWPT_PAGING] = {
		.destroy = iommufd_hwpt_paging_destroy,
		.abort = iommufd_hwpt_paging_abort,
	},
	[IOMMUFD_OBJ_HWPT_NESTED] = {
		.destroy = iommufd_hwpt_nested_destroy,
		.abort = iommufd_hwpt_nested_abort,
	},
	[IOMMUFD_OBJ_IOAS] = {
		.destroy = iommufd_ioas_destroy,
	},
	[IOMMUFD_OBJ_VDEVICE] = {
		.destroy = iommufd_vdevice_destroy,
		.abort = iommufd_vdevice_abort,
	},
	[IOMMUFD_OBJ_VEVENTQ] = {
		.destroy = iommufd_veventq_destroy,
		.abort = iommufd_veventq_abort,
	},
	[IOMMUFD_OBJ_VIOMMU] = {
		.destroy = iommufd_viommu_destroy,
	},
#ifdef CONFIG_IOMMUFD_TEST
	[IOMMUFD_OBJ_SELFTEST] = {
		.destroy = iommufd_selftest_destroy,
	},
#endif
};

static struct miscdevice iommu_misc_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "iommu",
	.fops = &iommufd_fops,
	.nodename = "iommu",
	.mode = 0660,
};

static struct miscdevice vfio_misc_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &iommufd_fops,
	.nodename = "vfio/vfio",
	.mode = 0666,
};

static int __init iommufd_init(void)
{
	int ret;

	ret = misc_register(&iommu_misc_dev);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER)) {
		ret = misc_register(&vfio_misc_dev);
		if (ret)
			goto err_misc;
	}
	ret = iommufd_test_init();
	if (ret)
		goto err_vfio_misc;
	return 0;

err_vfio_misc:
	if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER))
		misc_deregister(&vfio_misc_dev);
err_misc:
	misc_deregister(&iommu_misc_dev);
	return ret;
}

static void __exit iommufd_exit(void)
{
	iommufd_test_exit();
	if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER))
		misc_deregister(&vfio_misc_dev);
	misc_deregister(&iommu_misc_dev);
}

module_init(iommufd_init);
module_exit(iommufd_exit);

#if IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER)
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
#endif
MODULE_IMPORT_NS("IOMMUFD_INTERNAL");
MODULE_IMPORT_NS("IOMMUFD");
MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices");
MODULE_LICENSE("GPL");