/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __IOMMUFD_PRIVATE_H
#define __IOMMUFD_PRIVATE_H

#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iova_bitmap.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"

struct iommu_domain;
struct iommu_group;
struct iommu_option;
struct iommufd_device;

struct iommufd_sw_msi_map {
	struct list_head sw_msi_item;
	phys_addr_t sw_msi_start;
	phys_addr_t msi_addr;
	unsigned int pgoff;
	unsigned int id;
};

/* Bitmap of struct iommufd_sw_msi_map::id */
struct iommufd_sw_msi_maps {
	DECLARE_BITMAP(bitmap, 64);
};
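
/*
 * Illustrative sketch only (not part of the build): each iommufd_sw_msi_map
 * carries a small integer ::id, and an iommufd_sw_msi_maps records which of
 * those ids a holder (e.g. a paging HWPT or a group) already has installed:
 *
 *	struct iommufd_sw_msi_maps maps = {};
 *
 *	if (!test_bit(msi_map->id, maps.bitmap))
 *		set_bit(msi_map->id, maps.bitmap);
 */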

#ifdef CONFIG_IRQ_MSI_IOMMU
int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
			   struct iommufd_hwpt_paging *hwpt_paging,
			   struct iommufd_sw_msi_map *msi_map);
#endif

struct iommufd_ctx {
	struct file *file;
	struct xarray objects;
	struct xarray groups;
	wait_queue_head_t destroy_wait;
	struct rw_semaphore ioas_creation_lock;
	struct maple_tree mt_mmap;

	struct mutex sw_msi_lock;
	struct list_head sw_msi_list;
	unsigned int sw_msi_id;

	u8 account_mode;
	/* Compatibility with VFIO no iommu */
	u8 no_iommu_mode;
	struct iommufd_ioas *vfio_ioas;
};

/* Entry for iommufd_ctx::mt_mmap */
struct iommufd_mmap {
	struct iommufd_object *owner;

	/* Page-shifted start position in mt_mmap to validate vma->vm_pgoff */
	unsigned long vm_pgoff;

	/* Physical range for io_remap_pfn_range() */
	phys_addr_t mmio_addr;
	size_t length;
};

/*
 * The IOVA to PFN map. The map automatically copies the PFNs into multiple
 * domains and permits sharing of PFNs between io_pagetable instances. This
 * supports both a design where IOAS's are 1:1 with a domain (eg because the
 * domain is HW customized) and one where the IOAS is 1:N with multiple generic
 * domains. The io_pagetable holds an interval tree of iopt_areas which point
 * to shared iopt_pages which hold the pfns mapped to the page table.
 *
 * The locking order is domains_rwsem -> iova_rwsem -> pages::mutex
 */
struct io_pagetable {
	struct rw_semaphore domains_rwsem;
	struct xarray domains;
	struct xarray access_list;
	unsigned int next_domain_id;

	struct rw_semaphore iova_rwsem;
	struct rb_root_cached area_itree;
	/* IOVA that cannot become reserved, struct iopt_allowed */
	struct rb_root_cached allowed_itree;
	/* IOVA that cannot be allocated, struct iopt_reserved */
	struct rb_root_cached reserved_itree;
	u8 disable_large_pages;
	unsigned long iova_alignment;
};
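
/*
 * Lock-ordering sketch only (an assumption drawn from the comment above, not a
 * definitive walk of the core code): a writer that updates both the domain
 * list and the area tree would nest the locks in the documented order:
 *
 *	down_write(&iopt->domains_rwsem);
 *	down_write(&iopt->iova_rwsem);
 *	... modify area_itree / domains ...
 *	up_write(&iopt->iova_rwsem);
 *	up_write(&iopt->domains_rwsem);
 */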

void iopt_init_table(struct io_pagetable *iopt);
void iopt_destroy_table(struct io_pagetable *iopt);
int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
		   unsigned long length, struct list_head *pages_list);
void iopt_free_pages_list(struct list_head *pages_list);
enum {
	IOPT_ALLOC_IOVA = 1 << 0,
};
int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, void __user *uptr,
			unsigned long length, int iommu_prot,
			unsigned int flags);
int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, struct file *file,
			unsigned long start, unsigned long length,
			int iommu_prot, unsigned int flags);
int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
		   unsigned long length, unsigned long *dst_iova,
		   int iommu_prot, unsigned int flags);
int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
		    unsigned long length, unsigned long *unmapped);
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);

int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
				   struct iommu_domain *domain,
				   unsigned long flags,
				   struct iommu_hwpt_get_dirty_bitmap *bitmap);
int iopt_set_dirty_tracking(struct io_pagetable *iopt,
			    struct iommu_domain *domain, bool enable);

void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length);
int iopt_table_add_domain(struct io_pagetable *iopt,
			  struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt,
			      struct iommu_domain *domain);
int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
					struct device *dev,
					phys_addr_t *sw_msi_start);
int iopt_set_allow_iova(struct io_pagetable *iopt,
			struct rb_root_cached *allowed_iova);
int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
		      unsigned long last, void *owner);
void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner);
int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas,
		  size_t num_iovas);
void iopt_enable_large_pages(struct io_pagetable *iopt);
int iopt_disable_large_pages(struct io_pagetable *iopt);

struct iommufd_ucmd {
	struct iommufd_ctx *ictx;
	void __user *ubuffer;
	u32 user_size;
	void *cmd;
	struct iommufd_object *new_obj;
};

int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
		       unsigned long arg);

/* Copy the response in ucmd->cmd back to userspace. */
static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,
				       size_t cmd_len)
{
	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
			 min_t(size_t, ucmd->user_size, cmd_len)))
		return -EFAULT;
	return 0;
}
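
/*
 * Usage sketch (hypothetical handler and uAPI struct, for illustration only):
 * an ioctl implementation typically casts ucmd->cmd to its uAPI struct, fills
 * the output fields and copies the result back with iommufd_ucmd_respond():
 *
 *	static int iommufd_example_ioctl(struct iommufd_ucmd *ucmd)
 *	{
 *		struct iommu_example *cmd = ucmd->cmd;
 *
 *		cmd->out_value = 0;
 *		return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 *	}
 */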

static inline bool iommufd_lock_obj(struct iommufd_object *obj)
{
	if (!refcount_inc_not_zero(&obj->users))
		return false;
	if (!refcount_inc_not_zero(&obj->wait_cnt)) {
		/*
		 * If the caller doesn't already have a ref on obj this must be
		 * called under the xa_lock. Otherwise the caller is holding a
		 * ref on users. Thus it cannot be one before this decrement.
		 */
		refcount_dec(&obj->users);
		return false;
	}
	return true;
}

struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
					  enum iommufd_object_type type);
static inline void iommufd_put_object(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj)
{
	/*
	 * Users first, then wait_cnt so that REMOVE_WAIT never sees a spurious
	 * !0 users with a 0 wait_cnt.
	 */
	refcount_dec(&obj->users);
	if (refcount_dec_and_test(&obj->wait_cnt))
		wake_up_interruptible_all(&ictx->destroy_wait);
}
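
/*
 * Reference sketch (illustrative, not a definitive flow): ioctl paths look an
 * object up by ID, which takes the users/wait_cnt references, and drop them
 * with iommufd_put_object() when done:
 *
 *	struct iommufd_object *obj;
 *
 *	obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_IOAS);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	... use the object ...
 *	iommufd_put_object(ucmd->ictx, obj);
 */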

void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx,
			     struct iommufd_object *obj);

enum {
	REMOVE_WAIT = BIT(0),
	REMOVE_OBJ_TOMBSTONE = BIT(1),
};
int iommufd_object_remove(struct iommufd_ctx *ictx,
			  struct iommufd_object *to_destroy, u32 id,
			  unsigned int flags);

/*
 * The caller holds a users refcount and wants to destroy the object. At this
 * point the caller has no wait_cnt reference and at least the xarray will be
 * holding one.
 */
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
					       struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * Similar to iommufd_object_destroy_user(), except that the object ID is left
 * reserved/tombstoned.
 */
static inline void iommufd_object_tombstone_user(struct iommufd_ctx *ictx,
						 struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id,
				    REMOVE_WAIT | REMOVE_OBJ_TOMBSTONE);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * The HWPT allocated by autodomains is used in possibly many devices and
 * is automatically destroyed when its refcount reaches zero.
 *
 * If userspace uses the HWPT manually, even for a short term, then it will
 * disrupt this refcounting and the auto-free in the kernel will not work.
 * Userspace that tries to use the automatically allocated HWPT must be careful
 * to ensure that it is consistently destroyed, eg by not racing accesses
 * and by not attaching an automatic HWPT to a device manually.
 */
static inline void
iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
				   struct iommufd_object *obj)
{
	iommufd_object_remove(ictx, obj, obj->id, 0);
}
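
/*
 * Teardown summary (a sketch of intent drawn from the helpers above, not new
 * semantics):
 *
 *	iommufd_object_destroy_user(ictx, obj);        - must succeed; WARNs otherwise
 *	iommufd_object_tombstone_user(ictx, obj);      - same, but the ID stays reserved
 *	iommufd_object_put_and_try_destroy(ictx, obj); - best effort, e.g. autodomain HWPTs
 */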

/*
 * Callers of these normal object allocators must call iommufd_object_finalize()
 * to finalize the object, or call iommufd_object_abort_and_destroy() to revert
 * the allocation.
 */
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type);

#define __iommufd_object_alloc(ictx, ptr, type, obj)                           \
	container_of(_iommufd_object_alloc(                                    \
			     ictx,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc(ictx, ptr, type) \
	__iommufd_object_alloc(ictx, ptr, type, obj)
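
/*
 * Allocation sketch (hypothetical object type, for illustration): the wrapper
 * struct must embed its struct iommufd_object as the first member, named obj,
 * and the caller pairs the allocation with finalize or abort_and_destroy:
 *
 *	struct iommufd_example {
 *		struct iommufd_object obj;
 *		int data;
 *	};
 *
 *	struct iommufd_example *ex;
 *
 *	ex = iommufd_object_alloc(ictx, ex, IOMMUFD_OBJ_EXAMPLE);
 *	if (IS_ERR(ex))
 *		return PTR_ERR(ex);
 *	... initialize ex ...
 *	iommufd_object_finalize(ictx, &ex->obj);
 */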

/*
 * Callers of these _ucmd allocators should not call iommufd_object_finalize()
 * or iommufd_object_abort_and_destroy(), as the core automatically does that.
 */
struct iommufd_object *
_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd, size_t size,
			   enum iommufd_object_type type);

#define __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)                      \
	container_of(_iommufd_object_alloc_ucmd(                               \
			     ucmd,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc_ucmd(ucmd, ptr, type) \
	__iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)
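
/*
 * Sketch of the _ucmd variant (same hypothetical type as above): per the
 * comment above, the core finalizes the object when the ioctl succeeds and
 * aborts/destroys it when the ioctl fails, so the handler only allocates and
 * initializes:
 *
 *	ex = iommufd_object_alloc_ucmd(ucmd, ex, IOMMUFD_OBJ_EXAMPLE);
 *	if (IS_ERR(ex))
 *		return PTR_ERR(ex);
 *	... initialize ex; no finalize/abort call here ...
 */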

/*
 * The IO Address Space (IOAS) pagetable is a virtual page table backed by the
 * io_pagetable object. It is a user controlled mapping of IOVA -> PFNs. The
 * mapping is copied into all of the associated domains and made available to
 * in-kernel users.
 *
 * Every iommu_domain that is created is wrapped in an iommufd_hw_pagetable
 * object. When we go to attach a device to an IOAS we need to get an
 * iommu_domain and wrapping iommufd_hw_pagetable for it.
 *
 * An iommu_domain & iommufd_hw_pagetable will be automatically selected
 * for a device based on the hwpt_list. If no suitable iommu_domain
 * is found a new iommu_domain will be created.
 */
struct iommufd_ioas {
	struct iommufd_object obj;
	struct io_pagetable iopt;
	struct mutex mutex;
	struct list_head hwpt_list;
};

static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx,
						    u32 id)
{
	return container_of(iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS),
			    struct iommufd_ioas, obj);
}
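
/*
 * Lookup sketch (illustrative; cmd->ioas_id stands in for whichever uAPI field
 * carries the ID): because ::obj is the first member, the container_of() above
 * preserves an ERR_PTR return, so callers check the wrapper pointer directly:
 *
 *	struct iommufd_ioas *ioas;
 *
 *	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
 *	if (IS_ERR(ioas))
 *		return PTR_ERR(ioas);
 *	... use ioas->iopt ...
 *	iommufd_put_object(ucmd->ictx, &ioas->obj);
 */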

struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx);
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_ioas_destroy(struct iommufd_object *obj);
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd);
int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd);
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd);
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd);
int iommufd_ioas_option(struct iommufd_ucmd *ucmd);
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx);

int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd);
int iommufd_check_iova_range(struct io_pagetable *iopt,
			     struct iommu_hwpt_get_dirty_bitmap *bitmap);

/*
 * A HW pagetable is called an iommu_domain inside the kernel. This user object
 * allows directly creating and inspecting the domains. Domains that have kernel
 * owned page tables will be associated with an iommufd_ioas that provides the
 * IOVA to PFN map.
 */
struct iommufd_hw_pagetable {
	struct iommufd_object obj;
	struct iommu_domain *domain;
	struct iommufd_fault *fault;
	bool pasid_compat : 1;
};

struct iommufd_hwpt_paging {
	struct iommufd_hw_pagetable common;
	struct iommufd_ioas *ioas;
	bool auto_domain : 1;
	bool enforce_cache_coherency : 1;
	bool nest_parent : 1;
	/* Head at iommufd_ioas::hwpt_list */
	struct list_head hwpt_item;
	struct iommufd_sw_msi_maps present_sw_msi;
};

struct iommufd_hwpt_nested {
	struct iommufd_hw_pagetable common;
	struct iommufd_hwpt_paging *parent;
	struct iommufd_viommu *viommu;
};

static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
{
	return hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING;
}

static inline struct iommufd_hwpt_paging *
to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_paging, common);
}

static inline struct iommufd_hwpt_nested *
to_hwpt_nested(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_nested, common);
}

static inline struct iommufd_hwpt_paging *
find_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	switch (hwpt->obj.type) {
	case IOMMUFD_OBJ_HWPT_PAGING:
		return to_hwpt_paging(hwpt);
	case IOMMUFD_OBJ_HWPT_NESTED:
		return to_hwpt_nested(hwpt)->parent;
	default:
		return NULL;
	}
}
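
/*
 * Navigation sketch (illustrative): find_hwpt_paging() lets code holding an
 * arbitrary HWPT reach the backing paging HWPT, and from there the IOAS, for
 * both the paging and the nested case:
 *
 *	struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
 *
 *	if (hwpt_paging)
 *		... operate on hwpt_paging->ioas->iopt ...
 */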

static inline struct iommufd_hwpt_paging *
iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_PAGING),
			    struct iommufd_hwpt_paging, common.obj);
}

static inline struct iommufd_hw_pagetable *
iommufd_get_hwpt_nested(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_NESTED),
			    struct iommufd_hw_pagetable, obj);
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);

struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, ioasid_t pasid,
			  u32 flags, bool immediate_attach,
			  const struct iommu_user_data *user_data);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev, ioasid_t pasid);
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid);
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj);
void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
void iommufd_hwpt_nested_abort(struct iommufd_object *obj);
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);

static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
					    struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);

		lockdep_assert_not_held(&hwpt_paging->ioas->mutex);

		if (hwpt_paging->auto_domain) {
			iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
			return;
		}
	}
	refcount_dec(&hwpt->obj.users);
}

struct iommufd_attach;

struct iommufd_group {
	struct kref ref;
	struct mutex lock;
	struct iommufd_ctx *ictx;
	struct iommu_group *group;
	struct xarray pasid_attach;
	struct iommufd_sw_msi_maps required_sw_msi;
	phys_addr_t sw_msi_start;
};

/*
 * An iommufd_device object represents the binding relationship between a
 * consuming driver and the iommufd. These objects are created/destroyed by
 * external drivers, not by userspace.
 */
struct iommufd_device {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_group *igroup;
	struct list_head group_item;
	/* always the physical device */
	struct device *dev;
	bool enforce_cache_coherency;
	struct iommufd_vdevice *vdev;
	bool destroying;
};

static inline struct iommufd_device *
iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_DEVICE),
			    struct iommufd_device, obj);
}

void iommufd_device_pre_destroy(struct iommufd_object *obj);
void iommufd_device_destroy(struct iommufd_object *obj);
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);

struct iommufd_access {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_ioas *ioas;
	struct iommufd_ioas *ioas_unpin;
	struct mutex ioas_lock;
	const struct iommufd_access_ops *ops;
	void *data;
	unsigned long iova_alignment;
	u32 iopt_access_list_id;
};

int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
void iopt_remove_access(struct io_pagetable *iopt,
			struct iommufd_access *access, u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);

/* iommufd_access for internal use */
static inline bool iommufd_access_is_internal(struct iommufd_access *access)
{
	return !access->ictx;
}

struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx);

static inline void
iommufd_access_destroy_internal(struct iommufd_ctx *ictx,
				struct iommufd_access *access)
{
	iommufd_object_destroy_user(ictx, &access->obj);
}

int iommufd_access_attach_internal(struct iommufd_access *access,
				   struct iommufd_ioas *ioas);

static inline void iommufd_access_detach_internal(struct iommufd_access *access)
{
	iommufd_access_detach(access);
}
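
/*
 * Internal-access lifecycle sketch (illustrative, based only on the helpers
 * above): an in-kernel consumer with no userspace context creates the access,
 * attaches it to an IOAS, and tears it down in the reverse order:
 *
 *	struct iommufd_access *access;
 *	int rc;
 *
 *	access = iommufd_access_create_internal(ictx);
 *	if (IS_ERR(access))
 *		return PTR_ERR(access);
 *	rc = iommufd_access_attach_internal(access, ioas);
 *	if (rc) {
 *		iommufd_access_destroy_internal(ictx, access);
 *		return rc;
 *	}
 *	...
 *	iommufd_access_detach_internal(access);
 *	iommufd_access_destroy_internal(ictx, access);
 */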

struct iommufd_eventq {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct file *filep;

	spinlock_t lock; /* protects the deliver list */
	struct list_head deliver;

	struct wait_queue_head wait_queue;
};

struct iommufd_attach_handle {
	struct iommu_attach_handle handle;
	struct iommufd_device *idev;
};

/* Convert an iommu attach handle to iommufd handle. */
#define to_iommufd_handle(hdl) container_of(hdl, struct iommufd_attach_handle, handle)

/*
 * An iommufd_fault object represents an interface to deliver I/O page faults
 * to userspace. These objects are created/destroyed by userspace and
 * associated with hardware page table objects during page-table allocation.
 */
struct iommufd_fault {
	struct iommufd_eventq common;
	struct mutex mutex; /* serializes response flows */
	struct xarray response;
};

static inline struct iommufd_fault *
eventq_to_fault(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_fault, common);
}

static inline struct iommufd_fault *
iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_FAULT),
			    struct iommufd_fault, common.obj);
}

int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
				  struct iommufd_attach_handle *handle);

/* An iommufd_vevent represents a vIOMMU event in an iommufd_veventq */
struct iommufd_vevent {
	struct iommufd_vevent_header header;
	struct list_head node; /* for iommufd_eventq::deliver */
	ssize_t data_len;
	u64 event_data[] __counted_by(data_len);
};

#define vevent_for_lost_events_header(vevent) \
	(vevent->header.flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)

/*
 * An iommufd_veventq object represents an interface to deliver vIOMMU events
 * to userspace. It is created/destroyed by userspace and associated with a
 * vIOMMU object during allocation.
 */
struct iommufd_veventq {
	struct iommufd_eventq common;
	struct iommufd_viommu *viommu;
	struct list_head node; /* for iommufd_viommu::veventqs */
	struct iommufd_vevent lost_events_header;

	enum iommu_veventq_type type;
	unsigned int depth;

	/* Use common.lock for protection */
	u32 num_events;
	u32 sequence;
};

static inline struct iommufd_veventq *
eventq_to_veventq(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_veventq, common);
}

static inline struct iommufd_veventq *
iommufd_get_veventq(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VEVENTQ),
			    struct iommufd_veventq, common.obj);
}

int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd);
void iommufd_veventq_destroy(struct iommufd_object *obj);
void iommufd_veventq_abort(struct iommufd_object *obj);

static inline void iommufd_vevent_handler(struct iommufd_veventq *veventq,
					  struct iommufd_vevent *vevent)
{
	struct iommufd_eventq *eventq = &veventq->common;

	lockdep_assert_held(&eventq->lock);

	/*
	 * Remove the lost_events_header and add the new node at the same time.
	 * Note the new node can be lost_events_header, for a sequence update.
	 */
	if (list_is_last(&veventq->lost_events_header.node, &eventq->deliver))
		list_del(&veventq->lost_events_header.node);
	list_add_tail(&vevent->node, &eventq->deliver);
	vevent->header.sequence = veventq->sequence;
	veventq->sequence = (veventq->sequence + 1) & INT_MAX;

	wake_up_interruptible(&eventq->wait_queue);
}
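
/*
 * Caller sketch (illustrative): iommufd_vevent_handler() asserts that the
 * event queue spinlock is held, so a producer wraps the call like:
 *
 *	spin_lock(&veventq->common.lock);
 *	iommufd_vevent_handler(veventq, vevent);
 *	spin_unlock(&veventq->common.lock);
 */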

static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VIOMMU),
			    struct iommufd_viommu, obj);
}

static inline struct iommufd_veventq *
iommufd_viommu_find_veventq(struct iommufd_viommu *viommu,
			    enum iommu_veventq_type type)
{
	struct iommufd_veventq *veventq, *next;

	lockdep_assert_held(&viommu->veventqs_rwsem);

	list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) {
		if (veventq->type == type)
			return veventq;
	}
	return NULL;
}

int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_viommu_destroy(struct iommufd_object *obj);
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_vdevice_destroy(struct iommufd_object *obj);
void iommufd_vdevice_abort(struct iommufd_object *obj);
int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_hw_queue_destroy(struct iommufd_object *obj);

static inline struct iommufd_vdevice *
iommufd_get_vdevice(struct iommufd_ctx *ictx, u32 id)
{
	return container_of(iommufd_get_object(ictx, id,
					       IOMMUFD_OBJ_VDEVICE),
			    struct iommufd_vdevice, obj);
}

#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);
void iommufd_selftest_destroy(struct iommufd_object *obj);
extern size_t iommufd_test_memory_limit;
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags);
bool iommufd_should_fail(void);
int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
						 unsigned int ioas_id,
						 u64 *iova, u32 *flags)
{
}
static inline bool iommufd_should_fail(void)
{
	return false;
}
static inline int __init iommufd_test_init(void)
{
	return 0;
}
static inline void iommufd_test_exit(void)
{
}
static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return false;
}
#endif
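
/*
 * Selftest hook sketch (illustrative): when CONFIG_IOMMUFD_TEST is disabled
 * the stubs above compile away, so call sites can unconditionally inject
 * failures with a pattern like:
 *
 *	if (iommufd_should_fail())
 *		return -EINVAL;
 */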
#endif