1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3 *
4 * Kernel side components to support tools/testing/selftests/iommu
5 */
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/fault-inject.h>
9 #include <linux/file.h>
10 #include <linux/iommu.h>
11 #include <linux/platform_device.h>
12 #include <linux/slab.h>
13 #include <linux/xarray.h>
14 #include <uapi/linux/iommufd.h>
15
16 #include "../iommu-priv.h"
17 #include "io_pagetable.h"
18 #include "iommufd_private.h"
19 #include "iommufd_test.h"
20
21 static DECLARE_FAULT_ATTR(fail_iommufd);
22 static struct dentry *dbgfs_root;
23 static struct platform_device *selftest_iommu_dev;
24 static const struct iommu_ops mock_ops;
25 static struct iommu_domain_ops domain_nested_ops;
26
27 size_t iommufd_test_memory_limit = 65536;
28
29 struct mock_bus_type {
30 struct bus_type bus;
31 struct notifier_block nb;
32 };
33
34 static struct mock_bus_type iommufd_mock_bus_type = {
35 .bus = {
36 .name = "iommufd_mock",
37 },
38 };
39
40 static DEFINE_IDA(mock_dev_ida);
41
42 enum {
43 MOCK_DIRTY_TRACK = 1,
44 MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
45 MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
46
	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. xarray also requires the high bit to be zero,
	 * so we store the pfns shifted. The upper bits are used for metadata.
	 */
52 MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
53
54 _MOCK_PFN_START = MOCK_PFN_MASK + 1,
55 MOCK_PFN_START_IOVA = _MOCK_PFN_START,
56 MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
57 MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
58 MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
59 };
60
61 static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
62 static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);
63
/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify
 * things: in syzkaller mode the 64 bit IOVA is converted into an nth area and
 * offset value. This has a much smaller randomization space and syzkaller can
 * hit it.
 */
static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						  u64 *iova)
72 {
73 struct syz_layout {
74 __u32 nth_area;
75 __u32 offset;
76 };
77 struct syz_layout *syz = (void *)iova;
78 unsigned int nth = syz->nth_area;
79 struct iopt_area *area;
80
81 down_read(&iopt->iova_rwsem);
82 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
83 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
84 if (nth == 0) {
85 up_read(&iopt->iova_rwsem);
86 return iopt_area_iova(area) + syz->offset;
87 }
88 nth--;
89 }
90 up_read(&iopt->iova_rwsem);
91
92 return 0;
93 }
94
static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
						u64 *iova)
97 {
98 unsigned long ret;
99
100 mutex_lock(&access->ioas_lock);
101 if (!access->ioas) {
102 mutex_unlock(&access->ioas_lock);
103 return 0;
104 }
105 ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
106 mutex_unlock(&access->ioas_lock);
107 return ret;
108 }
109
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
112 {
113 struct iommufd_ioas *ioas;
114
115 if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
116 return;
117 *flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
118
119 ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
120 if (IS_ERR(ioas))
121 return;
122 *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
123 iommufd_put_object(ucmd->ictx, &ioas->obj);
124 }
125
126 struct mock_iommu_domain {
127 unsigned long flags;
128 struct iommu_domain domain;
129 struct xarray pfns;
130 };
131
132 static inline struct mock_iommu_domain *
to_mock_domain(struct iommu_domain *domain)
134 {
135 return container_of(domain, struct mock_iommu_domain, domain);
136 }
137
138 struct mock_iommu_domain_nested {
139 struct iommu_domain domain;
140 struct mock_viommu *mock_viommu;
141 u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
142 };
143
144 static inline struct mock_iommu_domain_nested *
to_mock_nested(struct iommu_domain *domain)
146 {
147 return container_of(domain, struct mock_iommu_domain_nested, domain);
148 }
149
150 struct mock_viommu {
151 struct iommufd_viommu core;
152 struct mock_iommu_domain *s2_parent;
153 struct mock_hw_queue *hw_queue[IOMMU_TEST_HW_QUEUE_MAX];
154 struct mutex queue_mutex;
155
156 unsigned long mmap_offset;
157 u32 *page; /* Mmap page to test u32 type of in_data */
158 };
159
static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
161 {
162 return container_of(viommu, struct mock_viommu, core);
163 }
164
165 struct mock_hw_queue {
166 struct iommufd_hw_queue core;
167 struct mock_viommu *mock_viommu;
168 struct mock_hw_queue *prev;
169 u16 index;
170 };
171
172 static inline struct mock_hw_queue *
to_mock_hw_queue(struct iommufd_hw_queue *hw_queue)
174 {
175 return container_of(hw_queue, struct mock_hw_queue, core);
176 }
177
178 enum selftest_obj_type {
179 TYPE_IDEV,
180 };
181
182 struct mock_dev {
183 struct device dev;
184 struct mock_viommu *viommu;
185 struct rw_semaphore viommu_rwsem;
186 unsigned long flags;
187 unsigned long vdev_id;
188 int id;
189 u32 cache[MOCK_DEV_CACHE_NUM];
190 atomic_t pasid_1024_fake_error;
191 unsigned int iopf_refcount;
192 struct iommu_domain *domain;
193 };
194
static inline struct mock_dev *to_mock_dev(struct device *dev)
196 {
197 return container_of(dev, struct mock_dev, dev);
198 }
199
200 struct selftest_obj {
201 struct iommufd_object obj;
202 enum selftest_obj_type type;
203
204 union {
205 struct {
206 struct iommufd_device *idev;
207 struct iommufd_ctx *ictx;
208 struct mock_dev *mock_dev;
209 } idev;
210 };
211 };
212
static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
214 {
215 return container_of(obj, struct selftest_obj, obj);
216 }
217
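/*
 * Shared attach_dev for the mock paging, blocking and nested domains: refuse
 * dirty tracking on a no-dirty device, resolve the vdev_id when a nested
 * domain belongs to a vIOMMU, and move IOPF enablement from the previously
 * attached domain to the new one.
 */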
static int mock_domain_nop_attach(struct iommu_domain *domain,
				  struct device *dev)
220 {
221 struct mock_dev *mdev = to_mock_dev(dev);
222 struct mock_viommu *new_viommu = NULL;
223 unsigned long vdev_id = 0;
224 int rc;
225
226 if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
227 return -EINVAL;
228
229 iommu_group_mutex_assert(dev);
230 if (domain->type == IOMMU_DOMAIN_NESTED) {
231 new_viommu = to_mock_nested(domain)->mock_viommu;
232 if (new_viommu) {
233 rc = iommufd_viommu_get_vdev_id(&new_viommu->core, dev,
234 &vdev_id);
235 if (rc)
236 return rc;
237 }
238 }
239 if (new_viommu != mdev->viommu) {
240 down_write(&mdev->viommu_rwsem);
241 mdev->viommu = new_viommu;
242 mdev->vdev_id = vdev_id;
243 up_write(&mdev->viommu_rwsem);
244 }
245
246 rc = mock_dev_enable_iopf(dev, domain);
247 if (rc)
248 return rc;
249
250 mock_dev_disable_iopf(dev, mdev->domain);
251 mdev->domain = domain;
252
253 return 0;
254 }
255
static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
					 struct device *dev, ioasid_t pasid,
					 struct iommu_domain *old)
259 {
260 struct mock_dev *mdev = to_mock_dev(dev);
261 int rc;
262
	/*
	 * On the first attach with pasid 1024, set
	 * mdev->pasid_1024_fake_error so that the second call of this op can
	 * fake an error to validate the error path of the core. This is
	 * helpful to test the case in which the iommu core needs to roll back
	 * to the old domain due to a driver failure, e.g. replace. Note that
	 * a third call of this op will succeed, since
	 * mdev->pasid_1024_fake_error is cleared by the second call.
	 */
273 if (pasid == 1024) {
274 if (domain->type == IOMMU_DOMAIN_BLOCKED) {
275 atomic_set(&mdev->pasid_1024_fake_error, 0);
276 } else if (atomic_read(&mdev->pasid_1024_fake_error)) {
277 /*
278 * Clear the flag, and fake an error to fail the
279 * replacement.
280 */
281 atomic_set(&mdev->pasid_1024_fake_error, 0);
282 return -ENOMEM;
283 } else {
284 /* Set the flag to fake an error in next call */
285 atomic_set(&mdev->pasid_1024_fake_error, 1);
286 }
287 }
288
289 rc = mock_dev_enable_iopf(dev, domain);
290 if (rc)
291 return rc;
292
293 mock_dev_disable_iopf(dev, old);
294
295 return 0;
296 }
297
298 static const struct iommu_domain_ops mock_blocking_ops = {
299 .attach_dev = mock_domain_nop_attach,
300 .set_dev_pasid = mock_domain_set_dev_pasid_nop
301 };
302
303 static struct iommu_domain mock_blocking_domain = {
304 .type = IOMMU_DOMAIN_BLOCKED,
305 .ops = &mock_blocking_ops,
306 };
307
static void *mock_domain_hw_info(struct device *dev, u32 *length,
				 enum iommu_hw_info_type *type)
310 {
311 struct iommu_test_hw_info *info;
312
313 if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
314 *type != IOMMU_HW_INFO_TYPE_SELFTEST)
315 return ERR_PTR(-EOPNOTSUPP);
316
317 info = kzalloc(sizeof(*info), GFP_KERNEL);
318 if (!info)
319 return ERR_PTR(-ENOMEM);
320
321 info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
322 *length = sizeof(*info);
323 *type = IOMMU_HW_INFO_TYPE_SELFTEST;
324
325 return info;
326 }
327
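/*
 * Toggle MOCK_DIRTY_TRACK on the mock domain. Enabling requires the domain to
 * have been allocated with dirty_ops; a no-change request succeeds.
 */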
static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
330 {
331 struct mock_iommu_domain *mock = to_mock_domain(domain);
332 unsigned long flags = mock->flags;
333
334 if (enable && !domain->dirty_ops)
335 return -EINVAL;
336
337 /* No change? */
338 if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
339 return 0;
340
341 flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
342
343 mock->flags = flags;
344 return 0;
345 }
346
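/*
 * Report whether any MOCK_IO_PAGE_SIZE entry in [iova, iova + page_size) has
 * MOCK_PFN_DIRTY_IOVA set, clearing the bit unless IOMMU_DIRTY_NO_CLEAR was
 * requested.
 */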
static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
				      unsigned long iova, size_t page_size,
				      unsigned long flags)
350 {
351 unsigned long cur, end = iova + page_size - 1;
352 bool dirty = false;
353 void *ent, *old;
354
355 for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
356 ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
357 if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
358 continue;
359
360 dirty = true;
361 /* Clear dirty */
362 if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
363 unsigned long val;
364
365 val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
366 old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
367 xa_mk_value(val), GFP_KERNEL);
368 WARN_ON_ONCE(ent != old);
369 }
370 }
371
372 return dirty;
373 }
374
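/*
 * Walk the range and record dirty pages into the bitmap. Entries mapped as
 * huge pages are tested and reported at MOCK_HUGE_PAGE_SIZE granularity.
 */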
static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
					    unsigned long iova, size_t size,
					    unsigned long flags,
					    struct iommu_dirty_bitmap *dirty)
379 {
380 struct mock_iommu_domain *mock = to_mock_domain(domain);
381 unsigned long end = iova + size;
382 void *ent;
383
384 if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
385 return -EINVAL;
386
387 do {
388 unsigned long pgsize = MOCK_IO_PAGE_SIZE;
389 unsigned long head;
390
391 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
392 if (!ent) {
393 iova += pgsize;
394 continue;
395 }
396
397 if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
398 pgsize = MOCK_HUGE_PAGE_SIZE;
399 head = iova & ~(pgsize - 1);
400
401 /* Clear dirty */
402 if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
403 iommu_dirty_bitmap_record(dirty, iova, pgsize);
404 iova += pgsize;
405 } while (iova < end);
406
407 return 0;
408 }
409
410 static const struct iommu_dirty_ops dirty_ops = {
411 .set_dirty_tracking = mock_domain_set_dirty_tracking,
412 .read_and_clear_dirty = mock_domain_read_and_clear_dirty,
413 };
414
415 static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data *user_data)
417 {
418 struct mock_iommu_domain_nested *mock_nested;
419 struct iommu_hwpt_selftest user_cfg;
420 int rc, i;
421
422 if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
423 return ERR_PTR(-EOPNOTSUPP);
424
425 rc = iommu_copy_struct_from_user(&user_cfg, user_data,
426 IOMMU_HWPT_DATA_SELFTEST, iotlb);
427 if (rc)
428 return ERR_PTR(rc);
429
430 mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
431 if (!mock_nested)
432 return ERR_PTR(-ENOMEM);
433 mock_nested->domain.ops = &domain_nested_ops;
434 mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
435 for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
436 mock_nested->iotlb[i] = user_cfg.iotlb;
437 return mock_nested;
438 }
439
440 static struct iommu_domain *
mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
			 u32 flags, const struct iommu_user_data *user_data)
443 {
444 struct mock_iommu_domain_nested *mock_nested;
445 struct mock_iommu_domain *mock_parent;
446
447 if (flags & ~IOMMU_HWPT_ALLOC_PASID)
448 return ERR_PTR(-EOPNOTSUPP);
449 if (!parent || parent->ops != mock_ops.default_domain_ops)
450 return ERR_PTR(-EINVAL);
451
452 mock_parent = to_mock_domain(parent);
453 if (!mock_parent)
454 return ERR_PTR(-EINVAL);
455
456 mock_nested = __mock_domain_alloc_nested(user_data);
457 if (IS_ERR(mock_nested))
458 return ERR_CAST(mock_nested);
459 return &mock_nested->domain;
460 }
461
462 static struct iommu_domain *
mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
			       const struct iommu_user_data *user_data)
465 {
466 bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
467 const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
468 IOMMU_HWPT_ALLOC_NEST_PARENT |
469 IOMMU_HWPT_ALLOC_PASID;
470 struct mock_dev *mdev = to_mock_dev(dev);
471 bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
472 struct mock_iommu_domain *mock;
473
474 if (user_data)
475 return ERR_PTR(-EOPNOTSUPP);
476 if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
477 return ERR_PTR(-EOPNOTSUPP);
478
479 mock = kzalloc(sizeof(*mock), GFP_KERNEL);
480 if (!mock)
481 return ERR_PTR(-ENOMEM);
482 mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
483 mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
484 mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
485 if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
486 mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
487 mock->domain.ops = mock_ops.default_domain_ops;
488 mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
489 xa_init(&mock->pfns);
490
491 if (has_dirty_flag)
492 mock->domain.dirty_ops = &dirty_ops;
493 return &mock->domain;
494 }
495
static void mock_domain_free(struct iommu_domain *domain)
497 {
498 struct mock_iommu_domain *mock = to_mock_domain(domain);
499
500 WARN_ON(!xa_empty(&mock->pfns));
501 kfree(mock);
502 }
503
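/*
 * Store one xarray entry per MOCK_IO_PAGE_SIZE of the mapping, encoding the
 * shifted pfn plus the START/LAST/HUGE metadata bits. On failure the entries
 * already stored are erased so the map is all-or-nothing.
 */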
static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
508 {
509 struct mock_iommu_domain *mock = to_mock_domain(domain);
510 unsigned long flags = MOCK_PFN_START_IOVA;
511 unsigned long start_iova = iova;
512
	/*
	 * xarray does not reliably work with fault injection because it
	 * retries failed allocations internally, so put our own failure
	 * point here.
	 */
517 if (iommufd_should_fail())
518 return -ENOENT;
519
520 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
521 WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
522 for (; pgcount; pgcount--) {
523 size_t cur;
524
525 for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
526 void *old;
527
528 if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
529 flags = MOCK_PFN_LAST_IOVA;
530 if (pgsize != MOCK_IO_PAGE_SIZE) {
531 flags |= MOCK_PFN_HUGE_IOVA;
532 }
533 old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
534 xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
535 flags),
536 gfp);
537 if (xa_is_err(old)) {
538 for (; start_iova != iova;
539 start_iova += MOCK_IO_PAGE_SIZE)
540 xa_erase(&mock->pfns,
541 start_iova /
542 MOCK_IO_PAGE_SIZE);
543 return xa_err(old);
544 }
545 WARN_ON(old);
546 iova += MOCK_IO_PAGE_SIZE;
547 paddr += MOCK_IO_PAGE_SIZE;
548 *mapped += MOCK_IO_PAGE_SIZE;
549 flags = 0;
550 }
551 }
552 return 0;
553 }
554
static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
559 {
560 struct mock_iommu_domain *mock = to_mock_domain(domain);
561 bool first = true;
562 size_t ret = 0;
563 void *ent;
564
565 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
566 WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
567
568 for (; pgcount; pgcount--) {
569 size_t cur;
570
571 for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
572 ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
573
			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps performed, so every
			 * starting/ending IOVA should have been an iova
			 * passed to map.
			 *
			 * This simple logic doesn't work when HUGE_PAGE is
			 * turned on since the core code will automatically
			 * switch between the two page sizes, creating a break
			 * in the unmap calls. The break can land in the
			 * middle of a contiguous IOVA range.
			 */
586 if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
587 if (first) {
588 WARN_ON(ent && !(xa_to_value(ent) &
589 MOCK_PFN_START_IOVA));
590 first = false;
591 }
592 if (pgcount == 1 &&
593 cur + MOCK_IO_PAGE_SIZE == pgsize)
594 WARN_ON(ent && !(xa_to_value(ent) &
595 MOCK_PFN_LAST_IOVA));
596 }
597
598 iova += MOCK_IO_PAGE_SIZE;
599 ret += MOCK_IO_PAGE_SIZE;
600 }
601 }
602 return ret;
603 }
604
static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
607 {
608 struct mock_iommu_domain *mock = to_mock_domain(domain);
609 void *ent;
610
611 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
612 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
613 WARN_ON(!ent);
614 return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
615 }
616
static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
618 {
619 struct mock_dev *mdev = to_mock_dev(dev);
620
621 switch (cap) {
622 case IOMMU_CAP_CACHE_COHERENCY:
623 return true;
624 case IOMMU_CAP_DIRTY_TRACKING:
625 return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
626 default:
627 break;
628 }
629
630 return false;
631 }
632
633 static struct iopf_queue *mock_iommu_iopf_queue;
634
635 static struct mock_iommu_device {
636 struct iommu_device iommu_dev;
637 struct completion complete;
638 refcount_t users;
639 } mock_iommu;
640
static struct iommu_device *mock_probe_device(struct device *dev)
642 {
643 if (dev->bus != &iommufd_mock_bus_type.bus)
644 return ERR_PTR(-ENODEV);
645 return &mock_iommu.iommu_dev;
646 }
647
static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
				      struct iommu_page_response *msg)
650 {
651 }
652
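/*
 * Refcounted enable of IO page faults for a mock device. Only domains with an
 * iopf_handler need the device added to the shared mock IOPF queue.
 */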
static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain)
654 {
655 struct mock_dev *mdev = to_mock_dev(dev);
656 int ret;
657
658 if (!domain || !domain->iopf_handler)
659 return 0;
660
661 if (!mock_iommu_iopf_queue)
662 return -ENODEV;
663
664 if (mdev->iopf_refcount) {
665 mdev->iopf_refcount++;
666 return 0;
667 }
668
669 ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev);
670 if (ret)
671 return ret;
672
673 mdev->iopf_refcount = 1;
674
675 return 0;
676 }
677
static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain)
679 {
680 struct mock_dev *mdev = to_mock_dev(dev);
681
682 if (!domain || !domain->iopf_handler)
683 return;
684
685 if (--mdev->iopf_refcount)
686 return;
687
688 iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
689 }
690
static void mock_viommu_destroy(struct iommufd_viommu *viommu)
692 {
693 struct mock_iommu_device *mock_iommu = container_of(
694 viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
695 struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
696
697 if (refcount_dec_and_test(&mock_iommu->users))
698 complete(&mock_iommu->complete);
699 if (mock_viommu->mmap_offset)
700 iommufd_viommu_destroy_mmap(&mock_viommu->core,
701 mock_viommu->mmap_offset);
702 free_page((unsigned long)mock_viommu->page);
703 mutex_destroy(&mock_viommu->queue_mutex);
704
705 /* iommufd core frees mock_viommu and viommu */
706 }
707
708 static struct iommu_domain *
mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
				const struct iommu_user_data *user_data)
711 {
712 struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
713 struct mock_iommu_domain_nested *mock_nested;
714
715 if (flags & ~IOMMU_HWPT_ALLOC_PASID)
716 return ERR_PTR(-EOPNOTSUPP);
717
718 mock_nested = __mock_domain_alloc_nested(user_data);
719 if (IS_ERR(mock_nested))
720 return ERR_CAST(mock_nested);
721 mock_nested->mock_viommu = mock_viommu;
722 return &mock_nested->domain;
723 }
724
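/*
 * Copy the user command array and zero the targeted cache entry (or all
 * entries with IOMMU_TEST_INVALIDATE_FLAG_ALL) of the mock device identified
 * by each command's vdev_id. array->entry_num is updated to the number of
 * commands consumed.
 */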
static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
					struct iommu_user_data_array *array)
727 {
728 struct iommu_viommu_invalidate_selftest *cmds;
729 struct iommu_viommu_invalidate_selftest *cur;
730 struct iommu_viommu_invalidate_selftest *end;
731 int rc;
732
733 /* A zero-length array is allowed to validate the array type */
734 if (array->entry_num == 0 &&
735 array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
736 array->entry_num = 0;
737 return 0;
738 }
739
740 cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
741 if (!cmds)
742 return -ENOMEM;
743 cur = cmds;
744 end = cmds + array->entry_num;
745
746 static_assert(sizeof(*cmds) == 3 * sizeof(u32));
747 rc = iommu_copy_struct_from_full_user_array(
748 cmds, sizeof(*cmds), array,
749 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
750 if (rc)
751 goto out;
752
753 while (cur != end) {
754 struct mock_dev *mdev;
755 struct device *dev;
756 int i;
757
758 if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
759 rc = -EOPNOTSUPP;
760 goto out;
761 }
762
763 if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
764 rc = -EINVAL;
765 goto out;
766 }
767
768 xa_lock(&viommu->vdevs);
769 dev = iommufd_viommu_find_dev(viommu,
770 (unsigned long)cur->vdev_id);
771 if (!dev) {
772 xa_unlock(&viommu->vdevs);
773 rc = -EINVAL;
774 goto out;
775 }
776 mdev = container_of(dev, struct mock_dev, dev);
777
778 if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
779 /* Invalidate all cache entries and ignore cache_id */
780 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
781 mdev->cache[i] = 0;
782 } else {
783 mdev->cache[cur->cache_id] = 0;
784 }
785 xa_unlock(&viommu->vdevs);
786
787 cur++;
788 }
789 out:
790 array->entry_num = cur - cmds;
791 kfree(cmds);
792 return rc;
793 }
794
static size_t mock_viommu_get_hw_queue_size(struct iommufd_viommu *viommu,
					    enum iommu_hw_queue_type queue_type)
797 {
798 if (queue_type != IOMMU_HW_QUEUE_TYPE_SELFTEST)
799 return 0;
800 return HW_QUEUE_STRUCT_SIZE(struct mock_hw_queue, core);
801 }
802
static void mock_hw_queue_destroy(struct iommufd_hw_queue *hw_queue)
804 {
805 struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
806 struct mock_viommu *mock_viommu = mock_hw_queue->mock_viommu;
807
808 mutex_lock(&mock_viommu->queue_mutex);
809 mock_viommu->hw_queue[mock_hw_queue->index] = NULL;
810 if (mock_hw_queue->prev)
811 iommufd_hw_queue_undepend(mock_hw_queue, mock_hw_queue->prev,
812 core);
813 mutex_unlock(&mock_viommu->queue_mutex);
814 }
815
816 /* Test iommufd_hw_queue_depend/undepend() */
static int mock_hw_queue_init_phys(struct iommufd_hw_queue *hw_queue, u32 index,
				   phys_addr_t base_addr_pa)
819 {
820 struct mock_viommu *mock_viommu = to_mock_viommu(hw_queue->viommu);
821 struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
822 struct mock_hw_queue *prev = NULL;
823 int rc = 0;
824
825 if (index >= IOMMU_TEST_HW_QUEUE_MAX)
826 return -EINVAL;
827
828 mutex_lock(&mock_viommu->queue_mutex);
829
830 if (mock_viommu->hw_queue[index]) {
831 rc = -EEXIST;
832 goto unlock;
833 }
834
835 if (index) {
836 prev = mock_viommu->hw_queue[index - 1];
837 if (!prev) {
838 rc = -EIO;
839 goto unlock;
840 }
841 }
842
843 /*
844 * Test to catch a kernel bug if the core converted the physical address
845 * incorrectly. Let mock_domain_iova_to_phys() WARN_ON if it fails.
846 */
847 if (base_addr_pa != iommu_iova_to_phys(&mock_viommu->s2_parent->domain,
848 hw_queue->base_addr)) {
849 rc = -EFAULT;
850 goto unlock;
851 }
852
853 if (prev) {
854 rc = iommufd_hw_queue_depend(mock_hw_queue, prev, core);
855 if (rc)
856 goto unlock;
857 }
858
859 mock_hw_queue->prev = prev;
860 mock_hw_queue->mock_viommu = mock_viommu;
861 mock_viommu->hw_queue[index] = mock_hw_queue;
862
863 hw_queue->destroy = &mock_hw_queue_destroy;
864 unlock:
865 mutex_unlock(&mock_viommu->queue_mutex);
866 return rc;
867 }
868
869 static struct iommufd_viommu_ops mock_viommu_ops = {
870 .destroy = mock_viommu_destroy,
871 .alloc_domain_nested = mock_viommu_alloc_domain_nested,
872 .cache_invalidate = mock_viommu_cache_invalidate,
873 .get_hw_queue_size = mock_viommu_get_hw_queue_size,
874 .hw_queue_init_phys = mock_hw_queue_init_phys,
875 };
876
static size_t mock_get_viommu_size(struct device *dev,
				   enum iommu_viommu_type viommu_type)
879 {
880 if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
881 return 0;
882 return VIOMMU_STRUCT_SIZE(struct mock_viommu, core);
883 }
884
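/*
 * When user_data is provided, allocate a two-page mmap-able region and loop
 * the input value back through both the mapped page and out_data so the
 * selftest can exercise the vIOMMU mmap and copy-to-user paths.
 */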
static int mock_viommu_init(struct iommufd_viommu *viommu,
			    struct iommu_domain *parent_domain,
			    const struct iommu_user_data *user_data)
888 {
889 struct mock_iommu_device *mock_iommu = container_of(
890 viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
891 struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
892 struct iommu_viommu_selftest data;
893 int rc;
894
895 if (user_data) {
896 rc = iommu_copy_struct_from_user(
897 &data, user_data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
898 if (rc)
899 return rc;
900
901 /* Allocate two pages */
902 mock_viommu->page =
903 (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
904 if (!mock_viommu->page)
905 return -ENOMEM;
906
907 rc = iommufd_viommu_alloc_mmap(&mock_viommu->core,
908 __pa(mock_viommu->page),
909 PAGE_SIZE * 2,
910 &mock_viommu->mmap_offset);
911 if (rc)
912 goto err_free_page;
913
914 /* For loopback tests on both the page and out_data */
915 *mock_viommu->page = data.in_data;
916 data.out_data = data.in_data;
917 data.out_mmap_length = PAGE_SIZE * 2;
918 data.out_mmap_offset = mock_viommu->mmap_offset;
919 rc = iommu_copy_struct_to_user(
920 user_data, &data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
921 if (rc)
922 goto err_destroy_mmap;
923 }
924
925 refcount_inc(&mock_iommu->users);
926 mutex_init(&mock_viommu->queue_mutex);
927 mock_viommu->s2_parent = to_mock_domain(parent_domain);
928
929 viommu->ops = &mock_viommu_ops;
930 return 0;
931
932 err_destroy_mmap:
933 iommufd_viommu_destroy_mmap(&mock_viommu->core,
934 mock_viommu->mmap_offset);
935 err_free_page:
936 free_page((unsigned long)mock_viommu->page);
937 return rc;
938 }
939
940 static const struct iommu_ops mock_ops = {
941 /*
942 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
943 * because it is zero.
944 */
945 .default_domain = &mock_blocking_domain,
946 .blocked_domain = &mock_blocking_domain,
947 .owner = THIS_MODULE,
948 .hw_info = mock_domain_hw_info,
949 .domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
950 .domain_alloc_nested = mock_domain_alloc_nested,
951 .capable = mock_domain_capable,
952 .device_group = generic_device_group,
953 .probe_device = mock_probe_device,
954 .page_response = mock_domain_page_response,
955 .user_pasid_table = true,
956 .get_viommu_size = mock_get_viommu_size,
957 .viommu_init = mock_viommu_init,
958 .default_domain_ops =
959 &(struct iommu_domain_ops){
960 .free = mock_domain_free,
961 .attach_dev = mock_domain_nop_attach,
962 .map_pages = mock_domain_map_pages,
963 .unmap_pages = mock_domain_unmap_pages,
964 .iova_to_phys = mock_domain_iova_to_phys,
965 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
966 },
967 };
968
static void mock_domain_free_nested(struct iommu_domain *domain)
970 {
971 kfree(to_mock_nested(domain));
972 }
973
974 static int
mock_domain_cache_invalidate_user(struct iommu_domain *domain,
				  struct iommu_user_data_array *array)
977 {
978 struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain);
979 struct iommu_hwpt_invalidate_selftest inv;
980 u32 processed = 0;
981 int i = 0, j;
982 int rc = 0;
983
984 if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
985 rc = -EINVAL;
986 goto out;
987 }
988
989 for ( ; i < array->entry_num; i++) {
990 rc = iommu_copy_struct_from_user_array(&inv, array,
991 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
992 i, iotlb_id);
993 if (rc)
994 break;
995
996 if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
997 rc = -EOPNOTSUPP;
998 break;
999 }
1000
1001 if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
1002 rc = -EINVAL;
1003 break;
1004 }
1005
1006 if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
1007 /* Invalidate all mock iotlb entries and ignore iotlb_id */
1008 for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
1009 mock_nested->iotlb[j] = 0;
1010 } else {
1011 mock_nested->iotlb[inv.iotlb_id] = 0;
1012 }
1013
1014 processed++;
1015 }
1016
1017 out:
1018 array->entry_num = processed;
1019 return rc;
1020 }
1021
1022 static struct iommu_domain_ops domain_nested_ops = {
1023 .free = mock_domain_free_nested,
1024 .attach_dev = mock_domain_nop_attach,
1025 .cache_invalidate_user = mock_domain_cache_invalidate_user,
1026 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
1027 };
1028
1029 static inline struct iommufd_hw_pagetable *
__get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
1031 {
1032 struct iommufd_object *obj;
1033
1034 obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
1035 if (IS_ERR(obj))
1036 return ERR_CAST(obj);
1037 return container_of(obj, struct iommufd_hw_pagetable, obj);
1038 }
1039
1040 static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
1043 {
1044 struct iommufd_hw_pagetable *hwpt;
1045
1046 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
1047 if (IS_ERR(hwpt))
1048 return hwpt;
1049 if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
1050 hwpt->domain->ops != mock_ops.default_domain_ops) {
1051 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1052 return ERR_PTR(-EINVAL);
1053 }
1054 *mock = to_mock_domain(hwpt->domain);
1055 return hwpt;
1056 }
1057
1058 static inline struct iommufd_hw_pagetable *
get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
			struct mock_iommu_domain_nested **mock_nested)
1061 {
1062 struct iommufd_hw_pagetable *hwpt;
1063
1064 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
1065 if (IS_ERR(hwpt))
1066 return hwpt;
1067 if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
1068 hwpt->domain->ops != &domain_nested_ops) {
1069 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1070 return ERR_PTR(-EINVAL);
1071 }
1072 *mock_nested = to_mock_nested(hwpt->domain);
1073 return hwpt;
1074 }
1075
static void mock_dev_release(struct device *dev)
1077 {
1078 struct mock_dev *mdev = to_mock_dev(dev);
1079
1080 ida_free(&mock_dev_ida, mdev->id);
1081 kfree(mdev);
1082 }
1083
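/*
 * Create a mock struct device on the iommufd_mock bus. MOCK_FLAGS_DEVICE_PASID
 * advertises MOCK_PASID_WIDTH through the "pasid-num-bits" property; otherwise
 * the device reports zero PASID bits.
 */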
static struct mock_dev *mock_dev_create(unsigned long dev_flags)
1085 {
1086 struct property_entry prop[] = {
1087 PROPERTY_ENTRY_U32("pasid-num-bits", 0),
1088 {},
1089 };
1090 const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY |
1091 MOCK_FLAGS_DEVICE_HUGE_IOVA |
1092 MOCK_FLAGS_DEVICE_PASID;
1093 struct mock_dev *mdev;
1094 int rc, i;
1095
1096 if (dev_flags & ~valid_flags)
1097 return ERR_PTR(-EINVAL);
1098
1099 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
1100 if (!mdev)
1101 return ERR_PTR(-ENOMEM);
1102
1103 init_rwsem(&mdev->viommu_rwsem);
1104 device_initialize(&mdev->dev);
1105 mdev->flags = dev_flags;
1106 mdev->dev.release = mock_dev_release;
1107 mdev->dev.bus = &iommufd_mock_bus_type.bus;
1108 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
1109 mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT;
1110
1111 rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
1112 if (rc < 0)
1113 goto err_put;
1114 mdev->id = rc;
1115
1116 rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
1117 if (rc)
1118 goto err_put;
1119
1120 if (dev_flags & MOCK_FLAGS_DEVICE_PASID)
1121 prop[0] = PROPERTY_ENTRY_U32("pasid-num-bits", MOCK_PASID_WIDTH);
1122
1123 rc = device_create_managed_software_node(&mdev->dev, prop, NULL);
1124 if (rc) {
1125 dev_err(&mdev->dev, "add pasid-num-bits property failed, rc: %d", rc);
1126 goto err_put;
1127 }
1128
1129 rc = iommu_mock_device_add(&mdev->dev, &mock_iommu.iommu_dev);
1130 if (rc)
1131 goto err_put;
1132 return mdev;
1133
1134 err_put:
1135 put_device(&mdev->dev);
1136 return ERR_PTR(rc);
1137 }
1138
static void mock_dev_destroy(struct mock_dev *mdev)
1140 {
1141 device_unregister(&mdev->dev);
1142 }
1143
bool iommufd_selftest_is_mock_dev(struct device *dev)
1145 {
1146 return dev->release == mock_dev_release;
1147 }
1148
1149 /* Create an hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
1152 {
1153 struct iommufd_device *idev;
1154 struct selftest_obj *sobj;
1155 u32 pt_id = cmd->id;
1156 u32 dev_flags = 0;
1157 u32 idev_id;
1158 int rc;
1159
1160 sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
1161 if (IS_ERR(sobj))
1162 return PTR_ERR(sobj);
1163
1164 sobj->idev.ictx = ucmd->ictx;
1165 sobj->type = TYPE_IDEV;
1166
1167 if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
1168 dev_flags = cmd->mock_domain_flags.dev_flags;
1169
1170 sobj->idev.mock_dev = mock_dev_create(dev_flags);
1171 if (IS_ERR(sobj->idev.mock_dev)) {
1172 rc = PTR_ERR(sobj->idev.mock_dev);
1173 goto out_sobj;
1174 }
1175
1176 idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
1177 &idev_id);
1178 if (IS_ERR(idev)) {
1179 rc = PTR_ERR(idev);
1180 goto out_mdev;
1181 }
1182 sobj->idev.idev = idev;
1183
1184 rc = iommufd_device_attach(idev, IOMMU_NO_PASID, &pt_id);
1185 if (rc)
1186 goto out_unbind;
1187
1188 /* Userspace must destroy the device_id to destroy the object */
1189 cmd->mock_domain.out_hwpt_id = pt_id;
1190 cmd->mock_domain.out_stdev_id = sobj->obj.id;
1191 cmd->mock_domain.out_idev_id = idev_id;
1192 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1193 if (rc)
1194 goto out_detach;
1195 iommufd_object_finalize(ucmd->ictx, &sobj->obj);
1196 return 0;
1197
1198 out_detach:
1199 iommufd_device_detach(idev, IOMMU_NO_PASID);
1200 out_unbind:
1201 iommufd_device_unbind(idev);
1202 out_mdev:
1203 mock_dev_destroy(sobj->idev.mock_dev);
1204 out_sobj:
1205 iommufd_object_abort(ucmd->ictx, &sobj->obj);
1206 return rc;
1207 }
1208
1209 static struct selftest_obj *
iommufd_test_get_selftest_obj(struct iommufd_ctx *ictx, u32 id)
1211 {
1212 struct iommufd_object *dev_obj;
1213 struct selftest_obj *sobj;
1214
1215 /*
1216 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
1217 * it doesn't race with detach, which is not allowed.
1218 */
1219 dev_obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_SELFTEST);
1220 if (IS_ERR(dev_obj))
1221 return ERR_CAST(dev_obj);
1222
1223 sobj = to_selftest_obj(dev_obj);
1224 if (sobj->type != TYPE_IDEV) {
1225 iommufd_put_object(ictx, dev_obj);
1226 return ERR_PTR(-EINVAL);
1227 }
1228 return sobj;
1229 }
1230
1231 /* Replace the mock domain with a manually allocated hw_pagetable */
static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
					    unsigned int device_id, u32 pt_id,
					    struct iommu_test_cmd *cmd)
1235 {
1236 struct selftest_obj *sobj;
1237 int rc;
1238
1239 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, device_id);
1240 if (IS_ERR(sobj))
1241 return PTR_ERR(sobj);
1242
1243 rc = iommufd_device_replace(sobj->idev.idev, IOMMU_NO_PASID, &pt_id);
1244 if (rc)
1245 goto out_sobj;
1246
1247 cmd->mock_domain_replace.pt_id = pt_id;
1248 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1249
1250 out_sobj:
1251 iommufd_put_object(ucmd->ictx, &sobj->obj);
1252 return rc;
1253 }
1254
1255 /* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
1259 {
1260 struct iommufd_ioas *ioas;
1261 int rc;
1262
1263 ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
1264 if (IS_ERR(ioas))
1265 return PTR_ERR(ioas);
1266 down_write(&ioas->iopt.iova_rwsem);
1267 rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
1268 up_write(&ioas->iopt.iova_rwsem);
1269 iommufd_put_object(ucmd->ictx, &ioas->obj);
1270 return rc;
1271 }
1272
1273 /* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
1277 {
1278 struct iommufd_hw_pagetable *hwpt;
1279 struct mock_iommu_domain *mock;
1280 uintptr_t end;
1281 int rc;
1282
1283 if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
1284 (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
1285 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1286 return -EINVAL;
1287
1288 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1289 if (IS_ERR(hwpt))
1290 return PTR_ERR(hwpt);
1291
1292 for (; length; length -= MOCK_IO_PAGE_SIZE) {
1293 struct page *pages[1];
1294 unsigned long pfn;
1295 long npages;
1296 void *ent;
1297
1298 npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
1299 pages);
1300 if (npages < 0) {
1301 rc = npages;
1302 goto out_put;
1303 }
1304 if (WARN_ON(npages != 1)) {
1305 rc = -EFAULT;
1306 goto out_put;
1307 }
1308 pfn = page_to_pfn(pages[0]);
1309 put_page(pages[0]);
1310
1311 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
1312 if (!ent ||
1313 (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
1314 pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
1315 rc = -EINVAL;
1316 goto out_put;
1317 }
1318 iova += MOCK_IO_PAGE_SIZE;
1319 uptr += MOCK_IO_PAGE_SIZE;
1320 }
1321 rc = 0;
1322
1323 out_put:
1324 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1325 return rc;
1326 }
1327
1328 /* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
1332 {
1333 uintptr_t end;
1334
1335 if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
1336 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1337 return -EINVAL;
1338
1339 for (; length; length -= PAGE_SIZE) {
1340 struct page *pages[1];
1341 long npages;
1342
1343 npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
1344 if (npages < 0)
1345 return npages;
1346 if (WARN_ON(npages != 1))
1347 return -EFAULT;
1348 if (!PageCompound(pages[0])) {
1349 unsigned int count;
1350
1351 count = page_ref_count(pages[0]);
1352 if (count / GUP_PIN_COUNTING_BIAS != refs) {
1353 put_page(pages[0]);
1354 return -EIO;
1355 }
1356 }
1357 put_page(pages[0]);
1358 uptr += PAGE_SIZE;
1359 }
1360 return 0;
1361 }
1362
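/*
 * Check that the iotlb entry cached in the mock nested domain matches the
 * expected value
 */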
static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd, u32 mockpt_id,
				       unsigned int iotlb_id, u32 iotlb)
1365 {
1366 struct mock_iommu_domain_nested *mock_nested;
1367 struct iommufd_hw_pagetable *hwpt;
1368 int rc = 0;
1369
1370 hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
1371 if (IS_ERR(hwpt))
1372 return PTR_ERR(hwpt);
1373
1374 mock_nested = to_mock_nested(hwpt->domain);
1375
1376 if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
1377 mock_nested->iotlb[iotlb_id] != iotlb)
1378 rc = -EINVAL;
1379 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1380 return rc;
1381 }
1382
static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id,
					unsigned int cache_id, u32 cache)
1385 {
1386 struct iommufd_device *idev;
1387 struct mock_dev *mdev;
1388 int rc = 0;
1389
1390 idev = iommufd_get_device(ucmd, idev_id);
1391 if (IS_ERR(idev))
1392 return PTR_ERR(idev);
1393 mdev = container_of(idev->dev, struct mock_dev, dev);
1394
1395 if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache)
1396 rc = -EINVAL;
1397 iommufd_put_object(ucmd->ictx, &idev->obj);
1398 return rc;
1399 }
1400
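/*
 * Userspace-visible wrapper around an iommufd_access, owned by an anon fd.
 * Each range pinned through it is tracked as a selftest_access_item so the
 * unmap callback can find and unpin it.
 */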
1401 struct selftest_access {
1402 struct iommufd_access *access;
1403 struct file *file;
1404 struct mutex lock;
1405 struct list_head items;
1406 unsigned int next_id;
1407 bool destroying;
1408 };
1409
1410 struct selftest_access_item {
1411 struct list_head items_elm;
1412 unsigned long iova;
1413 size_t length;
1414 unsigned int id;
1415 };
1416
1417 static const struct file_operations iommfd_test_staccess_fops;
1418
static struct selftest_access *iommufd_access_get(int fd)
1420 {
1421 struct file *file;
1422
1423 file = fget(fd);
1424 if (!file)
1425 return ERR_PTR(-EBADFD);
1426
1427 if (file->f_op != &iommfd_test_staccess_fops) {
1428 fput(file);
1429 return ERR_PTR(-EBADFD);
1430 }
1431 return file->private_data;
1432 }
1433
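/*
 * Unmap callback invoked by the iommufd core: unpin and drop every pinned
 * item that overlaps the range being unmapped.
 */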
static void iommufd_test_access_unmap(void *data, unsigned long iova,
				      unsigned long length)
1436 {
1437 unsigned long iova_last = iova + length - 1;
1438 struct selftest_access *staccess = data;
1439 struct selftest_access_item *item;
1440 struct selftest_access_item *tmp;
1441
1442 mutex_lock(&staccess->lock);
1443 list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
1444 if (iova > item->iova + item->length - 1 ||
1445 iova_last < item->iova)
1446 continue;
1447 list_del(&item->items_elm);
1448 iommufd_access_unpin_pages(staccess->access, item->iova,
1449 item->length);
1450 kfree(item);
1451 }
1452 mutex_unlock(&staccess->lock);
1453 }
1454
static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int item_id)
1458 {
1459 struct selftest_access_item *item;
1460 struct selftest_access *staccess;
1461
1462 staccess = iommufd_access_get(access_id);
1463 if (IS_ERR(staccess))
1464 return PTR_ERR(staccess);
1465
1466 mutex_lock(&staccess->lock);
1467 list_for_each_entry(item, &staccess->items, items_elm) {
1468 if (item->id == item_id) {
1469 list_del(&item->items_elm);
1470 iommufd_access_unpin_pages(staccess->access, item->iova,
1471 item->length);
1472 mutex_unlock(&staccess->lock);
1473 kfree(item);
1474 fput(staccess->file);
1475 return 0;
1476 }
1477 }
1478 mutex_unlock(&staccess->lock);
1479 fput(staccess->file);
1480 return -ENOENT;
1481 }
1482
static int iommufd_test_staccess_release(struct inode *inode,
					 struct file *filep)
1485 {
1486 struct selftest_access *staccess = filep->private_data;
1487
1488 if (staccess->access) {
1489 iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
1490 iommufd_access_destroy(staccess->access);
1491 }
1492 mutex_destroy(&staccess->lock);
1493 kfree(staccess);
1494 return 0;
1495 }
1496
1497 static const struct iommufd_access_ops selftest_access_ops_pin = {
1498 .needs_pin_pages = 1,
1499 .unmap = iommufd_test_access_unmap,
1500 };
1501
1502 static const struct iommufd_access_ops selftest_access_ops = {
1503 .unmap = iommufd_test_access_unmap,
1504 };
1505
1506 static const struct file_operations iommfd_test_staccess_fops = {
1507 .release = iommufd_test_staccess_release,
1508 };
1509
static struct selftest_access *iommufd_test_alloc_access(void)
1511 {
1512 struct selftest_access *staccess;
1513 struct file *filep;
1514
1515 staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
1516 if (!staccess)
1517 return ERR_PTR(-ENOMEM);
1518 INIT_LIST_HEAD(&staccess->items);
1519 mutex_init(&staccess->lock);
1520
1521 filep = anon_inode_getfile("[iommufd_test_staccess]",
1522 &iommfd_test_staccess_fops, staccess,
1523 O_RDWR);
1524 if (IS_ERR(filep)) {
1525 kfree(staccess);
1526 return ERR_CAST(filep);
1527 }
1528 staccess->file = filep;
1529 return staccess;
1530 }
1531
static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
				      unsigned int ioas_id, unsigned int flags)
1534 {
1535 struct iommu_test_cmd *cmd = ucmd->cmd;
1536 struct selftest_access *staccess;
1537 struct iommufd_access *access;
1538 u32 id;
1539 int fdno;
1540 int rc;
1541
1542 if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
1543 return -EOPNOTSUPP;
1544
1545 staccess = iommufd_test_alloc_access();
1546 if (IS_ERR(staccess))
1547 return PTR_ERR(staccess);
1548
1549 fdno = get_unused_fd_flags(O_CLOEXEC);
1550 if (fdno < 0) {
1551 rc = -ENOMEM;
1552 goto out_free_staccess;
1553 }
1554
1555 access = iommufd_access_create(
1556 ucmd->ictx,
1557 (flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
1558 &selftest_access_ops_pin :
1559 &selftest_access_ops,
1560 staccess, &id);
1561 if (IS_ERR(access)) {
1562 rc = PTR_ERR(access);
1563 goto out_put_fdno;
1564 }
1565 rc = iommufd_access_attach(access, ioas_id);
1566 if (rc)
1567 goto out_destroy;
1568 cmd->create_access.out_access_fd = fdno;
1569 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1570 if (rc)
1571 goto out_destroy;
1572
1573 staccess->access = access;
1574 fd_install(fdno, staccess->file);
1575 return 0;
1576
1577 out_destroy:
1578 iommufd_access_destroy(access);
1579 out_put_fdno:
1580 put_unused_fd(fdno);
1581 out_free_staccess:
1582 fput(staccess->file);
1583 return rc;
1584 }
1585
static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int ioas_id)
1589 {
1590 struct selftest_access *staccess;
1591 int rc;
1592
1593 staccess = iommufd_access_get(access_id);
1594 if (IS_ERR(staccess))
1595 return PTR_ERR(staccess);
1596
1597 rc = iommufd_access_replace(staccess->access, ioas_id);
1598 fput(staccess->file);
1599 return rc;
1600 }
1601
1602 /* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
1605 {
1606 for (; npages; npages--) {
1607 struct page *tmp_pages[1];
1608 long rc;
1609
1610 rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
1611 if (rc < 0)
1612 return rc;
1613 if (WARN_ON(rc != 1))
1614 return -EFAULT;
1615 put_page(tmp_pages[0]);
1616 if (tmp_pages[0] != *pages)
1617 return -EBADE;
1618 pages++;
1619 uptr += PAGE_SIZE;
1620 }
1621 return 0;
1622 }
1623
static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
				     unsigned int access_id, unsigned long iova,
				     size_t length, void __user *uptr,
				     u32 flags)
1628 {
1629 struct iommu_test_cmd *cmd = ucmd->cmd;
1630 struct selftest_access_item *item;
1631 struct selftest_access *staccess;
1632 struct page **pages;
1633 size_t npages;
1634 int rc;
1635
1636 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1637 if (length > 16 * 1024 * 1024)
1638 return -ENOMEM;
1639
1640 if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
1641 return -EOPNOTSUPP;
1642
1643 staccess = iommufd_access_get(access_id);
1644 if (IS_ERR(staccess))
1645 return PTR_ERR(staccess);
1646
1647 if (staccess->access->ops != &selftest_access_ops_pin) {
1648 rc = -EOPNOTSUPP;
1649 goto out_put;
1650 }
1651
1652 if (flags & MOCK_FLAGS_ACCESS_SYZ)
1653 iova = iommufd_test_syz_conv_iova(staccess->access,
1654 &cmd->access_pages.iova);
1655
1656 npages = (ALIGN(iova + length, PAGE_SIZE) -
1657 ALIGN_DOWN(iova, PAGE_SIZE)) /
1658 PAGE_SIZE;
1659 pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
1660 if (!pages) {
1661 rc = -ENOMEM;
1662 goto out_put;
1663 }
1664
	/*
	 * Drivers will need to think very carefully about this locking. The
	 * core code can do multiple unmaps instantaneously after
	 * iommufd_access_pin_pages() and *all* the unmaps must not return
	 * until the range is unpinned. This simple implementation puts a
	 * global lock around the pin, which may not suit drivers that want
	 * this to be a performance path. Drivers that get this wrong will
	 * trigger WARN_ON races and cause EDEADLOCK failures to userspace.
	 */
1674 mutex_lock(&staccess->lock);
1675 rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
1676 flags & MOCK_FLAGS_ACCESS_WRITE);
1677 if (rc)
1678 goto out_unlock;
1679
1680 /* For syzkaller allow uptr to be NULL to skip this check */
1681 if (uptr) {
1682 rc = iommufd_test_check_pages(
1683 uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
1684 npages);
1685 if (rc)
1686 goto out_unaccess;
1687 }
1688
1689 item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
1690 if (!item) {
1691 rc = -ENOMEM;
1692 goto out_unaccess;
1693 }
1694
1695 item->iova = iova;
1696 item->length = length;
1697 item->id = staccess->next_id++;
1698 list_add_tail(&item->items_elm, &staccess->items);
1699
1700 cmd->access_pages.out_access_pages_id = item->id;
1701 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1702 if (rc)
1703 goto out_free_item;
1704 goto out_unlock;
1705
1706 out_free_item:
1707 list_del(&item->items_elm);
1708 kfree(item);
1709 out_unaccess:
1710 iommufd_access_unpin_pages(staccess->access, iova, length);
1711 out_unlock:
1712 mutex_unlock(&staccess->lock);
1713 kvfree(pages);
1714 out_put:
1715 fput(staccess->file);
1716 return rc;
1717 }
1718
static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
1723 {
1724 struct iommu_test_cmd *cmd = ucmd->cmd;
1725 struct selftest_access *staccess;
1726 void *tmp;
1727 int rc;
1728
1729 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1730 if (length > 16 * 1024 * 1024)
1731 return -ENOMEM;
1732
1733 if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
1734 MOCK_FLAGS_ACCESS_SYZ))
1735 return -EOPNOTSUPP;
1736
1737 staccess = iommufd_access_get(access_id);
1738 if (IS_ERR(staccess))
1739 return PTR_ERR(staccess);
1740
1741 tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
1742 if (!tmp) {
1743 rc = -ENOMEM;
1744 goto out_put;
1745 }
1746
1747 if (flags & MOCK_ACCESS_RW_WRITE) {
1748 if (copy_from_user(tmp, ubuf, length)) {
1749 rc = -EFAULT;
1750 goto out_free;
1751 }
1752 }
1753
1754 if (flags & MOCK_FLAGS_ACCESS_SYZ)
1755 iova = iommufd_test_syz_conv_iova(staccess->access,
1756 &cmd->access_rw.iova);
1757
1758 rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
1759 if (rc)
1760 goto out_free;
1761 if (!(flags & MOCK_ACCESS_RW_WRITE)) {
1762 if (copy_to_user(ubuf, tmp, length)) {
1763 rc = -EFAULT;
1764 goto out_free;
1765 }
1766 }
1767
1768 out_free:
1769 kvfree(tmp);
1770 out_put:
1771 fput(staccess->file);
1772 return rc;
1773 }
1774 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
1775 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
1776 __IOMMUFD_ACCESS_RW_SLOW_PATH);
1777
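/*
 * Read a bitmap from userspace and mark the corresponding IOVAs dirty in the
 * mock domain, reporting how many entries were marked. This seeds state for
 * the read_and_clear_dirty tests.
 */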
static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
			      unsigned long iova, size_t length,
			      unsigned long page_size, void __user *uptr,
			      u32 flags)
1782 {
1783 unsigned long i, max;
1784 struct iommu_test_cmd *cmd = ucmd->cmd;
1785 struct iommufd_hw_pagetable *hwpt;
1786 struct mock_iommu_domain *mock;
1787 int rc, count = 0;
1788 void *tmp;
1789
1790 if (!page_size || !length || iova % page_size || length % page_size ||
1791 !uptr)
1792 return -EINVAL;
1793
1794 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1795 if (IS_ERR(hwpt))
1796 return PTR_ERR(hwpt);
1797
1798 if (!(mock->flags & MOCK_DIRTY_TRACK)) {
1799 rc = -EINVAL;
1800 goto out_put;
1801 }
1802
1803 max = length / page_size;
1804 tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
1805 GFP_KERNEL_ACCOUNT);
1806 if (!tmp) {
1807 rc = -ENOMEM;
1808 goto out_put;
1809 }
1810
1811 if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
1812 rc = -EFAULT;
1813 goto out_free;
1814 }
1815
1816 for (i = 0; i < max; i++) {
1817 unsigned long cur = iova + i * page_size;
1818 void *ent, *old;
1819
1820 if (!test_bit(i, (unsigned long *)tmp))
1821 continue;
1822
1823 ent = xa_load(&mock->pfns, cur / page_size);
1824 if (ent) {
1825 unsigned long val;
1826
1827 val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
1828 old = xa_store(&mock->pfns, cur / page_size,
1829 xa_mk_value(val), GFP_KERNEL);
1830 WARN_ON_ONCE(ent != old);
1831 count++;
1832 }
1833 }
1834
1835 cmd->dirty.out_nr_dirty = count;
1836 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1837 out_free:
1838 kvfree(tmp);
1839 out_put:
1840 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1841 return rc;
1842 }
1843
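/*
 * Inject a page request fault (flagged as the last page in its group) for the
 * given device, as if the mock hardware had raised it.
 */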
1844 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
1845 struct iommu_test_cmd *cmd)
1846 {
1847 struct iopf_fault event = {};
1848 struct iommufd_device *idev;
1849
1850 idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
1851 if (IS_ERR(idev))
1852 return PTR_ERR(idev);
1853
1854 event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1855 if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
1856 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1857 event.fault.type = IOMMU_FAULT_PAGE_REQ;
1858 event.fault.prm.addr = cmd->trigger_iopf.addr;
1859 event.fault.prm.pasid = cmd->trigger_iopf.pasid;
1860 event.fault.prm.grpid = cmd->trigger_iopf.grpid;
1861 event.fault.prm.perm = cmd->trigger_iopf.perm;
1862
1863 iommu_report_device_fault(idev->dev, &event);
1864 iommufd_put_object(ucmd->ictx, &idev->obj);
1865
1866 return 0;
1867 }
1868
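/*
 * Report a selftest vIOMMU event carrying the device's virtual ID. Fails with
 * -ENOENT if the mock device is not attached to a vIOMMU or has no vdev_id.
 */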
1869 static int iommufd_test_trigger_vevent(struct iommufd_ucmd *ucmd,
1870 struct iommu_test_cmd *cmd)
1871 {
1872 struct iommu_viommu_event_selftest test = {};
1873 struct iommufd_device *idev;
1874 struct mock_dev *mdev;
1875 int rc = -ENOENT;
1876
1877 idev = iommufd_get_device(ucmd, cmd->trigger_vevent.dev_id);
1878 if (IS_ERR(idev))
1879 return PTR_ERR(idev);
1880 mdev = to_mock_dev(idev->dev);
1881
1882 down_read(&mdev->viommu_rwsem);
1883 if (!mdev->viommu || !mdev->vdev_id)
1884 goto out_unlock;
1885
1886 test.virt_id = mdev->vdev_id;
1887 rc = iommufd_viommu_report_event(&mdev->viommu->core,
1888 IOMMU_VEVENTQ_TYPE_SELFTEST, &test,
1889 sizeof(test));
1890 out_unlock:
1891 up_read(&mdev->viommu_rwsem);
1892 iommufd_put_object(ucmd->ictx, &idev->obj);
1893
1894 return rc;
1895 }
1896
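/*
 * Look up an object by ID and return it as a HWPT, accepting either a paging
 * or a nested domain. The caller must release it with iommufd_put_object().
 */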
1897 static inline struct iommufd_hw_pagetable *
1898 iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id)
1899 {
1900 struct iommufd_object *pt_obj;
1901
1902 pt_obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_ANY);
1903 if (IS_ERR(pt_obj))
1904 return ERR_CAST(pt_obj);
1905
1906 if (pt_obj->type != IOMMUFD_OBJ_HWPT_NESTED &&
1907 pt_obj->type != IOMMUFD_OBJ_HWPT_PAGING) {
1908 iommufd_put_object(ucmd->ictx, pt_obj);
1909 return ERR_PTR(-EINVAL);
1910 }
1911
1912 return container_of(pt_obj, struct iommufd_hw_pagetable, obj);
1913 }
1914
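/*
 * Check what is attached on a PASID of the mock device: with hwpt_id == 0 the
 * PASID must have nothing attached, otherwise the attached domain must match
 * the domain of the given HWPT.
 */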
1915 static int iommufd_test_pasid_check_hwpt(struct iommufd_ucmd *ucmd,
1916 struct iommu_test_cmd *cmd)
1917 {
1918 u32 hwpt_id = cmd->pasid_check.hwpt_id;
1919 struct iommu_domain *attached_domain;
1920 struct iommu_attach_handle *handle;
1921 struct iommufd_hw_pagetable *hwpt;
1922 struct selftest_obj *sobj;
1923 struct mock_dev *mdev;
1924 int rc = 0;
1925
1926 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1927 if (IS_ERR(sobj))
1928 return PTR_ERR(sobj);
1929
1930 mdev = sobj->idev.mock_dev;
1931
1932 handle = iommu_attach_handle_get(mdev->dev.iommu_group,
1933 cmd->pasid_check.pasid, 0);
1934 if (IS_ERR(handle))
1935 attached_domain = NULL;
1936 else
1937 attached_domain = handle->domain;
1938
1939 /* hwpt_id == 0 means verify that nothing is attached on the pasid */
1940 if (!hwpt_id) {
1941 if (attached_domain)
1942 rc = -EINVAL;
1943 goto out_sobj;
1944 }
1945
1946 hwpt = iommufd_get_hwpt(ucmd, hwpt_id);
1947 if (IS_ERR(hwpt)) {
1948 rc = PTR_ERR(hwpt);
1949 goto out_sobj;
1950 }
1951
1952 if (attached_domain != hwpt->domain)
1953 rc = -EINVAL;
1954
1955 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1956 out_sobj:
1957 iommufd_put_object(ucmd->ictx, &sobj->obj);
1958 return rc;
1959 }
1960
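/*
 * Attach the selftest device to a page table on the given PASID and echo the
 * (possibly updated) pt_id back to userspace; a failed respond rolls the
 * attach back.
 */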
1961 static int iommufd_test_pasid_attach(struct iommufd_ucmd *ucmd,
1962 struct iommu_test_cmd *cmd)
1963 {
1964 struct selftest_obj *sobj;
1965 int rc;
1966
1967 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1968 if (IS_ERR(sobj))
1969 return PTR_ERR(sobj);
1970
1971 rc = iommufd_device_attach(sobj->idev.idev, cmd->pasid_attach.pasid,
1972 &cmd->pasid_attach.pt_id);
1973 if (rc)
1974 goto out_sobj;
1975
1976 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1977 if (rc)
1978 iommufd_device_detach(sobj->idev.idev, cmd->pasid_attach.pasid);
1979
1980 out_sobj:
1981 iommufd_put_object(ucmd->ictx, &sobj->obj);
1982 return rc;
1983 }
1984
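/* Replace the page table currently attached on the given PASID. */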
1985 static int iommufd_test_pasid_replace(struct iommufd_ucmd *ucmd,
1986 struct iommu_test_cmd *cmd)
1987 {
1988 struct selftest_obj *sobj;
1989 int rc;
1990
1991 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1992 if (IS_ERR(sobj))
1993 return PTR_ERR(sobj);
1994
1995 rc = iommufd_device_replace(sobj->idev.idev, cmd->pasid_attach.pasid,
1996 &cmd->pasid_attach.pt_id);
1997 if (rc)
1998 goto out_sobj;
1999
2000 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
2001
2002 out_sobj:
2003 iommufd_put_object(ucmd->ictx, &sobj->obj);
2004 return rc;
2005 }
2006
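/* Detach whatever is attached on the given PASID of the selftest device. */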
2007 static int iommufd_test_pasid_detach(struct iommufd_ucmd *ucmd,
2008 struct iommu_test_cmd *cmd)
2009 {
2010 struct selftest_obj *sobj;
2011
2012 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
2013 if (IS_ERR(sobj))
2014 return PTR_ERR(sobj);
2015
2016 iommufd_device_detach(sobj->idev.idev, cmd->pasid_detach.pasid);
2017 iommufd_put_object(ucmd->ictx, &sobj->obj);
2018 return 0;
2019 }
2020
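/*
 * Destroy callback for selftest objects: detach the default PASID, unbind the
 * iommufd device and destroy the mock device backing it.
 */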
2021 void iommufd_selftest_destroy(struct iommufd_object *obj)
2022 {
2023 struct selftest_obj *sobj = to_selftest_obj(obj);
2024
2025 switch (sobj->type) {
2026 case TYPE_IDEV:
2027 iommufd_device_detach(sobj->idev.idev, IOMMU_NO_PASID);
2028 iommufd_device_unbind(sobj->idev.idev);
2029 mock_dev_destroy(sobj->idev.mock_dev);
2030 break;
2031 }
2032 }
2033
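/* Dispatcher for IOMMU_TEST_CMD, selecting the handler by cmd->op. */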
2034 int iommufd_test(struct iommufd_ucmd *ucmd)
2035 {
2036 struct iommu_test_cmd *cmd = ucmd->cmd;
2037
2038 switch (cmd->op) {
2039 case IOMMU_TEST_OP_ADD_RESERVED:
2040 return iommufd_test_add_reserved(ucmd, cmd->id,
2041 cmd->add_reserved.start,
2042 cmd->add_reserved.length);
2043 case IOMMU_TEST_OP_MOCK_DOMAIN:
2044 case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
2045 return iommufd_test_mock_domain(ucmd, cmd);
2046 case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
2047 return iommufd_test_mock_domain_replace(
2048 ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
2049 case IOMMU_TEST_OP_MD_CHECK_MAP:
2050 return iommufd_test_md_check_pa(
2051 ucmd, cmd->id, cmd->check_map.iova,
2052 cmd->check_map.length,
2053 u64_to_user_ptr(cmd->check_map.uptr));
2054 case IOMMU_TEST_OP_MD_CHECK_REFS:
2055 return iommufd_test_md_check_refs(
2056 ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
2057 cmd->check_refs.length, cmd->check_refs.refs);
2058 case IOMMU_TEST_OP_MD_CHECK_IOTLB:
2059 return iommufd_test_md_check_iotlb(ucmd, cmd->id,
2060 cmd->check_iotlb.id,
2061 cmd->check_iotlb.iotlb);
2062 case IOMMU_TEST_OP_DEV_CHECK_CACHE:
2063 return iommufd_test_dev_check_cache(ucmd, cmd->id,
2064 cmd->check_dev_cache.id,
2065 cmd->check_dev_cache.cache);
2066 case IOMMU_TEST_OP_CREATE_ACCESS:
2067 return iommufd_test_create_access(ucmd, cmd->id,
2068 cmd->create_access.flags);
2069 case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
2070 return iommufd_test_access_replace_ioas(
2071 ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
2072 case IOMMU_TEST_OP_ACCESS_PAGES:
2073 return iommufd_test_access_pages(
2074 ucmd, cmd->id, cmd->access_pages.iova,
2075 cmd->access_pages.length,
2076 u64_to_user_ptr(cmd->access_pages.uptr),
2077 cmd->access_pages.flags);
2078 case IOMMU_TEST_OP_ACCESS_RW:
2079 return iommufd_test_access_rw(
2080 ucmd, cmd->id, cmd->access_rw.iova,
2081 cmd->access_rw.length,
2082 u64_to_user_ptr(cmd->access_rw.uptr),
2083 cmd->access_rw.flags);
2084 case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
2085 return iommufd_test_access_item_destroy(
2086 ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
2087 case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
2088 /* Protect _batch_init(): the limit cannot be less than the element size */
2089 if (cmd->memory_limit.limit <
2090 sizeof(unsigned long) + sizeof(u32))
2091 return -EINVAL;
2092 iommufd_test_memory_limit = cmd->memory_limit.limit;
2093 return 0;
2094 case IOMMU_TEST_OP_DIRTY:
2095 return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
2096 cmd->dirty.length,
2097 cmd->dirty.page_size,
2098 u64_to_user_ptr(cmd->dirty.uptr),
2099 cmd->dirty.flags);
2100 case IOMMU_TEST_OP_TRIGGER_IOPF:
2101 return iommufd_test_trigger_iopf(ucmd, cmd);
2102 case IOMMU_TEST_OP_TRIGGER_VEVENT:
2103 return iommufd_test_trigger_vevent(ucmd, cmd);
2104 case IOMMU_TEST_OP_PASID_ATTACH:
2105 return iommufd_test_pasid_attach(ucmd, cmd);
2106 case IOMMU_TEST_OP_PASID_REPLACE:
2107 return iommufd_test_pasid_replace(ucmd, cmd);
2108 case IOMMU_TEST_OP_PASID_DETACH:
2109 return iommufd_test_pasid_detach(ucmd, cmd);
2110 case IOMMU_TEST_OP_PASID_CHECK_HWPT:
2111 return iommufd_test_pasid_check_hwpt(ucmd, cmd);
2112 default:
2113 return -EOPNOTSUPP;
2114 }
2115 }
2116
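/* Fault-injection hook driven by the fail_iommufd debugfs attribute. */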
2117 bool iommufd_should_fail(void)
2118 {
2119 return should_fail(&fail_iommufd, 1);
2120 }
2121
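/*
 * Create the fail_iommufd fault-injection debugfs entry, a platform device to
 * anchor sysfs, and the mock bus, then register the mock IOMMU instance on
 * that bus and allocate the shared IOPF queue used by the mock driver.
 */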
2122 int __init iommufd_test_init(void)
2123 {
2124 struct platform_device_info pdevinfo = {
2125 .name = "iommufd_selftest_iommu",
2126 };
2127 int rc;
2128
2129 dbgfs_root =
2130 fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
2131
2132 selftest_iommu_dev = platform_device_register_full(&pdevinfo);
2133 if (IS_ERR(selftest_iommu_dev)) {
2134 rc = PTR_ERR(selftest_iommu_dev);
2135 goto err_dbgfs;
2136 }
2137
2138 rc = bus_register(&iommufd_mock_bus_type.bus);
2139 if (rc)
2140 goto err_platform;
2141
2142 rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev,
2143 &selftest_iommu_dev->dev, NULL, "%s",
2144 dev_name(&selftest_iommu_dev->dev));
2145 if (rc)
2146 goto err_bus;
2147
2148 rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
2149 &iommufd_mock_bus_type.bus,
2150 &iommufd_mock_bus_type.nb);
2151 if (rc)
2152 goto err_sysfs;
2153
2154 refcount_set(&mock_iommu.users, 1);
2155 init_completion(&mock_iommu.complete);
2156
2157 mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
2158 mock_iommu.iommu_dev.max_pasids = (1 << MOCK_PASID_WIDTH);
2159
2160 return 0;
2161
2162 err_sysfs:
2163 iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2164 err_bus:
2165 bus_unregister(&iommufd_mock_bus_type.bus);
2166 err_platform:
2167 platform_device_unregister(selftest_iommu_dev);
2168 err_dbgfs:
2169 debugfs_remove_recursive(dbgfs_root);
2170 return rc;
2171 }
2172
2173 static void iommufd_test_wait_for_users(void)
2174 {
2175 if (refcount_dec_and_test(&mock_iommu.users))
2176 return;
2177 /*
2178 * Time out waiting for iommu device user count to become 0.
2179 *
2180 * Note that this wait is mostly illustrative: the selftest is built into
2181 * the iommufd module, so the mock iommu device is only unplugged when the
2182 * module is unloaded, which cannot happen while any iommufd FDs are still
2183 * open. Hence this WARN_ON is not expected to trigger.
2184 */
2185 WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
2186 msecs_to_jiffies(10000)));
2187 }
2188
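/*
 * Free the IOPF queue, wait for all users of the mock IOMMU device to go
 * away, then unwind the registrations made by iommufd_test_init().
 */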
2189 void iommufd_test_exit(void)
2190 {
2191 if (mock_iommu_iopf_queue) {
2192 iopf_queue_free(mock_iommu_iopf_queue);
2193 mock_iommu_iopf_queue = NULL;
2194 }
2195
2196 iommufd_test_wait_for_users();
2197 iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2198 iommu_device_unregister_bus(&mock_iommu.iommu_dev,
2199 &iommufd_mock_bus_type.bus,
2200 &iommufd_mock_bus_type.nb);
2201 bus_unregister(&iommufd_mock_bus_type.bus);
2202 platform_device_unregister(selftest_iommu_dev);
2203 debugfs_remove_recursive(dbgfs_root);
2204 }
2205