1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3 *
4 * Kernel side components to support tools/testing/selftests/iommu
5 */
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/fault-inject.h>
9 #include <linux/file.h>
10 #include <linux/iommu.h>
11 #include <linux/platform_device.h>
12 #include <linux/slab.h>
13 #include <linux/xarray.h>
14 #include <uapi/linux/iommufd.h>
15
16 #include "../iommu-priv.h"
17 #include "io_pagetable.h"
18 #include "iommufd_private.h"
19 #include "iommufd_test.h"
20
21 static DECLARE_FAULT_ATTR(fail_iommufd);
22 static struct dentry *dbgfs_root;
23 static struct platform_device *selftest_iommu_dev;
24 static const struct iommu_ops mock_ops;
25 static struct iommu_domain_ops domain_nested_ops;
26
27 size_t iommufd_test_memory_limit = 65536;
28
29 struct mock_bus_type {
30 struct bus_type bus;
31 struct notifier_block nb;
32 };
33
34 static struct mock_bus_type iommufd_mock_bus_type = {
35 .bus = {
36 .name = "iommufd_mock",
37 },
38 };
39
40 static DEFINE_IDA(mock_dev_ida);
41
42 enum {
43 MOCK_DIRTY_TRACK = 1,
44 MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
45 MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
46
47 /*
48 * Like a real page table, alignment requires the low bits of the address
49 * to be zero. xarray also requires the high bit to be zero, so we store
50 * the pfns shifted. The upper bits are used for metadata.
51 */
52 MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
53
54 _MOCK_PFN_START = MOCK_PFN_MASK + 1,
55 MOCK_PFN_START_IOVA = _MOCK_PFN_START,
56 MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
57 MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
58 MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
59 };
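/*
 * A worked example of the encoding above: mock_domain_map_pages() stores
 * xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) | flags) for every
 * MOCK_IO_PAGE_SIZE chunk, so the bits covered by MOCK_PFN_MASK hold the
 * shifted pfn while the bits above carry the START/LAST/DIRTY/HUGE metadata.
 * START and LAST are the same bit by construction, so a single-page mapping
 * carries both markers at once.
 */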
60
61 /*
62 * Syzkaller has trouble randomizing the correct iova to use since it is linked
63 * to the map ioctl's output, and it has no idea about that. So, simplify things.
64 * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
65 * value. This has a much smaller randomization space and syzkaller can hit it.
66 */
67 static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
68 u64 *iova)
69 {
70 struct syz_layout {
71 __u32 nth_area;
72 __u32 offset;
73 };
74 struct syz_layout *syz = (void *)iova;
75 unsigned int nth = syz->nth_area;
76 struct iopt_area *area;
77
78 down_read(&iopt->iova_rwsem);
79 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
80 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
81 if (nth == 0) {
82 up_read(&iopt->iova_rwsem);
83 return iopt_area_iova(area) + syz->offset;
84 }
85 nth--;
86 }
87 up_read(&iopt->iova_rwsem);
88
89 return 0;
90 }
91
92 static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
93 u64 *iova)
94 {
95 unsigned long ret;
96
97 mutex_lock(&access->ioas_lock);
98 if (!access->ioas) {
99 mutex_unlock(&access->ioas_lock);
100 return 0;
101 }
102 ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
103 mutex_unlock(&access->ioas_lock);
104 return ret;
105 }
106
107 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
108 unsigned int ioas_id, u64 *iova, u32 *flags)
109 {
110 struct iommufd_ioas *ioas;
111
112 if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
113 return;
114 *flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
115
116 ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
117 if (IS_ERR(ioas))
118 return;
119 *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
120 iommufd_put_object(ucmd->ictx, &ioas->obj);
121 }
122
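/*
 * The mock "paging" (stage 2) domain. pfns is the fake IO page table: an
 * xarray keyed by iova / MOCK_IO_PAGE_SIZE whose values encode the pfn plus
 * the MOCK_PFN_* metadata bits. flags currently only holds MOCK_DIRTY_TRACK.
 */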
123 struct mock_iommu_domain {
124 unsigned long flags;
125 struct iommu_domain domain;
126 struct xarray pfns;
127 };
128
129 static inline struct mock_iommu_domain *
130 to_mock_domain(struct iommu_domain *domain)
131 {
132 return container_of(domain, struct mock_iommu_domain, domain);
133 }
134
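/*
 * A nested (stage 1) domain on top of a mock paging domain. iotlb[] is a
 * small array of tags seeded from the user data at allocation time and
 * flushed by mock_domain_cache_invalidate_user(); mock_viommu is only set
 * when the domain is allocated through a vIOMMU object.
 */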
135 struct mock_iommu_domain_nested {
136 struct iommu_domain domain;
137 struct mock_viommu *mock_viommu;
138 struct mock_iommu_domain *parent;
139 u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
140 };
141
142 static inline struct mock_iommu_domain_nested *
143 to_mock_nested(struct iommu_domain *domain)
144 {
145 return container_of(domain, struct mock_iommu_domain_nested, domain);
146 }
147
148 struct mock_viommu {
149 struct iommufd_viommu core;
150 struct mock_iommu_domain *s2_parent;
151 };
152
153 static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
154 {
155 return container_of(viommu, struct mock_viommu, core);
156 }
157
158 enum selftest_obj_type {
159 TYPE_IDEV,
160 };
161
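/*
 * A fake device on the iommufd_mock bus. viommu/vdev_id are updated under
 * viommu_rwsem when a nested domain is attached, cache[] models per-device
 * cache tags that the vIOMMU invalidation op can clear, and
 * pasid_1024_fake_error drives the deliberate set_dev_pasid failure below.
 */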
162 struct mock_dev {
163 struct device dev;
164 struct mock_viommu *viommu;
165 struct rw_semaphore viommu_rwsem;
166 unsigned long flags;
167 unsigned long vdev_id;
168 int id;
169 u32 cache[MOCK_DEV_CACHE_NUM];
170 atomic_t pasid_1024_fake_error;
171 };
172
173 static inline struct mock_dev *to_mock_dev(struct device *dev)
174 {
175 return container_of(dev, struct mock_dev, dev);
176 }
177
178 struct selftest_obj {
179 struct iommufd_object obj;
180 enum selftest_obj_type type;
181
182 union {
183 struct {
184 struct iommufd_device *idev;
185 struct iommufd_ctx *ictx;
186 struct mock_dev *mock_dev;
187 } idev;
188 };
189 };
190
191 static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
192 {
193 return container_of(obj, struct selftest_obj, obj);
194 }
195
196 static int mock_domain_nop_attach(struct iommu_domain *domain,
197 struct device *dev)
198 {
199 struct mock_dev *mdev = to_mock_dev(dev);
200 struct mock_viommu *new_viommu = NULL;
201 unsigned long vdev_id = 0;
202 int rc;
203
204 if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
205 return -EINVAL;
206
207 iommu_group_mutex_assert(dev);
208 if (domain->type == IOMMU_DOMAIN_NESTED) {
209 new_viommu = to_mock_nested(domain)->mock_viommu;
210 if (new_viommu) {
211 rc = iommufd_viommu_get_vdev_id(&new_viommu->core, dev,
212 &vdev_id);
213 if (rc)
214 return rc;
215 }
216 }
217 if (new_viommu != mdev->viommu) {
218 down_write(&mdev->viommu_rwsem);
219 mdev->viommu = new_viommu;
220 mdev->vdev_id = vdev_id;
221 up_write(&mdev->viommu_rwsem);
222 }
223
224 return 0;
225 }
226
227 static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
228 struct device *dev, ioasid_t pasid,
229 struct iommu_domain *old)
230 {
231 struct mock_dev *mdev = to_mock_dev(dev);
232
233 /*
234 * On the first attach with pasid 1024, set
235 * mdev->pasid_1024_fake_error. The second call of this op can then
236 * fake an error to validate the error path of the core. This is
237 * helpful for testing the case in which the iommu core needs to
238 * roll back to the old domain due to a driver failure, e.g. replace.
239 * Note that a third call of this op shall succeed, since the
240 * mdev->pasid_1024_fake_error flag is cleared again by the
241 * second call.
242 */
243 if (pasid == 1024) {
244 if (domain->type == IOMMU_DOMAIN_BLOCKED) {
245 atomic_set(&mdev->pasid_1024_fake_error, 0);
246 } else if (atomic_read(&mdev->pasid_1024_fake_error)) {
247 /*
248 * Clear the flag, and fake an error to fail the
249 * replacement.
250 */
251 atomic_set(&mdev->pasid_1024_fake_error, 0);
252 return -ENOMEM;
253 } else {
254 /* Set the flag to fake an error in next call */
255 atomic_set(&mdev->pasid_1024_fake_error, 1);
256 }
257 }
258
259 return 0;
260 }
261
262 static const struct iommu_domain_ops mock_blocking_ops = {
263 .attach_dev = mock_domain_nop_attach,
264 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
265 };
266
267 static struct iommu_domain mock_blocking_domain = {
268 .type = IOMMU_DOMAIN_BLOCKED,
269 .ops = &mock_blocking_ops,
270 };
271
272 static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
273 {
274 struct iommu_test_hw_info *info;
275
276 info = kzalloc(sizeof(*info), GFP_KERNEL);
277 if (!info)
278 return ERR_PTR(-ENOMEM);
279
280 info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
281 *length = sizeof(*info);
282 *type = IOMMU_HW_INFO_TYPE_SELFTEST;
283
284 return info;
285 }
286
287 static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
288 bool enable)
289 {
290 struct mock_iommu_domain *mock = to_mock_domain(domain);
291 unsigned long flags = mock->flags;
292
293 if (enable && !domain->dirty_ops)
294 return -EINVAL;
295
296 /* No change? */
297 if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
298 return 0;
299
300 flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
301
302 mock->flags = flags;
303 return 0;
304 }
305
306 static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
307 unsigned long iova, size_t page_size,
308 unsigned long flags)
309 {
310 unsigned long cur, end = iova + page_size - 1;
311 bool dirty = false;
312 void *ent, *old;
313
314 for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
315 ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
316 if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
317 continue;
318
319 dirty = true;
320 /* Clear dirty */
321 if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
322 unsigned long val;
323
324 val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
325 old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
326 xa_mk_value(val), GFP_KERNEL);
327 WARN_ON_ONCE(ent != old);
328 }
329 }
330
331 return dirty;
332 }
333
334 static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
335 unsigned long iova, size_t size,
336 unsigned long flags,
337 struct iommu_dirty_bitmap *dirty)
338 {
339 struct mock_iommu_domain *mock = to_mock_domain(domain);
340 unsigned long end = iova + size;
341 void *ent;
342
343 if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
344 return -EINVAL;
345
346 do {
347 unsigned long pgsize = MOCK_IO_PAGE_SIZE;
348 unsigned long head;
349
350 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
351 if (!ent) {
352 iova += pgsize;
353 continue;
354 }
355
356 if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
357 pgsize = MOCK_HUGE_PAGE_SIZE;
358 head = iova & ~(pgsize - 1);
359
360 /* Clear dirty */
361 if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
362 iommu_dirty_bitmap_record(dirty, iova, pgsize);
363 iova += pgsize;
364 } while (iova < end);
365
366 return 0;
367 }
368
369 static const struct iommu_dirty_ops dirty_ops = {
370 .set_dirty_tracking = mock_domain_set_dirty_tracking,
371 .read_and_clear_dirty = mock_domain_read_and_clear_dirty,
372 };
373
374 static struct mock_iommu_domain_nested *
375 __mock_domain_alloc_nested(const struct iommu_user_data *user_data)
376 {
377 struct mock_iommu_domain_nested *mock_nested;
378 struct iommu_hwpt_selftest user_cfg;
379 int rc, i;
380
381 if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
382 return ERR_PTR(-EOPNOTSUPP);
383
384 rc = iommu_copy_struct_from_user(&user_cfg, user_data,
385 IOMMU_HWPT_DATA_SELFTEST, iotlb);
386 if (rc)
387 return ERR_PTR(rc);
388
389 mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
390 if (!mock_nested)
391 return ERR_PTR(-ENOMEM);
392 mock_nested->domain.ops = &domain_nested_ops;
393 mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
394 for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
395 mock_nested->iotlb[i] = user_cfg.iotlb;
396 return mock_nested;
397 }
398
399 static struct iommu_domain *
400 mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
401 u32 flags, const struct iommu_user_data *user_data)
402 {
403 struct mock_iommu_domain_nested *mock_nested;
404 struct mock_iommu_domain *mock_parent;
405
406 if (flags & ~IOMMU_HWPT_ALLOC_PASID)
407 return ERR_PTR(-EOPNOTSUPP);
408 if (!parent || parent->ops != mock_ops.default_domain_ops)
409 return ERR_PTR(-EINVAL);
410
411 mock_parent = to_mock_domain(parent);
412 if (!mock_parent)
413 return ERR_PTR(-EINVAL);
414
415 mock_nested = __mock_domain_alloc_nested(user_data);
416 if (IS_ERR(mock_nested))
417 return ERR_CAST(mock_nested);
418 mock_nested->parent = mock_parent;
419 return &mock_nested->domain;
420 }
421
422 static struct iommu_domain *
423 mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
424 const struct iommu_user_data *user_data)
425 {
426 bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
427 const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
428 IOMMU_HWPT_ALLOC_NEST_PARENT |
429 IOMMU_HWPT_ALLOC_PASID;
430 struct mock_dev *mdev = to_mock_dev(dev);
431 bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
432 struct mock_iommu_domain *mock;
433
434 if (user_data)
435 return ERR_PTR(-EOPNOTSUPP);
436 if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
437 return ERR_PTR(-EOPNOTSUPP);
438
439 mock = kzalloc(sizeof(*mock), GFP_KERNEL);
440 if (!mock)
441 return ERR_PTR(-ENOMEM);
442 mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
443 mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
444 mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
445 if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
446 mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
447 mock->domain.ops = mock_ops.default_domain_ops;
448 mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
449 xa_init(&mock->pfns);
450
451 if (has_dirty_flag)
452 mock->domain.dirty_ops = &dirty_ops;
453 return &mock->domain;
454 }
455
456 static void mock_domain_free(struct iommu_domain *domain)
457 {
458 struct mock_iommu_domain *mock = to_mock_domain(domain);
459
460 WARN_ON(!xa_empty(&mock->pfns));
461 kfree(mock);
462 }
463
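/*
 * Map by storing one xarray entry per MOCK_IO_PAGE_SIZE chunk. The first
 * chunk of the request is tagged MOCK_PFN_START_IOVA and the last chunk
 * MOCK_PFN_LAST_IOVA so that unmap can sanity check range boundaries; every
 * chunk of a huge mapping additionally gets MOCK_PFN_HUGE_IOVA. On failure
 * all entries stored so far are erased again.
 */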
464 static int mock_domain_map_pages(struct iommu_domain *domain,
465 unsigned long iova, phys_addr_t paddr,
466 size_t pgsize, size_t pgcount, int prot,
467 gfp_t gfp, size_t *mapped)
468 {
469 struct mock_iommu_domain *mock = to_mock_domain(domain);
470 unsigned long flags = MOCK_PFN_START_IOVA;
471 unsigned long start_iova = iova;
472
473 /*
474 * xarray does not reliably work with fault injection because it retries
475 * allocations internally, so add our own failure point.
476 */
477 if (iommufd_should_fail())
478 return -ENOENT;
479
480 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
481 WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
482 for (; pgcount; pgcount--) {
483 size_t cur;
484
485 for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
486 void *old;
487
488 if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
489 flags = MOCK_PFN_LAST_IOVA;
490 if (pgsize != MOCK_IO_PAGE_SIZE) {
491 flags |= MOCK_PFN_HUGE_IOVA;
492 }
493 old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
494 xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
495 flags),
496 gfp);
497 if (xa_is_err(old)) {
498 for (; start_iova != iova;
499 start_iova += MOCK_IO_PAGE_SIZE)
500 xa_erase(&mock->pfns,
501 start_iova /
502 MOCK_IO_PAGE_SIZE);
503 return xa_err(old);
504 }
505 WARN_ON(old);
506 iova += MOCK_IO_PAGE_SIZE;
507 paddr += MOCK_IO_PAGE_SIZE;
508 *mapped += MOCK_IO_PAGE_SIZE;
509 flags = 0;
510 }
511 }
512 return 0;
513 }
514
515 static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
516 unsigned long iova, size_t pgsize,
517 size_t pgcount,
518 struct iommu_iotlb_gather *iotlb_gather)
519 {
520 struct mock_iommu_domain *mock = to_mock_domain(domain);
521 bool first = true;
522 size_t ret = 0;
523 void *ent;
524
525 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
526 WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
527
528 for (; pgcount; pgcount--) {
529 size_t cur;
530
531 for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
532 ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
533
534 /*
535 * iommufd generates unmaps that must be a strict
536 * superset of the maps performed. So every
537 * starting/ending IOVA should have been an iova passed
538 * to map.
539 *
540 * This simple logic doesn't work when the HUGE_PAGE is
541 * turned on since the core code will automatically
542 * switch between the two page sizes creating a break in
543 * the unmap calls. The break can land in the middle of
544 * contiguous IOVA.
545 */
546 if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
547 if (first) {
548 WARN_ON(ent && !(xa_to_value(ent) &
549 MOCK_PFN_START_IOVA));
550 first = false;
551 }
552 if (pgcount == 1 &&
553 cur + MOCK_IO_PAGE_SIZE == pgsize)
554 WARN_ON(ent && !(xa_to_value(ent) &
555 MOCK_PFN_LAST_IOVA));
556 }
557
558 iova += MOCK_IO_PAGE_SIZE;
559 ret += MOCK_IO_PAGE_SIZE;
560 }
561 }
562 return ret;
563 }
564
565 static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
566 dma_addr_t iova)
567 {
568 struct mock_iommu_domain *mock = to_mock_domain(domain);
569 void *ent;
570
571 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
572 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
573 WARN_ON(!ent);
574 return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
575 }
576
577 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
578 {
579 struct mock_dev *mdev = to_mock_dev(dev);
580
581 switch (cap) {
582 case IOMMU_CAP_CACHE_COHERENCY:
583 return true;
584 case IOMMU_CAP_DIRTY_TRACKING:
585 return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
586 default:
587 break;
588 }
589
590 return false;
591 }
592
593 static struct iopf_queue *mock_iommu_iopf_queue;
594
595 static struct mock_iommu_device {
596 struct iommu_device iommu_dev;
597 struct completion complete;
598 refcount_t users;
599 } mock_iommu;
600
601 static struct iommu_device *mock_probe_device(struct device *dev)
602 {
603 if (dev->bus != &iommufd_mock_bus_type.bus)
604 return ERR_PTR(-ENODEV);
605 return &mock_iommu.iommu_dev;
606 }
607
608 static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
609 struct iommu_page_response *msg)
610 {
611 }
612
613 static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
614 {
615 if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
616 return -ENODEV;
617
618 return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
619 }
620
621 static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
622 {
623 if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
624 return -ENODEV;
625
626 iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
627
628 return 0;
629 }
630
631 static void mock_viommu_destroy(struct iommufd_viommu *viommu)
632 {
633 struct mock_iommu_device *mock_iommu = container_of(
634 viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
635
636 if (refcount_dec_and_test(&mock_iommu->users))
637 complete(&mock_iommu->complete);
638
639 /* iommufd core frees mock_viommu and viommu */
640 }
641
642 static struct iommu_domain *
643 mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
644 const struct iommu_user_data *user_data)
645 {
646 struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
647 struct mock_iommu_domain_nested *mock_nested;
648
649 if (flags & ~IOMMU_HWPT_ALLOC_PASID)
650 return ERR_PTR(-EOPNOTSUPP);
651
652 mock_nested = __mock_domain_alloc_nested(user_data);
653 if (IS_ERR(mock_nested))
654 return ERR_CAST(mock_nested);
655 mock_nested->mock_viommu = mock_viommu;
656 mock_nested->parent = mock_viommu->s2_parent;
657 return &mock_nested->domain;
658 }
659
660 static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
661 struct iommu_user_data_array *array)
662 {
663 struct iommu_viommu_invalidate_selftest *cmds;
664 struct iommu_viommu_invalidate_selftest *cur;
665 struct iommu_viommu_invalidate_selftest *end;
666 int rc;
667
668 /* A zero-length array is allowed to validate the array type */
669 if (array->entry_num == 0 &&
670 array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
671 array->entry_num = 0;
672 return 0;
673 }
674
675 cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
676 if (!cmds)
677 return -ENOMEM;
678 cur = cmds;
679 end = cmds + array->entry_num;
680
681 static_assert(sizeof(*cmds) == 3 * sizeof(u32));
682 rc = iommu_copy_struct_from_full_user_array(
683 cmds, sizeof(*cmds), array,
684 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
685 if (rc)
686 goto out;
687
688 while (cur != end) {
689 struct mock_dev *mdev;
690 struct device *dev;
691 int i;
692
693 if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
694 rc = -EOPNOTSUPP;
695 goto out;
696 }
697
698 if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
699 rc = -EINVAL;
700 goto out;
701 }
702
703 xa_lock(&viommu->vdevs);
704 dev = iommufd_viommu_find_dev(viommu,
705 (unsigned long)cur->vdev_id);
706 if (!dev) {
707 xa_unlock(&viommu->vdevs);
708 rc = -EINVAL;
709 goto out;
710 }
711 mdev = container_of(dev, struct mock_dev, dev);
712
713 if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
714 /* Invalidate all cache entries and ignore cache_id */
715 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
716 mdev->cache[i] = 0;
717 } else {
718 mdev->cache[cur->cache_id] = 0;
719 }
720 xa_unlock(&viommu->vdevs);
721
722 cur++;
723 }
724 out:
725 array->entry_num = cur - cmds;
726 kfree(cmds);
727 return rc;
728 }
729
730 static struct iommufd_viommu_ops mock_viommu_ops = {
731 .destroy = mock_viommu_destroy,
732 .alloc_domain_nested = mock_viommu_alloc_domain_nested,
733 .cache_invalidate = mock_viommu_cache_invalidate,
734 };
735
736 static struct iommufd_viommu *mock_viommu_alloc(struct device *dev,
737 struct iommu_domain *domain,
738 struct iommufd_ctx *ictx,
739 unsigned int viommu_type)
740 {
741 struct mock_iommu_device *mock_iommu =
742 iommu_get_iommu_dev(dev, struct mock_iommu_device, iommu_dev);
743 struct mock_viommu *mock_viommu;
744
745 if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
746 return ERR_PTR(-EOPNOTSUPP);
747
748 mock_viommu = iommufd_viommu_alloc(ictx, struct mock_viommu, core,
749 &mock_viommu_ops);
750 if (IS_ERR(mock_viommu))
751 return ERR_CAST(mock_viommu);
752
753 refcount_inc(&mock_iommu->users);
754 return &mock_viommu->core;
755 }
756
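/*
 * The iommu_ops that mock devices probe against. The blocking domain doubles
 * as the default domain, and default_domain_ops below are the ops used by the
 * UNMANAGED paging domain returned from mock_domain_alloc_paging_flags().
 */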
757 static const struct iommu_ops mock_ops = {
758 /*
759 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
760 * because it is zero.
761 */
762 .default_domain = &mock_blocking_domain,
763 .blocked_domain = &mock_blocking_domain,
764 .owner = THIS_MODULE,
765 .pgsize_bitmap = MOCK_IO_PAGE_SIZE,
766 .hw_info = mock_domain_hw_info,
767 .domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
768 .domain_alloc_nested = mock_domain_alloc_nested,
769 .capable = mock_domain_capable,
770 .device_group = generic_device_group,
771 .probe_device = mock_probe_device,
772 .page_response = mock_domain_page_response,
773 .dev_enable_feat = mock_dev_enable_feat,
774 .dev_disable_feat = mock_dev_disable_feat,
775 .user_pasid_table = true,
776 .viommu_alloc = mock_viommu_alloc,
777 .default_domain_ops =
778 &(struct iommu_domain_ops){
779 .free = mock_domain_free,
780 .attach_dev = mock_domain_nop_attach,
781 .map_pages = mock_domain_map_pages,
782 .unmap_pages = mock_domain_unmap_pages,
783 .iova_to_phys = mock_domain_iova_to_phys,
784 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
785 },
786 };
787
788 static void mock_domain_free_nested(struct iommu_domain *domain)
789 {
790 kfree(to_mock_nested(domain));
791 }
792
793 static int
794 mock_domain_cache_invalidate_user(struct iommu_domain *domain,
795 struct iommu_user_data_array *array)
796 {
797 struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain);
798 struct iommu_hwpt_invalidate_selftest inv;
799 u32 processed = 0;
800 int i = 0, j;
801 int rc = 0;
802
803 if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
804 rc = -EINVAL;
805 goto out;
806 }
807
808 for ( ; i < array->entry_num; i++) {
809 rc = iommu_copy_struct_from_user_array(&inv, array,
810 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
811 i, iotlb_id);
812 if (rc)
813 break;
814
815 if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
816 rc = -EOPNOTSUPP;
817 break;
818 }
819
820 if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
821 rc = -EINVAL;
822 break;
823 }
824
825 if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
826 /* Invalidate all mock iotlb entries and ignore iotlb_id */
827 for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
828 mock_nested->iotlb[j] = 0;
829 } else {
830 mock_nested->iotlb[inv.iotlb_id] = 0;
831 }
832
833 processed++;
834 }
835
836 out:
837 array->entry_num = processed;
838 return rc;
839 }
840
841 static struct iommu_domain_ops domain_nested_ops = {
842 .free = mock_domain_free_nested,
843 .attach_dev = mock_domain_nop_attach,
844 .cache_invalidate_user = mock_domain_cache_invalidate_user,
845 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
846 };
847
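/*
 * Helpers to translate a user supplied HWPT ID into the backing mock domain,
 * checking that the object really is a mock paging or mock nested domain
 * before handing it back to the test op handlers.
 */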
848 static inline struct iommufd_hw_pagetable *
849 __get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
850 {
851 struct iommufd_object *obj;
852
853 obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
854 if (IS_ERR(obj))
855 return ERR_CAST(obj);
856 return container_of(obj, struct iommufd_hw_pagetable, obj);
857 }
858
859 static inline struct iommufd_hw_pagetable *
860 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
861 struct mock_iommu_domain **mock)
862 {
863 struct iommufd_hw_pagetable *hwpt;
864
865 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
866 if (IS_ERR(hwpt))
867 return hwpt;
868 if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
869 hwpt->domain->ops != mock_ops.default_domain_ops) {
870 iommufd_put_object(ucmd->ictx, &hwpt->obj);
871 return ERR_PTR(-EINVAL);
872 }
873 *mock = to_mock_domain(hwpt->domain);
874 return hwpt;
875 }
876
877 static inline struct iommufd_hw_pagetable *
878 get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
879 struct mock_iommu_domain_nested **mock_nested)
880 {
881 struct iommufd_hw_pagetable *hwpt;
882
883 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
884 if (IS_ERR(hwpt))
885 return hwpt;
886 if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
887 hwpt->domain->ops != &domain_nested_ops) {
888 iommufd_put_object(ucmd->ictx, &hwpt->obj);
889 return ERR_PTR(-EINVAL);
890 }
891 *mock_nested = to_mock_nested(hwpt->domain);
892 return hwpt;
893 }
894
895 static void mock_dev_release(struct device *dev)
896 {
897 struct mock_dev *mdev = to_mock_dev(dev);
898
899 ida_free(&mock_dev_ida, mdev->id);
900 kfree(mdev);
901 }
902
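/*
 * Create and add a fake device on the mock bus: it gets an ida-allocated
 * "iommufd_mockN" name, default cache tag values, and, when
 * MOCK_FLAGS_DEVICE_PASID is requested, a "pasid-num-bits" software node
 * property describing its PASID width.
 */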
903 static struct mock_dev *mock_dev_create(unsigned long dev_flags)
904 {
905 struct property_entry prop[] = {
906 PROPERTY_ENTRY_U32("pasid-num-bits", 0),
907 {},
908 };
909 const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY |
910 MOCK_FLAGS_DEVICE_HUGE_IOVA |
911 MOCK_FLAGS_DEVICE_PASID;
912 struct mock_dev *mdev;
913 int rc, i;
914
915 if (dev_flags & ~valid_flags)
916 return ERR_PTR(-EINVAL);
917
918 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
919 if (!mdev)
920 return ERR_PTR(-ENOMEM);
921
922 init_rwsem(&mdev->viommu_rwsem);
923 device_initialize(&mdev->dev);
924 mdev->flags = dev_flags;
925 mdev->dev.release = mock_dev_release;
926 mdev->dev.bus = &iommufd_mock_bus_type.bus;
927 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
928 mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT;
929
930 rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
931 if (rc < 0)
932 goto err_put;
933 mdev->id = rc;
934
935 rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
936 if (rc)
937 goto err_put;
938
939 if (dev_flags & MOCK_FLAGS_DEVICE_PASID)
940 prop[0] = PROPERTY_ENTRY_U32("pasid-num-bits", MOCK_PASID_WIDTH);
941
942 rc = device_create_managed_software_node(&mdev->dev, prop, NULL);
943 if (rc) {
944 dev_err(&mdev->dev, "add pasid-num-bits property failed, rc: %d", rc);
945 goto err_put;
946 }
947
948 rc = device_add(&mdev->dev);
949 if (rc)
950 goto err_put;
951 return mdev;
952
953 err_put:
954 put_device(&mdev->dev);
955 return ERR_PTR(rc);
956 }
957
958 static void mock_dev_destroy(struct mock_dev *mdev)
959 {
960 device_unregister(&mdev->dev);
961 }
962
963 bool iommufd_selftest_is_mock_dev(struct device *dev)
964 {
965 return dev->release == mock_dev_release;
966 }
967
968 /* Create an hw_pagetable with the mock domain so we can test the domain ops */
969 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
970 struct iommu_test_cmd *cmd)
971 {
972 struct iommufd_device *idev;
973 struct selftest_obj *sobj;
974 u32 pt_id = cmd->id;
975 u32 dev_flags = 0;
976 u32 idev_id;
977 int rc;
978
979 sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
980 if (IS_ERR(sobj))
981 return PTR_ERR(sobj);
982
983 sobj->idev.ictx = ucmd->ictx;
984 sobj->type = TYPE_IDEV;
985
986 if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
987 dev_flags = cmd->mock_domain_flags.dev_flags;
988
989 sobj->idev.mock_dev = mock_dev_create(dev_flags);
990 if (IS_ERR(sobj->idev.mock_dev)) {
991 rc = PTR_ERR(sobj->idev.mock_dev);
992 goto out_sobj;
993 }
994
995 idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
996 &idev_id);
997 if (IS_ERR(idev)) {
998 rc = PTR_ERR(idev);
999 goto out_mdev;
1000 }
1001 sobj->idev.idev = idev;
1002
1003 rc = iommufd_device_attach(idev, IOMMU_NO_PASID, &pt_id);
1004 if (rc)
1005 goto out_unbind;
1006
1007 /* Userspace must destroy the device_id to destroy the object */
1008 cmd->mock_domain.out_hwpt_id = pt_id;
1009 cmd->mock_domain.out_stdev_id = sobj->obj.id;
1010 cmd->mock_domain.out_idev_id = idev_id;
1011 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1012 if (rc)
1013 goto out_detach;
1014 iommufd_object_finalize(ucmd->ictx, &sobj->obj);
1015 return 0;
1016
1017 out_detach:
1018 iommufd_device_detach(idev, IOMMU_NO_PASID);
1019 out_unbind:
1020 iommufd_device_unbind(idev);
1021 out_mdev:
1022 mock_dev_destroy(sobj->idev.mock_dev);
1023 out_sobj:
1024 iommufd_object_abort(ucmd->ictx, &sobj->obj);
1025 return rc;
1026 }
1027
1028 static struct selftest_obj *
1029 iommufd_test_get_selftest_obj(struct iommufd_ctx *ictx, u32 id)
1030 {
1031 struct iommufd_object *dev_obj;
1032 struct selftest_obj *sobj;
1033
1034 /*
1035 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
1036 * it doesn't race with detach, which is not allowed.
1037 */
1038 dev_obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_SELFTEST);
1039 if (IS_ERR(dev_obj))
1040 return ERR_CAST(dev_obj);
1041
1042 sobj = to_selftest_obj(dev_obj);
1043 if (sobj->type != TYPE_IDEV) {
1044 iommufd_put_object(ictx, dev_obj);
1045 return ERR_PTR(-EINVAL);
1046 }
1047 return sobj;
1048 }
1049
1050 /* Replace the mock domain with a manually allocated hw_pagetable */
1051 static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
1052 unsigned int device_id, u32 pt_id,
1053 struct iommu_test_cmd *cmd)
1054 {
1055 struct selftest_obj *sobj;
1056 int rc;
1057
1058 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, device_id);
1059 if (IS_ERR(sobj))
1060 return PTR_ERR(sobj);
1061
1062 rc = iommufd_device_replace(sobj->idev.idev, IOMMU_NO_PASID, &pt_id);
1063 if (rc)
1064 goto out_sobj;
1065
1066 cmd->mock_domain_replace.pt_id = pt_id;
1067 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1068
1069 out_sobj:
1070 iommufd_put_object(ucmd->ictx, &sobj->obj);
1071 return rc;
1072 }
1073
1074 /* Add an additional reserved IOVA to the IOAS */
1075 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
1076 unsigned int mockpt_id,
1077 unsigned long start, size_t length)
1078 {
1079 struct iommufd_ioas *ioas;
1080 int rc;
1081
1082 ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
1083 if (IS_ERR(ioas))
1084 return PTR_ERR(ioas);
1085 down_write(&ioas->iopt.iova_rwsem);
1086 rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
1087 up_write(&ioas->iopt.iova_rwsem);
1088 iommufd_put_object(ucmd->ictx, &ioas->obj);
1089 return rc;
1090 }
1091
1092 /* Check that every pfn under each iova matches the pfn under a user VA */
1093 static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
1094 unsigned int mockpt_id, unsigned long iova,
1095 size_t length, void __user *uptr)
1096 {
1097 struct iommufd_hw_pagetable *hwpt;
1098 struct mock_iommu_domain *mock;
1099 uintptr_t end;
1100 int rc;
1101
1102 if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
1103 (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
1104 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1105 return -EINVAL;
1106
1107 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1108 if (IS_ERR(hwpt))
1109 return PTR_ERR(hwpt);
1110
1111 for (; length; length -= MOCK_IO_PAGE_SIZE) {
1112 struct page *pages[1];
1113 unsigned long pfn;
1114 long npages;
1115 void *ent;
1116
1117 npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
1118 pages);
1119 if (npages < 0) {
1120 rc = npages;
1121 goto out_put;
1122 }
1123 if (WARN_ON(npages != 1)) {
1124 rc = -EFAULT;
1125 goto out_put;
1126 }
1127 pfn = page_to_pfn(pages[0]);
1128 put_page(pages[0]);
1129
1130 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
1131 if (!ent ||
1132 (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
1133 pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
1134 rc = -EINVAL;
1135 goto out_put;
1136 }
1137 iova += MOCK_IO_PAGE_SIZE;
1138 uptr += MOCK_IO_PAGE_SIZE;
1139 }
1140 rc = 0;
1141
1142 out_put:
1143 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1144 return rc;
1145 }
1146
1147 /* Check that the page ref count matches, to look for missing pin/unpins */
1148 static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
1149 void __user *uptr, size_t length,
1150 unsigned int refs)
1151 {
1152 uintptr_t end;
1153
1154 if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
1155 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1156 return -EINVAL;
1157
1158 for (; length; length -= PAGE_SIZE) {
1159 struct page *pages[1];
1160 long npages;
1161
1162 npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
1163 if (npages < 0)
1164 return npages;
1165 if (WARN_ON(npages != 1))
1166 return -EFAULT;
1167 if (!PageCompound(pages[0])) {
1168 unsigned int count;
1169
1170 count = page_ref_count(pages[0]);
1171 if (count / GUP_PIN_COUNTING_BIAS != refs) {
1172 put_page(pages[0]);
1173 return -EIO;
1174 }
1175 }
1176 put_page(pages[0]);
1177 uptr += PAGE_SIZE;
1178 }
1179 return 0;
1180 }
1181
1182 static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
1183 u32 mockpt_id, unsigned int iotlb_id,
1184 u32 iotlb)
1185 {
1186 struct mock_iommu_domain_nested *mock_nested;
1187 struct iommufd_hw_pagetable *hwpt;
1188 int rc = 0;
1189
1190 hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
1191 if (IS_ERR(hwpt))
1192 return PTR_ERR(hwpt);
1193
1194 mock_nested = to_mock_nested(hwpt->domain);
1195
1196 if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
1197 mock_nested->iotlb[iotlb_id] != iotlb)
1198 rc = -EINVAL;
1199 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1200 return rc;
1201 }
1202
1203 static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id,
1204 unsigned int cache_id, u32 cache)
1205 {
1206 struct iommufd_device *idev;
1207 struct mock_dev *mdev;
1208 int rc = 0;
1209
1210 idev = iommufd_get_device(ucmd, idev_id);
1211 if (IS_ERR(idev))
1212 return PTR_ERR(idev);
1213 mdev = container_of(idev->dev, struct mock_dev, dev);
1214
1215 if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache)
1216 rc = -EINVAL;
1217 iommufd_put_object(ucmd->ictx, &idev->obj);
1218 return rc;
1219 }
1220
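/*
 * A selftest "access" wraps an iommufd_access behind an anonymous file
 * descriptor handed back to userspace. items tracks every pinned range
 * (under lock) so iommufd_test_access_unmap() can unpin whatever overlaps a
 * range being unmapped from the IOAS.
 */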
1221 struct selftest_access {
1222 struct iommufd_access *access;
1223 struct file *file;
1224 struct mutex lock;
1225 struct list_head items;
1226 unsigned int next_id;
1227 bool destroying;
1228 };
1229
1230 struct selftest_access_item {
1231 struct list_head items_elm;
1232 unsigned long iova;
1233 size_t length;
1234 unsigned int id;
1235 };
1236
1237 static const struct file_operations iommfd_test_staccess_fops;
1238
1239 static struct selftest_access *iommufd_access_get(int fd)
1240 {
1241 struct file *file;
1242
1243 file = fget(fd);
1244 if (!file)
1245 return ERR_PTR(-EBADFD);
1246
1247 if (file->f_op != &iommfd_test_staccess_fops) {
1248 fput(file);
1249 return ERR_PTR(-EBADFD);
1250 }
1251 return file->private_data;
1252 }
1253
1254 static void iommufd_test_access_unmap(void *data, unsigned long iova,
1255 unsigned long length)
1256 {
1257 unsigned long iova_last = iova + length - 1;
1258 struct selftest_access *staccess = data;
1259 struct selftest_access_item *item;
1260 struct selftest_access_item *tmp;
1261
1262 mutex_lock(&staccess->lock);
1263 list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
1264 if (iova > item->iova + item->length - 1 ||
1265 iova_last < item->iova)
1266 continue;
1267 list_del(&item->items_elm);
1268 iommufd_access_unpin_pages(staccess->access, item->iova,
1269 item->length);
1270 kfree(item);
1271 }
1272 mutex_unlock(&staccess->lock);
1273 }
1274
1275 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
1276 unsigned int access_id,
1277 unsigned int item_id)
1278 {
1279 struct selftest_access_item *item;
1280 struct selftest_access *staccess;
1281
1282 staccess = iommufd_access_get(access_id);
1283 if (IS_ERR(staccess))
1284 return PTR_ERR(staccess);
1285
1286 mutex_lock(&staccess->lock);
1287 list_for_each_entry(item, &staccess->items, items_elm) {
1288 if (item->id == item_id) {
1289 list_del(&item->items_elm);
1290 iommufd_access_unpin_pages(staccess->access, item->iova,
1291 item->length);
1292 mutex_unlock(&staccess->lock);
1293 kfree(item);
1294 fput(staccess->file);
1295 return 0;
1296 }
1297 }
1298 mutex_unlock(&staccess->lock);
1299 fput(staccess->file);
1300 return -ENOENT;
1301 }
1302
1303 static int iommufd_test_staccess_release(struct inode *inode,
1304 struct file *filep)
1305 {
1306 struct selftest_access *staccess = filep->private_data;
1307
1308 if (staccess->access) {
1309 iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
1310 iommufd_access_destroy(staccess->access);
1311 }
1312 mutex_destroy(&staccess->lock);
1313 kfree(staccess);
1314 return 0;
1315 }
1316
1317 static const struct iommufd_access_ops selftest_access_ops_pin = {
1318 .needs_pin_pages = 1,
1319 .unmap = iommufd_test_access_unmap,
1320 };
1321
1322 static const struct iommufd_access_ops selftest_access_ops = {
1323 .unmap = iommufd_test_access_unmap,
1324 };
1325
1326 static const struct file_operations iommfd_test_staccess_fops = {
1327 .release = iommufd_test_staccess_release,
1328 };
1329
1330 static struct selftest_access *iommufd_test_alloc_access(void)
1331 {
1332 struct selftest_access *staccess;
1333 struct file *filep;
1334
1335 staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
1336 if (!staccess)
1337 return ERR_PTR(-ENOMEM);
1338 INIT_LIST_HEAD(&staccess->items);
1339 mutex_init(&staccess->lock);
1340
1341 filep = anon_inode_getfile("[iommufd_test_staccess]",
1342 &iommfd_test_staccess_fops, staccess,
1343 O_RDWR);
1344 if (IS_ERR(filep)) {
1345 kfree(staccess);
1346 return ERR_CAST(filep);
1347 }
1348 staccess->file = filep;
1349 return staccess;
1350 }
1351
1352 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
1353 unsigned int ioas_id, unsigned int flags)
1354 {
1355 struct iommu_test_cmd *cmd = ucmd->cmd;
1356 struct selftest_access *staccess;
1357 struct iommufd_access *access;
1358 u32 id;
1359 int fdno;
1360 int rc;
1361
1362 if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
1363 return -EOPNOTSUPP;
1364
1365 staccess = iommufd_test_alloc_access();
1366 if (IS_ERR(staccess))
1367 return PTR_ERR(staccess);
1368
1369 fdno = get_unused_fd_flags(O_CLOEXEC);
1370 if (fdno < 0) {
1371 rc = -ENOMEM;
1372 goto out_free_staccess;
1373 }
1374
1375 access = iommufd_access_create(
1376 ucmd->ictx,
1377 (flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
1378 &selftest_access_ops_pin :
1379 &selftest_access_ops,
1380 staccess, &id);
1381 if (IS_ERR(access)) {
1382 rc = PTR_ERR(access);
1383 goto out_put_fdno;
1384 }
1385 rc = iommufd_access_attach(access, ioas_id);
1386 if (rc)
1387 goto out_destroy;
1388 cmd->create_access.out_access_fd = fdno;
1389 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1390 if (rc)
1391 goto out_destroy;
1392
1393 staccess->access = access;
1394 fd_install(fdno, staccess->file);
1395 return 0;
1396
1397 out_destroy:
1398 iommufd_access_destroy(access);
1399 out_put_fdno:
1400 put_unused_fd(fdno);
1401 out_free_staccess:
1402 fput(staccess->file);
1403 return rc;
1404 }
1405
1406 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
1407 unsigned int access_id,
1408 unsigned int ioas_id)
1409 {
1410 struct selftest_access *staccess;
1411 int rc;
1412
1413 staccess = iommufd_access_get(access_id);
1414 if (IS_ERR(staccess))
1415 return PTR_ERR(staccess);
1416
1417 rc = iommufd_access_replace(staccess->access, ioas_id);
1418 fput(staccess->file);
1419 return rc;
1420 }
1421
1422 /* Check that the pages in a page array match the pages in the user VA */
1423 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
1424 size_t npages)
1425 {
1426 for (; npages; npages--) {
1427 struct page *tmp_pages[1];
1428 long rc;
1429
1430 rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
1431 if (rc < 0)
1432 return rc;
1433 if (WARN_ON(rc != 1))
1434 return -EFAULT;
1435 put_page(tmp_pages[0]);
1436 if (tmp_pages[0] != *pages)
1437 return -EBADE;
1438 pages++;
1439 uptr += PAGE_SIZE;
1440 }
1441 return 0;
1442 }
1443
1444 static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
1445 unsigned int access_id, unsigned long iova,
1446 size_t length, void __user *uptr,
1447 u32 flags)
1448 {
1449 struct iommu_test_cmd *cmd = ucmd->cmd;
1450 struct selftest_access_item *item;
1451 struct selftest_access *staccess;
1452 struct page **pages;
1453 size_t npages;
1454 int rc;
1455
1456 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1457 if (length > 16*1024*1024)
1458 return -ENOMEM;
1459
1460 if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
1461 return -EOPNOTSUPP;
1462
1463 staccess = iommufd_access_get(access_id);
1464 if (IS_ERR(staccess))
1465 return PTR_ERR(staccess);
1466
1467 if (staccess->access->ops != &selftest_access_ops_pin) {
1468 rc = -EOPNOTSUPP;
1469 goto out_put;
1470 }
1471
1472 if (flags & MOCK_FLAGS_ACCESS_SYZ)
1473 iova = iommufd_test_syz_conv_iova(staccess->access,
1474 &cmd->access_pages.iova);
1475
1476 npages = (ALIGN(iova + length, PAGE_SIZE) -
1477 ALIGN_DOWN(iova, PAGE_SIZE)) /
1478 PAGE_SIZE;
1479 pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
1480 if (!pages) {
1481 rc = -ENOMEM;
1482 goto out_put;
1483 }
1484
1485 /*
1486 * Drivers will need to think very carefully about this locking. The
1487 * core code can do multiple unmaps simultaneously after
1488 * iommufd_access_pin_pages() and *all* the unmaps must not return until
1489 * the range is unpinned. This simple implementation puts a global lock
1490 * around the pin, which may not suit drivers that want this to be a
1491 * performance path. Drivers that get this wrong will trigger WARN_ON
1492 * races and cause EDEADLOCK failures to userspace.
1493 */
1494 mutex_lock(&staccess->lock);
1495 rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
1496 flags & MOCK_FLAGS_ACCESS_WRITE);
1497 if (rc)
1498 goto out_unlock;
1499
1500 /* For syzkaller allow uptr to be NULL to skip this check */
1501 if (uptr) {
1502 rc = iommufd_test_check_pages(
1503 uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
1504 npages);
1505 if (rc)
1506 goto out_unaccess;
1507 }
1508
1509 item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
1510 if (!item) {
1511 rc = -ENOMEM;
1512 goto out_unaccess;
1513 }
1514
1515 item->iova = iova;
1516 item->length = length;
1517 item->id = staccess->next_id++;
1518 list_add_tail(&item->items_elm, &staccess->items);
1519
1520 cmd->access_pages.out_access_pages_id = item->id;
1521 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1522 if (rc)
1523 goto out_free_item;
1524 goto out_unlock;
1525
1526 out_free_item:
1527 list_del(&item->items_elm);
1528 kfree(item);
1529 out_unaccess:
1530 iommufd_access_unpin_pages(staccess->access, iova, length);
1531 out_unlock:
1532 mutex_unlock(&staccess->lock);
1533 kvfree(pages);
1534 out_put:
1535 fput(staccess->file);
1536 return rc;
1537 }
1538
1539 static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
1540 unsigned int access_id, unsigned long iova,
1541 size_t length, void __user *ubuf,
1542 unsigned int flags)
1543 {
1544 struct iommu_test_cmd *cmd = ucmd->cmd;
1545 struct selftest_access *staccess;
1546 void *tmp;
1547 int rc;
1548
1549 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1550 if (length > 16*1024*1024)
1551 return -ENOMEM;
1552
1553 if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
1554 MOCK_FLAGS_ACCESS_SYZ))
1555 return -EOPNOTSUPP;
1556
1557 staccess = iommufd_access_get(access_id);
1558 if (IS_ERR(staccess))
1559 return PTR_ERR(staccess);
1560
1561 tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
1562 if (!tmp) {
1563 rc = -ENOMEM;
1564 goto out_put;
1565 }
1566
1567 if (flags & MOCK_ACCESS_RW_WRITE) {
1568 if (copy_from_user(tmp, ubuf, length)) {
1569 rc = -EFAULT;
1570 goto out_free;
1571 }
1572 }
1573
1574 if (flags & MOCK_FLAGS_ACCESS_SYZ)
1575 iova = iommufd_test_syz_conv_iova(staccess->access,
1576 &cmd->access_rw.iova);
1577
1578 rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
1579 if (rc)
1580 goto out_free;
1581 if (!(flags & MOCK_ACCESS_RW_WRITE)) {
1582 if (copy_to_user(ubuf, tmp, length)) {
1583 rc = -EFAULT;
1584 goto out_free;
1585 }
1586 }
1587
1588 out_free:
1589 kvfree(tmp);
1590 out_put:
1591 fput(staccess->file);
1592 return rc;
1593 }
1594 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
1595 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
1596 __IOMMUFD_ACCESS_RW_SLOW_PATH);
1597
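/*
 * Seed dirty bits for testing: copy a bitmap (one bit per page_size unit)
 * from userspace and set MOCK_PFN_DIRTY_IOVA on each corresponding pfn entry
 * of the mock domain, reporting how many entries were marked.
 */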
1598 static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
1599 unsigned long iova, size_t length,
1600 unsigned long page_size, void __user *uptr,
1601 u32 flags)
1602 {
1603 unsigned long i, max;
1604 struct iommu_test_cmd *cmd = ucmd->cmd;
1605 struct iommufd_hw_pagetable *hwpt;
1606 struct mock_iommu_domain *mock;
1607 int rc, count = 0;
1608 void *tmp;
1609
1610 if (!page_size || !length || iova % page_size || length % page_size ||
1611 !uptr)
1612 return -EINVAL;
1613
1614 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1615 if (IS_ERR(hwpt))
1616 return PTR_ERR(hwpt);
1617
1618 if (!(mock->flags & MOCK_DIRTY_TRACK)) {
1619 rc = -EINVAL;
1620 goto out_put;
1621 }
1622
1623 max = length / page_size;
1624 tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
1625 GFP_KERNEL_ACCOUNT);
1626 if (!tmp) {
1627 rc = -ENOMEM;
1628 goto out_put;
1629 }
1630
1631 if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
1632 rc = -EFAULT;
1633 goto out_free;
1634 }
1635
1636 for (i = 0; i < max; i++) {
1637 unsigned long cur = iova + i * page_size;
1638 void *ent, *old;
1639
1640 if (!test_bit(i, (unsigned long *)tmp))
1641 continue;
1642
1643 ent = xa_load(&mock->pfns, cur / page_size);
1644 if (ent) {
1645 unsigned long val;
1646
1647 val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
1648 old = xa_store(&mock->pfns, cur / page_size,
1649 xa_mk_value(val), GFP_KERNEL);
1650 WARN_ON_ONCE(ent != old);
1651 count++;
1652 }
1653 }
1654
1655 cmd->dirty.out_nr_dirty = count;
1656 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1657 out_free:
1658 kvfree(tmp);
1659 out_put:
1660 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1661 return rc;
1662 }
1663
1664 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
1665 struct iommu_test_cmd *cmd)
1666 {
1667 struct iopf_fault event = { };
1668 struct iommufd_device *idev;
1669
1670 idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
1671 if (IS_ERR(idev))
1672 return PTR_ERR(idev);
1673
1674 event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1675 if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
1676 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1677 event.fault.type = IOMMU_FAULT_PAGE_REQ;
1678 event.fault.prm.addr = cmd->trigger_iopf.addr;
1679 event.fault.prm.pasid = cmd->trigger_iopf.pasid;
1680 event.fault.prm.grpid = cmd->trigger_iopf.grpid;
1681 event.fault.prm.perm = cmd->trigger_iopf.perm;
1682
1683 iommu_report_device_fault(idev->dev, &event);
1684 iommufd_put_object(ucmd->ictx, &idev->obj);
1685
1686 return 0;
1687 }
1688
1689 static int iommufd_test_trigger_vevent(struct iommufd_ucmd *ucmd,
1690 struct iommu_test_cmd *cmd)
1691 {
1692 struct iommu_viommu_event_selftest test = {};
1693 struct iommufd_device *idev;
1694 struct mock_dev *mdev;
1695 int rc = -ENOENT;
1696
1697 idev = iommufd_get_device(ucmd, cmd->trigger_vevent.dev_id);
1698 if (IS_ERR(idev))
1699 return PTR_ERR(idev);
1700 mdev = to_mock_dev(idev->dev);
1701
1702 down_read(&mdev->viommu_rwsem);
1703 if (!mdev->viommu || !mdev->vdev_id)
1704 goto out_unlock;
1705
1706 test.virt_id = mdev->vdev_id;
1707 rc = iommufd_viommu_report_event(&mdev->viommu->core,
1708 IOMMU_VEVENTQ_TYPE_SELFTEST, &test,
1709 sizeof(test));
1710 out_unlock:
1711 up_read(&mdev->viommu_rwsem);
1712 iommufd_put_object(ucmd->ictx, &idev->obj);
1713
1714 return rc;
1715 }
1716
1717 static inline struct iommufd_hw_pagetable *
1718 iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id)
1719 {
1720 struct iommufd_object *pt_obj;
1721
1722 pt_obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_ANY);
1723 if (IS_ERR(pt_obj))
1724 return ERR_CAST(pt_obj);
1725
1726 if (pt_obj->type != IOMMUFD_OBJ_HWPT_NESTED &&
1727 pt_obj->type != IOMMUFD_OBJ_HWPT_PAGING) {
1728 iommufd_put_object(ucmd->ictx, pt_obj);
1729 return ERR_PTR(-EINVAL);
1730 }
1731
1732 return container_of(pt_obj, struct iommufd_hw_pagetable, obj);
1733 }
1734
1735 static int iommufd_test_pasid_check_hwpt(struct iommufd_ucmd *ucmd,
1736 struct iommu_test_cmd *cmd)
1737 {
1738 u32 hwpt_id = cmd->pasid_check.hwpt_id;
1739 struct iommu_domain *attached_domain;
1740 struct iommu_attach_handle *handle;
1741 struct iommufd_hw_pagetable *hwpt;
1742 struct selftest_obj *sobj;
1743 struct mock_dev *mdev;
1744 int rc = 0;
1745
1746 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1747 if (IS_ERR(sobj))
1748 return PTR_ERR(sobj);
1749
1750 mdev = sobj->idev.mock_dev;
1751
1752 handle = iommu_attach_handle_get(mdev->dev.iommu_group,
1753 cmd->pasid_check.pasid, 0);
1754 if (IS_ERR(handle))
1755 attached_domain = NULL;
1756 else
1757 attached_domain = handle->domain;
1758
1759 /* hwpt_id == 0 means to check if pasid is detached */
1760 if (!hwpt_id) {
1761 if (attached_domain)
1762 rc = -EINVAL;
1763 goto out_sobj;
1764 }
1765
1766 hwpt = iommufd_get_hwpt(ucmd, hwpt_id);
1767 if (IS_ERR(hwpt)) {
1768 rc = PTR_ERR(hwpt);
1769 goto out_sobj;
1770 }
1771
1772 if (attached_domain != hwpt->domain)
1773 rc = -EINVAL;
1774
1775 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1776 out_sobj:
1777 iommufd_put_object(ucmd->ictx, &sobj->obj);
1778 return rc;
1779 }
1780
1781 static int iommufd_test_pasid_attach(struct iommufd_ucmd *ucmd,
1782 struct iommu_test_cmd *cmd)
1783 {
1784 struct selftest_obj *sobj;
1785 int rc;
1786
1787 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1788 if (IS_ERR(sobj))
1789 return PTR_ERR(sobj);
1790
1791 rc = iommufd_device_attach(sobj->idev.idev, cmd->pasid_attach.pasid,
1792 &cmd->pasid_attach.pt_id);
1793 if (rc)
1794 goto out_sobj;
1795
1796 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1797 if (rc)
1798 iommufd_device_detach(sobj->idev.idev,
1799 cmd->pasid_attach.pasid);
1800
1801 out_sobj:
1802 iommufd_put_object(ucmd->ictx, &sobj->obj);
1803 return rc;
1804 }
1805
1806 static int iommufd_test_pasid_replace(struct iommufd_ucmd *ucmd,
1807 struct iommu_test_cmd *cmd)
1808 {
1809 struct selftest_obj *sobj;
1810 int rc;
1811
1812 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1813 if (IS_ERR(sobj))
1814 return PTR_ERR(sobj);
1815
1816 rc = iommufd_device_replace(sobj->idev.idev, cmd->pasid_attach.pasid,
1817 &cmd->pasid_attach.pt_id);
1818 if (rc)
1819 goto out_sobj;
1820
1821 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1822
1823 out_sobj:
1824 iommufd_put_object(ucmd->ictx, &sobj->obj);
1825 return rc;
1826 }
1827
1828 static int iommufd_test_pasid_detach(struct iommufd_ucmd *ucmd,
1829 struct iommu_test_cmd *cmd)
1830 {
1831 struct selftest_obj *sobj;
1832
1833 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1834 if (IS_ERR(sobj))
1835 return PTR_ERR(sobj);
1836
1837 iommufd_device_detach(sobj->idev.idev, cmd->pasid_detach.pasid);
1838 iommufd_put_object(ucmd->ictx, &sobj->obj);
1839 return 0;
1840 }
1841
1842 void iommufd_selftest_destroy(struct iommufd_object *obj)
1843 {
1844 struct selftest_obj *sobj = to_selftest_obj(obj);
1845
1846 switch (sobj->type) {
1847 case TYPE_IDEV:
1848 iommufd_device_detach(sobj->idev.idev, IOMMU_NO_PASID);
1849 iommufd_device_unbind(sobj->idev.idev);
1850 mock_dev_destroy(sobj->idev.mock_dev);
1851 break;
1852 }
1853 }
1854
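/* Dispatcher for the selftest ioctl: route each IOMMU_TEST_OP_* to its handler above */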
1855 int iommufd_test(struct iommufd_ucmd *ucmd)
1856 {
1857 struct iommu_test_cmd *cmd = ucmd->cmd;
1858
1859 switch (cmd->op) {
1860 case IOMMU_TEST_OP_ADD_RESERVED:
1861 return iommufd_test_add_reserved(ucmd, cmd->id,
1862 cmd->add_reserved.start,
1863 cmd->add_reserved.length);
1864 case IOMMU_TEST_OP_MOCK_DOMAIN:
1865 case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
1866 return iommufd_test_mock_domain(ucmd, cmd);
1867 case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
1868 return iommufd_test_mock_domain_replace(
1869 ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
1870 case IOMMU_TEST_OP_MD_CHECK_MAP:
1871 return iommufd_test_md_check_pa(
1872 ucmd, cmd->id, cmd->check_map.iova,
1873 cmd->check_map.length,
1874 u64_to_user_ptr(cmd->check_map.uptr));
1875 case IOMMU_TEST_OP_MD_CHECK_REFS:
1876 return iommufd_test_md_check_refs(
1877 ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
1878 cmd->check_refs.length, cmd->check_refs.refs);
1879 case IOMMU_TEST_OP_MD_CHECK_IOTLB:
1880 return iommufd_test_md_check_iotlb(ucmd, cmd->id,
1881 cmd->check_iotlb.id,
1882 cmd->check_iotlb.iotlb);
1883 case IOMMU_TEST_OP_DEV_CHECK_CACHE:
1884 return iommufd_test_dev_check_cache(ucmd, cmd->id,
1885 cmd->check_dev_cache.id,
1886 cmd->check_dev_cache.cache);
1887 case IOMMU_TEST_OP_CREATE_ACCESS:
1888 return iommufd_test_create_access(ucmd, cmd->id,
1889 cmd->create_access.flags);
1890 case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
1891 return iommufd_test_access_replace_ioas(
1892 ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
1893 case IOMMU_TEST_OP_ACCESS_PAGES:
1894 return iommufd_test_access_pages(
1895 ucmd, cmd->id, cmd->access_pages.iova,
1896 cmd->access_pages.length,
1897 u64_to_user_ptr(cmd->access_pages.uptr),
1898 cmd->access_pages.flags);
1899 case IOMMU_TEST_OP_ACCESS_RW:
1900 return iommufd_test_access_rw(
1901 ucmd, cmd->id, cmd->access_rw.iova,
1902 cmd->access_rw.length,
1903 u64_to_user_ptr(cmd->access_rw.uptr),
1904 cmd->access_rw.flags);
1905 case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
1906 return iommufd_test_access_item_destroy(
1907 ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
1908 case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
1909 /* Protect _batch_init(), cannot be less than elmsz */
1910 if (cmd->memory_limit.limit <
1911 sizeof(unsigned long) + sizeof(u32))
1912 return -EINVAL;
1913 iommufd_test_memory_limit = cmd->memory_limit.limit;
1914 return 0;
1915 case IOMMU_TEST_OP_DIRTY:
1916 return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
1917 cmd->dirty.length,
1918 cmd->dirty.page_size,
1919 u64_to_user_ptr(cmd->dirty.uptr),
1920 cmd->dirty.flags);
1921 case IOMMU_TEST_OP_TRIGGER_IOPF:
1922 return iommufd_test_trigger_iopf(ucmd, cmd);
1923 case IOMMU_TEST_OP_TRIGGER_VEVENT:
1924 return iommufd_test_trigger_vevent(ucmd, cmd);
1925 case IOMMU_TEST_OP_PASID_ATTACH:
1926 return iommufd_test_pasid_attach(ucmd, cmd);
1927 case IOMMU_TEST_OP_PASID_REPLACE:
1928 return iommufd_test_pasid_replace(ucmd, cmd);
1929 case IOMMU_TEST_OP_PASID_DETACH:
1930 return iommufd_test_pasid_detach(ucmd, cmd);
1931 case IOMMU_TEST_OP_PASID_CHECK_HWPT:
1932 return iommufd_test_pasid_check_hwpt(ucmd, cmd);
1933 default:
1934 return -EOPNOTSUPP;
1935 }
1936 }
1937
1938 bool iommufd_should_fail(void)
1939 {
1940 return should_fail(&fail_iommufd, 1);
1941 }
1942
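/*
 * Module init for the selftest pieces: create the fail_iommufd fault
 * injection attribute in debugfs, register the selftest platform device and
 * the mock bus, then register the mock IOMMU instance on that bus and
 * allocate its IOPF queue.
 */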
1943 int __init iommufd_test_init(void)
1944 {
1945 struct platform_device_info pdevinfo = {
1946 .name = "iommufd_selftest_iommu",
1947 };
1948 int rc;
1949
1950 dbgfs_root =
1951 fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
1952
1953 selftest_iommu_dev = platform_device_register_full(&pdevinfo);
1954 if (IS_ERR(selftest_iommu_dev)) {
1955 rc = PTR_ERR(selftest_iommu_dev);
1956 goto err_dbgfs;
1957 }
1958
1959 rc = bus_register(&iommufd_mock_bus_type.bus);
1960 if (rc)
1961 goto err_platform;
1962
1963 rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev,
1964 &selftest_iommu_dev->dev, NULL, "%s",
1965 dev_name(&selftest_iommu_dev->dev));
1966 if (rc)
1967 goto err_bus;
1968
1969 rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
1970 &iommufd_mock_bus_type.bus,
1971 &iommufd_mock_bus_type.nb);
1972 if (rc)
1973 goto err_sysfs;
1974
1975 refcount_set(&mock_iommu.users, 1);
1976 init_completion(&mock_iommu.complete);
1977
1978 mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
1979 mock_iommu.iommu_dev.max_pasids = (1 << MOCK_PASID_WIDTH);
1980
1981 return 0;
1982
1983 err_sysfs:
1984 iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
1985 err_bus:
1986 bus_unregister(&iommufd_mock_bus_type.bus);
1987 err_platform:
1988 platform_device_unregister(selftest_iommu_dev);
1989 err_dbgfs:
1990 debugfs_remove_recursive(dbgfs_root);
1991 return rc;
1992 }
1993
1994 static void iommufd_test_wait_for_users(void)
1995 {
1996 if (refcount_dec_and_test(&mock_iommu.users))
1997 return;
1998 /*
1999 * Time out waiting for iommu device user count to become 0.
2000 *
2001 * Note that this is just making an example here, since the selftest is
2002 * built into the iommufd module, i.e. it only unplugs the iommu device
2003 * when unloading the module. So, it is expected that this WARN_ON will
2004 * not trigger, as long as any iommufd FDs are open.
2005 */
2006 WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
2007 msecs_to_jiffies(10000)));
2008 }
2009
2010 void iommufd_test_exit(void)
2011 {
2012 if (mock_iommu_iopf_queue) {
2013 iopf_queue_free(mock_iommu_iopf_queue);
2014 mock_iommu_iopf_queue = NULL;
2015 }
2016
2017 iommufd_test_wait_for_users();
2018 iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2019 iommu_device_unregister_bus(&mock_iommu.iommu_dev,
2020 &iommufd_mock_bus_type.bus,
2021 &iommufd_mock_bus_type.nb);
2022 bus_unregister(&iommufd_mock_bus_type.bus);
2023 platform_device_unregister(selftest_iommu_dev);
2024 debugfs_remove_recursive(dbgfs_root);
2025 }
2026