1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <asm/unistd.h>
4 #include <stdlib.h>
5 #include <sys/capability.h>
6 #include <sys/mman.h>
7 #include <sys/eventfd.h>
8
9 #define __EXPORTED_HEADERS__
10 #include <linux/vfio.h>
11
12 #include "iommufd_utils.h"
13
14 static unsigned long HUGEPAGE_SIZE;
15
/*
 * Read the THP PMD page size from sysfs; fall back to 2MiB when the
 * file is unavailable or the contents cannot be read sanely.
 */
static unsigned long get_huge_page_size(void)
{
	const unsigned long fallback = 2 * 1024 * 1024;
	char line[80];
	int nread;
	int fd;

	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
		  O_RDONLY);
	if (fd < 0)
		return fallback;

	nread = read(fd, line, sizeof(line));
	close(fd);
	/* Reject errors, empty reads, and possibly-truncated reads */
	if (nread <= 0 || nread == sizeof(line))
		return fallback;
	line[nread] = 0;
	return strtoul(line, NULL, 10);
}
34
setup_sizes(void)35 static __attribute__((constructor)) void setup_sizes(void)
36 {
37 void *vrc;
38 int rc;
39
40 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
41 HUGEPAGE_SIZE = get_huge_page_size();
42
43 BUFFER_SIZE = PAGE_SIZE * 16;
44 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
45 assert(!rc);
46 assert(buffer);
47 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
48 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
49 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
50 assert(vrc == buffer);
51
52 mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
53 &mfd);
54 assert(mfd_buffer != MAP_FAILED);
55 assert(mfd > 0);
56 }
57
/* Bare fixture: just an open /dev/iommu file descriptor. */
FIXTURE(iommufd)
{
	int fd;
};
62
FIXTURE_SETUP(iommufd)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
}
68
FIXTURE_TEARDOWN(iommufd)
{
	teardown_iommufd(self->fd, _metadata);
}
73
/* Empty body: verifies fixture setup/teardown (open + close) alone. */
TEST_F(iommufd, simple_close)
{
}
77
/* Basic ioctl error paths: bad object id, bad pointer, unknown cmd nr. */
TEST_F(iommufd, cmd_fail)
{
	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };

	/* object id is invalid */
	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
	/* Bad pointer */
	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
	/* Unknown ioctl */
	EXPECT_ERRNO(ENOTTY,
		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
			   &cmd));
}
91
/*
 * Each ioctl struct must enforce its size field: a size smaller than
 * the last required member fails with EINVAL, an oversized struct with
 * a non-zero trailing byte fails with E2BIG, and an oversized struct
 * whose trailing bytes are zero must behave exactly like the exact
 * size (same success, or same errno on failure).
 */
TEST_F(iommufd, cmd_length)
{
#define TEST_LENGTH(_struct, _ioctl, _last)                              \
	{                                                                \
		size_t min_size = offsetofend(struct _struct, _last);    \
		struct {                                                 \
			struct _struct cmd;                              \
			uint8_t extra;                                   \
		} cmd = { .cmd = { .size = min_size - 1 },               \
			  .extra = UINT8_MAX };                          \
		int old_errno;                                           \
		int rc;                                                  \
									 \
		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
		cmd.cmd.size = sizeof(struct _struct);                   \
		rc = ioctl(self->fd, _ioctl, &cmd);                      \
		old_errno = errno;                                       \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		cmd.extra = 0;                                           \
		if (rc) {                                                \
			EXPECT_ERRNO(old_errno,                          \
				     ioctl(self->fd, _ioctl, &cmd));     \
		} else {                                                 \
			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
		}                                                        \
	}

	/* One entry per UAPI command, naming its last mandatory member */
	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
	TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
		    out_iova_alignment);
	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
		    allowed_iovas);
	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
	TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
	TEST_LENGTH(iommu_viommu_alloc, IOMMU_VIOMMU_ALLOC, out_viommu_id);
	TEST_LENGTH(iommu_vdevice_alloc, IOMMU_VDEVICE_ALLOC, virt_id);
	TEST_LENGTH(iommu_ioas_change_process, IOMMU_IOAS_CHANGE_PROCESS,
		    __reserved);
#undef TEST_LENGTH
}
142
/*
 * Extended-struct handling: a longer struct is accepted as long as the
 * trailing "future" bytes are zero; non-zero trailing bytes are E2BIG.
 */
TEST_F(iommufd, cmd_ex_fail)
{
	struct {
		struct iommu_destroy cmd;
		__u64 future;
	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };

	/* object id is invalid and command is longer */
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* future area is non-zero */
	cmd.future = 1;
	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Original command "works" (fails on the bad id, not the size) */
	cmd.cmd.size = sizeof(cmd.cmd);
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Short command fails */
	cmd.cmd.size = sizeof(cmd.cmd) - 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
}
162
/*
 * IOMMU_OPTION_RLIMIT_MODE is a global (fd-wide) option: get/set with
 * valid values 0/1 only, and per-IOAS options like HUGE_PAGES must be
 * rejected with ENOENT when no object id is given.
 */
TEST_F(iommufd, global_options)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_RLIMIT_MODE,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 1,
	};

	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	/* This requires root */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	/* Only 0 and 1 are valid values */
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* The set above sticks */
	cmd.op = IOMMU_OPTION_OP_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	/* Restore the default */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Per-IOAS options are not reachable through the global path */
	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.op = IOMMU_OPTION_OP_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
}
197
/*
 * Drop CAP_IPC_LOCK from the effective set so pinned/locked memory in
 * the following tests is subject to normal rlimit accounting.
 */
static void drop_cap_ipc_lock(struct __test_metadata *_metadata)
{
	cap_t caps;
	cap_value_t cap_list[1] = { CAP_IPC_LOCK };

	caps = cap_get_proc();
	ASSERT_NE(caps, NULL);
	ASSERT_NE(-1,
		  cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
	ASSERT_NE(-1, cap_set_proc(caps));
	cap_free(caps);
}
210
/*
 * Return the numeric value of the "var" line (e.g. "VmPin:") from
 * /proc/<pid>/status, or -1 when the file cannot be opened or the tag
 * is not present.
 */
static long get_proc_status_value(pid_t pid, const char *var)
{
	FILE *fp;
	char buf[80], tag[80];
	long val = -1;

	snprintf(buf, sizeof(buf), "/proc/%d/status", pid);
	fp = fopen(buf, "r");
	if (!fp)
		return val;

	/*
	 * Parse each fetched line in place with sscanf().  The previous
	 * fgets()+fscanf(fp, ...) mix consumed two lines per iteration,
	 * so a tag landing on every other line was never examined.  The
	 * bounded %79s also prevents overflowing tag[].
	 */
	while (fgets(buf, sizeof(buf), fp)) {
		if (sscanf(buf, "%79s %ld", tag, &val) == 2 &&
		    !strcmp(tag, var))
			break;
		val = -1;	/* don't leak a non-matching line's value */
	}

	fclose(fp);
	return val;
}
229
/* Pinned pages accounted to @pid (VmPin, kB), or -1 if unavailable. */
static long get_vm_pinned(pid_t pid)
{
	return get_proc_status_value(pid, "VmPin:");
}
234
/* Locked pages accounted to @pid (VmLck, kB), or -1 if unavailable. */
static long get_vm_locked(pid_t pid)
{
	return get_proc_status_value(pid, "VmLck:");
}
239
/* Fixture for IOMMU_IOAS_CHANGE_PROCESS accounting tests. */
FIXTURE(change_process)
{
	int fd;
	uint32_t ioas_id;
};
245
/* Variant selects the IOPT_PAGES_ACCOUNT_* accounting mode to test. */
FIXTURE_VARIANT(change_process)
{
	int accounting;
};
250
FIXTURE_SETUP(change_process)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	/* Without CAP_IPC_LOCK the pin/lock counters below are enforced */
	drop_cap_ipc_lock(_metadata);
	if (variant->accounting != IOPT_PAGES_ACCOUNT_NONE) {
		/* RLIMIT_MODE val64: 1 selects mm accounting, 0 user */
		struct iommu_option set_limit_cmd = {
			.size = sizeof(set_limit_cmd),
			.option_id = IOMMU_OPTION_RLIMIT_MODE,
			.op = IOMMU_OPTION_OP_SET,
			.val64 = (variant->accounting == IOPT_PAGES_ACCOUNT_MM),
		};
		ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &set_limit_cmd));
	}

	test_ioctl_ioas_alloc(&self->ioas_id);
	/* A mock domain makes mappings actually pin pages */
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
}
270
FIXTURE_TEARDOWN(change_process)
{
	teardown_iommufd(self->fd, _metadata);
}
275
/* No pin/lock accounting at all. */
FIXTURE_VARIANT_ADD(change_process, account_none)
{
	.accounting = IOPT_PAGES_ACCOUNT_NONE,
};
280
/* Accounting charged against the user (RLIMIT mode 0). */
FIXTURE_VARIANT_ADD(change_process, account_user)
{
	.accounting = IOPT_PAGES_ACCOUNT_USER,
};
285
/* Accounting charged against the mm (RLIMIT mode 1, shows in VmLck). */
FIXTURE_VARIANT_ADD(change_process, account_mm)
{
	.accounting = IOPT_PAGES_ACCOUNT_MM,
};
290
/*
 * IOMMU_IOAS_CHANGE_PROCESS moves pin/lock accounting of file-backed
 * mappings to the calling process; it must refuse while non-file
 * (anonymous) mappings exist in the IOAS.
 */
TEST_F(change_process, basic)
{
	pid_t parent = getpid();
	pid_t child;
	__u64 iova;
	struct iommu_ioas_change_process cmd = {
		.size = sizeof(cmd),
	};

	/* Expect failure if non-file maps exist */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
	test_ioctl_ioas_unmap(iova, PAGE_SIZE);

	/* Change process works in current process. */
	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));

	/* Change process works in another process */
	child = fork();
	if (!child) {
		/* /proc counters are in kB */
		int nlock = PAGE_SIZE / 1024;

		/* Parent accounts for locked memory before */
		ASSERT_EQ(nlock, get_vm_pinned(parent));
		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
			ASSERT_EQ(nlock, get_vm_locked(parent));
		ASSERT_EQ(0, get_vm_pinned(getpid()));
		ASSERT_EQ(0, get_vm_locked(getpid()));

		ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));

		/* Child accounts for locked memory after */
		ASSERT_EQ(0, get_vm_pinned(parent));
		ASSERT_EQ(0, get_vm_locked(parent));
		ASSERT_EQ(nlock, get_vm_pinned(getpid()));
		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
			ASSERT_EQ(nlock, get_vm_locked(getpid()));

		exit(0);
	}
	/* NOTE(review): child exit status is not checked, so a child-side
	 * ASSERT failure would not fail the parent test — confirm intent. */
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));
}
335
/* Fixture with an IOAS and, per variant, mock domains attached to it. */
FIXTURE(iommufd_ioas)
{
	int fd;
	uint32_t ioas_id;
	uint32_t stdev_id;	/* last mock device created (0 if none) */
	uint32_t hwpt_id;	/* hwpt of the last mock domain */
	uint32_t device_id;	/* idev id of the last mock device */
	uint64_t base_iova;	/* MOCK_APERTURE_START when domains exist */
	uint32_t device_pasid_id;	/* PASID-capable mock device */
};
346
FIXTURE_VARIANT(iommufd_ioas)
{
	unsigned int mock_domains;	/* number of mock domains to attach */
	unsigned int memory_limit;	/* temp pin limit in pages, 0 = default */
	bool pasid_capable;		/* also create a PASID mock device */
};
353
FIXTURE_SETUP(iommufd_ioas)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	if (!variant->memory_limit) {
		test_ioctl_set_default_memory_limit();
	} else {
		test_ioctl_set_temp_memory_limit(variant->memory_limit);
	}

	/* Later iterations overwrite the ids: self-> keeps the last domain */
	for (i = 0; i != variant->mock_domains; i++) {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
				     &self->hwpt_id, &self->device_id);
		test_cmd_dev_check_cache_all(self->device_id,
					     IOMMU_TEST_DEV_CACHE_DEFAULT);
		self->base_iova = MOCK_APERTURE_START;
	}

	if (variant->pasid_capable)
		test_cmd_mock_domain_flags(self->ioas_id,
					   MOCK_FLAGS_DEVICE_PASID,
					   NULL, NULL,
					   &self->device_pasid_id);
}
383
FIXTURE_TEARDOWN(iommufd_ioas)
{
	/* Undo any temp pin limit a variant installed */
	test_ioctl_set_default_memory_limit();
	teardown_iommufd(self->fd, _metadata);
}
389
/* IOAS with no attached domain: ids stay 0, ENOENT paths exercised. */
FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
{
};
393
/* One mock domain plus an extra PASID-capable device. */
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
{
	.mock_domains = 1,
	.pasid_capable = true,
};
399
/* Two mock domains sharing the IOAS. */
FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
{
	.mock_domains = 2,
};
404
/* One mock domain with a tiny 16-page pin limit. */
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
{
	.mock_domains = 1,
	.memory_limit = 16,
};
410
/* Empty body: the IOAS must be cleaned up automatically at teardown. */
TEST_F(iommufd_ioas, ioas_auto_destroy)
{
}
414
TEST_F(iommufd_ioas, ioas_destroy)
{
	if (self->stdev_id) {
		/* IOAS cannot be freed while a device has a HWPT using it */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	} else {
		/* Can allocate and manually free an IOAS table */
		test_ioctl_destroy(self->ioas_id);
	}
}
426
/*
 * Exercises nested HWPT allocation: negative allocation paths, two
 * nested domains over a common NEST_PARENT, IOPF-capable allocation,
 * the HWPT invalidate UAPI (including partial-failure semantics), and
 * the attach/replace/destroy ordering rules.
 */
TEST_F(iommufd_ioas, alloc_hwpt_nested)
{
	const uint32_t min_data_len =
		offsetofend(struct iommu_hwpt_selftest, iotlb);
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
	uint32_t nested_hwpt_id[2] = {};
	uint32_t num_inv;
	uint32_t parent_hwpt_id = 0;
	uint32_t parent_hwpt_id_not_work = 0;	/* lacks NEST_PARENT */
	uint32_t test_hwpt_id = 0;
	uint32_t iopf_hwpt_id;
	uint32_t fault_id;
	uint32_t fault_fd;

	if (self->device_id) {
		/* Negative tests */
		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
				    &test_hwpt_id);
		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
				    &test_hwpt_id);
		/* FAULT_ID_VALID cannot combine with NEST_PARENT */
		test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT |
					    IOMMU_HWPT_FAULT_ID_VALID,
				    &test_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT,
				    &parent_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id_not_work);

		/* Negative nested tests */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_NONE, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   min_data_len - 1);
		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, NULL,
					   sizeof(data));
		test_err_hwpt_alloc_nested(
			EOPNOTSUPP, self->device_id, parent_hwpt_id,
			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		/* Parent allocated without NEST_PARENT cannot nest */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id_not_work, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));

		/* Allocate two nested hwpts sharing one common parent hwpt */
		test_ioctl_fault_alloc(&fault_id, &fault_fd);
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		/* A bogus fault_id must fail even with FAULT_ID_VALID */
		test_err_hwpt_alloc_iopf(ENOENT, self->device_id,
					 parent_hwpt_id, UINT32_MAX,
					 IOMMU_HWPT_FAULT_ID_VALID,
					 &iopf_hwpt_id,
					 IOMMU_HWPT_DATA_SELFTEST,
					 &data, sizeof(data));
		test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id,
					 fault_id, IOMMU_HWPT_FAULT_ID_VALID,
					 &iopf_hwpt_id,
					 IOMMU_HWPT_DATA_SELFTEST, &data,
					 sizeof(data));
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
					      IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
					      IOMMU_TEST_IOTLB_DEFAULT);

		/* Negative test: a nested hwpt on top of a nested hwpt */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   nested_hwpt_id[0], 0, &test_hwpt_id,
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		/* Negative test: parent hwpt now cannot be freed */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, parent_hwpt_id));

		/* hwpt_invalidate does not support a parent hwpt */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, parent_hwpt_id, inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Check data_type by passing zero-length array */
		num_inv = 0;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: Invalid data_type */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: structure size sanity */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs) + 1, &num_inv);
		assert(!num_inv);

		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 1, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid flag is passed */
		num_inv = 1;
		inv_reqs[0].flags = 0xffffffff;
		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0],
					 inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid data_uptr when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid entry_len when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 0, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid iotlb_id */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/*
		 * Invalidate the 1st iotlb entry but fail the 2nd request
		 * due to invalid flags configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 0;
		inv_reqs[1].flags = 0xffffffff;
		inv_reqs[1].iotlb_id = 1;
		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0],
					 inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/*
		 * Invalidate the 1st iotlb entry but fail the 2nd request
		 * due to invalid iotlb_id configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 0;
		inv_reqs[1].flags = 0;
		inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/* Invalidate the 2nd iotlb entry and verify */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 1;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/* Invalidate the 3rd and 4th iotlb entries and verify */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 2;
		inv_reqs[1].flags = 0;
		inv_reqs[1].iotlb_id = 3;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 2);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);

		/* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
		num_inv = 1;
		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
		test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);

		/* Attach device to nested_hwpt_id[0] that then will be busy */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));

		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
		test_ioctl_destroy(nested_hwpt_id[0]);

		/* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
		/* Trigger an IOPF on the device */
		test_cmd_trigger_iopf(self->device_id, fault_fd);

		/* Detach from nested_hwpt_id[1] and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
		test_ioctl_destroy(nested_hwpt_id[1]);
		test_ioctl_destroy(iopf_hwpt_id);

		/* Detach from the parent hw_pagetable and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(parent_hwpt_id);
		test_ioctl_destroy(parent_hwpt_id_not_work);
		close(fault_fd);
		test_ioctl_destroy(fault_id);
	} else {
		/* No device in this variant: everything is ENOENT */
		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id);
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[0]);
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[1]);
	}
}
718
TEST_F(iommufd_ioas, hwpt_attach)
{
	/* Create a device attached directly to a hwpt */
	if (self->stdev_id) {
		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
	} else {
		/* hwpt_id is 0 in the no_domain variant */
		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
	}
}
728
TEST_F(iommufd_ioas, ioas_area_destroy)
{
	/* Adding an area does not change ability to destroy */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
	if (self->stdev_id)
		/* Still EBUSY because of the attached device, not the area */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	else
		test_ioctl_destroy(self->ioas_id);
}
739
TEST_F(iommufd_ioas, ioas_area_auto_destroy)
{
	int i;

	/* Can allocate and automatically free an IOAS table with many areas */
	for (i = 0; i != 10; i++) {
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	}
}
750
/*
 * IOMMU_GET_HW_INFO buffer-size handling: zero, exact, larger (trailing
 * bytes zeroed) and smaller (prefix still filled) user buffers, plus
 * the PASID-width report for PASID-capable devices.
 */
TEST_F(iommufd_ioas, get_hw_info)
{
	struct iommu_test_hw_info buffer_exact;
	struct iommu_test_hw_info_buffer_larger {
		struct iommu_test_hw_info info;
		uint64_t trailing_bytes;
	} buffer_larger;

	if (self->device_id) {
		uint8_t max_pasid = 0;

		/* Provide a zero-size user_buffer */
		test_cmd_get_hw_info(self->device_id,
				     IOMMU_HW_INFO_TYPE_DEFAULT, NULL, 0);
		/* Provide a user_buffer with exact size */
		test_cmd_get_hw_info(self->device_id,
				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
				     sizeof(buffer_exact));

		/* Request for a wrong data_type, and a correct one */
		test_err_get_hw_info(EOPNOTSUPP, self->device_id,
				     IOMMU_HW_INFO_TYPE_SELFTEST + 1,
				     &buffer_exact, sizeof(buffer_exact));
		test_cmd_get_hw_info(self->device_id,
				     IOMMU_HW_INFO_TYPE_SELFTEST, &buffer_exact,
				     sizeof(buffer_exact));
		/*
		 * Provide a user_buffer with size larger than the exact size to check if
		 * kernel zero the trailing bytes.
		 */
		test_cmd_get_hw_info(self->device_id,
				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
				     sizeof(buffer_larger));
		/*
		 * Provide a user_buffer with size smaller than the exact size to check if
		 * the fields within the size range still gets updated.
		 */
		test_cmd_get_hw_info(self->device_id,
				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
				     offsetofend(struct iommu_test_hw_info,
						 flags));
		/* Non-PASID device reports a zero PASID width */
		test_cmd_get_hw_info_pasid(self->device_id, &max_pasid);
		ASSERT_EQ(0, max_pasid);
		if (variant->pasid_capable) {
			test_cmd_get_hw_info_pasid(self->device_pasid_id,
						   &max_pasid);
			ASSERT_EQ(MOCK_PASID_WIDTH, max_pasid);
		}
	} else {
		test_err_get_hw_info(ENOENT, self->device_id,
				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
				     sizeof(buffer_exact));
		test_err_get_hw_info(ENOENT, self->device_id,
				     IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
				     sizeof(buffer_larger));
	}
}
808
/* Map/unmap basics: whole-area unmap only, no splits, no overlaps. */
TEST_F(iommufd_ioas, area)
{
	int i;

	/* Unmap fails if nothing is mapped */
	for (i = 0; i != 10; i++)
		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);

	/* Unmap works */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
				      PAGE_SIZE);

	/* Split fails: unmapping half of a 2-page area is rejected */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
				  self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
				  PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
				  PAGE_SIZE);

	/* Over map fails: every overlap shape with the existing area */
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 17 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 15 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
				      self->base_iova + 15 * PAGE_SIZE);

	/* unmap all works */
	test_ioctl_ioas_unmap(0, UINT64_MAX);

	/* Unmap all succeeds on an empty IOAS */
	test_ioctl_ioas_unmap(0, UINT64_MAX);
}
851
/*
 * A ranged unmap only removes areas that lie fully inside the range;
 * a range that clips an area fails, a range covering whole areas
 * succeeds and reports the total length actually unmapped.
 */
TEST_F(iommufd_ioas, unmap_fully_contained_areas)
{
	uint64_t unmap_len;
	int i;

	/* Give no_domain some space to rewind base_iova */
	self->base_iova += 4 * PAGE_SIZE;

	/* Four 8-page areas spaced 16 pages apart */
	for (i = 0; i != 4; i++)
		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
					  self->base_iova + i * 16 * PAGE_SIZE);

	/* Unmap not fully contained area doesn't work */
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT,
				  self->base_iova + 3 * 16 * PAGE_SIZE +
					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);

	/* Unmap fully contained areas works */
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
					    self->base_iova - 4 * PAGE_SIZE,
					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
						    4 * PAGE_SIZE,
					    &unmap_len));
	/* 4 areas x 8 pages each */
	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
}
880
/*
 * Automatic IOVA allocation: natural alignment to the mapping length,
 * avoidance of reserved regions, and confinement to allowed ranges.
 */
TEST_F(iommufd_ioas, area_auto_iova)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};
	__u64 iovas[10];
	int i;

	/* Simple 4k pages */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);

	/* Kernel automatically aligns IOVAs properly */
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		if (self->stdev_id) {
			test_ioctl_ioas_map(buffer, length, &iovas[i]);
		} else {
			/* No domain: use an arbitrary aligned VA instead */
			test_ioctl_ioas_map((void *)(1UL << 31), length,
					    &iovas[i]);
		}
		/* IOVA is aligned to the largest power of two <= length */
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Avoids a reserved region */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(false,
			  iovas[i] > test_cmd.add_reserved.start &&
				  iovas[i] <
					  test_cmd.add_reserved.start +
						  test_cmd.add_reserved.length);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Allowed region intersects with a reserved region */
	ranges[0].start = PAGE_SIZE;
	ranges[0].last = PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allocate from an allowed region */
	if (self->stdev_id) {
		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
	} else {
		ranges[0].start = PAGE_SIZE * 200;
		ranges[0].last = PAGE_SIZE * 600 - 1;
	}
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		/* Whole allocation stays inside the allowed range */
		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
}
967
968 /* https://lore.kernel.org/r/685af644.a00a0220.2e5631.0094.GAE@google.com */
/*
 * Regression test: a reserved region whose start + length wraps past
 * the top of the address space must not confuse the allocator — the
 * subsequent map has nowhere to go and must fail with ENOSPC.
 */
TEST_F(iommufd_ioas, reserved_overflow)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved.start = 6,
	};
	unsigned int map_len;
	__u64 iova;

	if (PAGE_SIZE == 4096) {
		test_cmd.add_reserved.length = 0xffffffffffff8001;
		map_len = 0x5000;
	} else {
		test_cmd.add_reserved.length =
			0xffffffffffffffff - MOCK_PAGE_SIZE * 16;
		map_len = MOCK_PAGE_SIZE * 10;
	}

	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	test_err_ioctl_ioas_map(ENOSPC, buffer, map_len, &iova);
}
994
/*
 * Reserved and allowed IOVA ranges are mutually exclusive in either
 * installation order: each must fail with EADDRINUSE when it would
 * intersect an existing one of the other kind.
 */
TEST_F(iommufd_ioas, area_allowed)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Reserved intersects an allowed */
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
	test_cmd.add_reserved.length = PAGE_SIZE;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd,
			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			   &test_cmd));
	/* num_iovas = 0 clears the allowed list */
	allow_cmd.num_iovas = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allowed intersects a reserved */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
}
1036
TEST_F(iommufd_ioas,copy_area)1037 TEST_F(iommufd_ioas, copy_area)
1038 {
1039 struct iommu_ioas_copy copy_cmd = {
1040 .size = sizeof(copy_cmd),
1041 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1042 .dst_ioas_id = self->ioas_id,
1043 .src_ioas_id = self->ioas_id,
1044 .length = PAGE_SIZE,
1045 };
1046
1047 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
1048
1049 /* Copy inside a single IOAS */
1050 copy_cmd.src_iova = self->base_iova;
1051 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
1052 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, ©_cmd));
1053
1054 /* Copy between IOAS's */
1055 copy_cmd.src_iova = self->base_iova;
1056 copy_cmd.dst_iova = 0;
1057 test_ioctl_ioas_alloc(©_cmd.dst_ioas_id);
1058 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, ©_cmd));
1059 }
1060
TEST_F(iommufd_ioas, iova_ranges)
{
	/*
	 * Exercise IOMMU_IOAS_IOVA_RANGES: reading the available ranges,
	 * the EMSGSIZE short-buffer protocol, and range splitting after a
	 * reserved region is inserted.
	 */
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
	};
	/* Reuse the global test buffer as the output array */
	struct iommu_iova_range *ranges = buffer;
	struct iommu_ioas_iova_ranges ranges_cmd = {
		.size = sizeof(ranges_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Range can be read */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	if (!self->stdev_id) {
		/* No device attached: whole space, byte alignment */
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(SIZE_MAX, ranges[0].last);
		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
	} else {
		/* Mock device limits to its aperture and page size */
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
	}

	/* Buffer too small: EMSGSIZE but num_iovas reports the need */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 0;
	EXPECT_ERRNO(EMSGSIZE,
		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	EXPECT_EQ(0, ranges[0].start);
	EXPECT_EQ(0, ranges[0].last);

	/* 2 ranges: a reserved page splits the open aperture */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	if (!self->stdev_id) {
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
		EXPECT_EQ(SIZE_MAX, ranges[1].last);
	} else {
		/* Reserved page is outside the mock aperture: unchanged */
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}

	/* Buffer too small: only the first range is filled in */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 1;
	if (!self->stdev_id) {
		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
					     &ranges_cmd));
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
	} else {
		ASSERT_EQ(0,
			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}
	/* Second slot must not have been written */
	EXPECT_EQ(0, ranges[1].start);
	EXPECT_EQ(0, ranges[1].last);
}
1136
TEST_F(iommufd_ioas, access_domain_destory)
{
	/*
	 * Destroy the device (and its domain) while an access has pages
	 * pinned starting mid way through a huge page, forcing an unpin
	 * that straddles the huge page boundary.
	 * NOTE(review): "destory" is a typo in the test name; kept so
	 * historical test results remain comparable.
	 */
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
				  .length = PAGE_SIZE},
	};
	size_t buf_size = 2 * HUGEPAGE_SIZE;
	uint8_t *buf;

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));

	/* Causes a complicated unpin across a huge page boundary */
	if (self->stdev_id)
		test_ioctl_destroy(self->stdev_id);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);
	ASSERT_EQ(0, munmap(buf, buf_size));
}
1170
TEST_F(iommufd_ioas, access_pin)
{
	/*
	 * Pin pages through an access object at every length from one page
	 * up to the full buffer, covering: a single pin/unpin, two
	 * simultaneous pins of the same range, and pinning while a mock
	 * domain is attached and detached.
	 */
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_test_cmd check_map_cmd = {
		.size = sizeof(check_map_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
		.check_map = { .iova = MOCK_APERTURE_START,
			       .length = BUFFER_SIZE,
			       .uptr = (uintptr_t)buffer },
	};
	uint32_t access_pages_id;
	unsigned int npages;

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
		uint32_t mock_stdev_id;
		uint32_t mock_hwpt_id;

		access_cmd.access_pages.length = npages * PAGE_SIZE;

		/* Single map/unmap */
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		/* Double user: two concurrent pins of the same pages */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);
		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);

		/* Add/remove a domain with a user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
				     &mock_hwpt_id, NULL);
		/* The new domain must see the already-pinned mapping */
		check_map_cmd.id = mock_hwpt_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
				   &check_map_cmd));

		test_ioctl_destroy(mock_stdev_id);
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
	}
	test_cmd_destroy_access(access_cmd.id);
}
1242
TEST_F(iommufd_ioas, access_pin_unmap)
{
	/*
	 * Unmapping an area that an access has pinned triggers the access's
	 * unmap callback, which implicitly destroys the pinned-pages object;
	 * a later explicit destroy must then see ENOENT.
	 */
	struct iommu_test_cmd access_pages_cmd = {
		.size = sizeof(access_pages_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};

	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_pages_cmd));

	/* Trigger the unmap op */
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);

	/* kernel removed the item for us */
	test_err_destroy_access_pages(
		ENOENT, access_pages_cmd.id,
		access_pages_cmd.access_pages.out_access_pages_id);
}
1268
/*
 * Exercise IOMMU_TEST_OP_ACCESS_RW against the mapping at @iova backed by
 * the global test buffer.  Sweeps unaligned start offsets around the first
 * page boundary (iova + PAGE_SIZE +/- 50) with lengths 1..sizeof(tmp),
 * checking both the read and write directions against the buffer contents,
 * then performs one large multi-page read.  @def_flags is OR'd into every
 * operation (e.g. MOCK_ACCESS_RW_SLOW_PATH).
 */
static void check_access_rw(struct __test_metadata *_metadata, int fd,
			    unsigned int access_id, uint64_t iova,
			    unsigned int def_flags)
{
	uint16_t tmp[32];
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_RW,
		.id = access_id,
		.access_rw = { .uptr = (uintptr_t)tmp },
	};
	uint16_t *buffer16 = buffer;
	unsigned int i;
	void *tmp2;

	/* Randomize the buffer so reads can be verified byte-for-byte */
	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
		buffer16[i] = rand();

	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
	     access_cmd.access_rw.iova++) {
		for (access_cmd.access_rw.length = 1;
		     access_cmd.access_rw.length < sizeof(tmp);
		     access_cmd.access_rw.length++) {
			/* Read direction: tmp must match the buffer */
			access_cmd.access_rw.flags = def_flags;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));

			/* Write direction: the buffer must match tmp */
			for (i = 0; i != ARRAY_SIZE(tmp); i++)
				tmp[i] = rand();
			access_cmd.access_rw.flags = def_flags |
						     MOCK_ACCESS_RW_WRITE;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));
		}
	}

	/* Multi-page test */
	tmp2 = malloc(BUFFER_SIZE);
	ASSERT_NE(NULL, tmp2);
	access_cmd.access_rw.iova = iova;
	access_cmd.access_rw.length = BUFFER_SIZE;
	access_cmd.access_rw.flags = def_flags;
	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			   &access_cmd));
	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
	free(tmp2);
}
1330
TEST_F(iommufd_ioas, access_rw)
{
	/* Run the RW sweep via both the fast and forced-slow kernel paths */
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);
	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	check_access_rw(_metadata, self->fd, access_id, iova,
			MOCK_ACCESS_RW_SLOW_PATH);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}
1344
TEST_F(iommufd_ioas, access_rw_unaligned)
{
	/* Same RW sweep, but with the mapping offset by a sub-page amount */
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Unaligned pages */
	iova = self->base_iova + MOCK_PAGE_SIZE;
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}
1359
TEST_F(iommufd_ioas, fork_gone)
{
	/*
	 * Map from a child process, then let the child (and its mm) die.
	 * Whether the pages remain usable depends on whether a domain had
	 * already pinned them at map time.
	 */
	__u32 access_id;
	pid_t child;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		exit(0);
	}
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	if (self->stdev_id) {
		/*
		 * If a domain already existed then everything was pinned within
		 * the fork, so this copies from one domain to another.
		 */
		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
		check_access_rw(_metadata, self->fd, access_id,
				MOCK_APERTURE_START, 0);

	} else {
		/*
		 * Otherwise we need to actually pin pages which can't happen
		 * since the fork is gone.
		 */
		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
	}

	test_cmd_destroy_access(access_id);
}
1396
TEST_F(iommufd_ioas, fork_present)
{
	/*
	 * Map from a child that stays alive: an eventfd signals when the
	 * child's mapping is ready, and the child blocks on a pipe read
	 * until the parent has finished accessing the remote pages.
	 */
	__u32 access_id;
	int pipefds[2];
	uint64_t tmp;
	pid_t child;
	int efd;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
	efd = eventfd(0, EFD_CLOEXEC);
	ASSERT_NE(-1, efd);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		__u64 iova;
		uint64_t one = 1;

		close(pipefds[1]);
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		/* Tell the parent the map is in place */
		if (write(efd, &one, sizeof(one)) != sizeof(one))
			exit(100);
		/* Stay alive until the parent closes its write end */
		if (read(pipefds[0], &iova, 1) != 1)
			exit(100);
		exit(0);
	}
	close(pipefds[0]);
	ASSERT_NE(-1, child);
	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));

	/* Read pages from the remote process */
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);

	/* Release the child and reap it */
	ASSERT_EQ(0, close(pipefds[1]));
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	test_cmd_destroy_access(access_id);
}
1439
TEST_F(iommufd_ioas, ioas_option_huge_pages)
{
	/*
	 * IOMMU_OPTION_HUGE_PAGES is a boolean defaulting to 1; only 0 and
	 * 1 may be set.  val64 is primed with 3 before each GET to prove
	 * the kernel actually writes it back.
	 */
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	/* Default is enabled */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* GET reflects the SET */
	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.val64 = 3;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	/* Non-boolean values are rejected */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Restore the default */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
}
1470
TEST_F(iommufd_ioas, ioas_iova_alloc)
{
	/*
	 * Map every length up to two pages.  With a mock domain attached,
	 * lengths not aligned to MOCK_PAGE_SIZE must be rejected.
	 */
	unsigned int length;
	__u64 iova;

	for (length = 1; length != PAGE_SIZE * 2; length++) {
		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
		} else {
			test_ioctl_ioas_map(buffer, length, &iova);
			test_ioctl_ioas_unmap(iova, length);
		}
	}
}
1485
TEST_F(iommufd_ioas, ioas_align_change)
{
	/*
	 * Toggle the alignment requirement via OPTION_HUGE_PAGES and check
	 * that upgrading it is refused while a misaligned map exists.
	 */
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_SET,
		.object_id = self->ioas_id,
		/* 0 means everything must be aligned to PAGE_SIZE */
		.val64 = 0,
	};

	/*
	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
	 * and map are present.
	 */
	if (variant->mock_domains)
		return;

	/*
	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
	 */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Misalignment is rejected at map time */
	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
				      PAGE_SIZE,
				      MOCK_APERTURE_START + PAGE_SIZE);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Reduce alignment */
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Confirm misalignment is rejected during alignment upgrade */
	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
				  MOCK_APERTURE_START + PAGE_SIZE);
	cmd.val64 = 0;
	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));

	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
}
1529
TEST_F(iommufd_ioas,copy_sweep)1530 TEST_F(iommufd_ioas, copy_sweep)
1531 {
1532 struct iommu_ioas_copy copy_cmd = {
1533 .size = sizeof(copy_cmd),
1534 .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
1535 .src_ioas_id = self->ioas_id,
1536 .dst_iova = MOCK_APERTURE_START,
1537 .length = MOCK_PAGE_SIZE,
1538 };
1539 unsigned int dst_ioas_id;
1540 uint64_t last_iova;
1541 uint64_t iova;
1542
1543 test_ioctl_ioas_alloc(&dst_ioas_id);
1544 copy_cmd.dst_ioas_id = dst_ioas_id;
1545
1546 if (variant->mock_domains)
1547 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1548 else
1549 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1550
1551 test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1552 MOCK_APERTURE_START);
1553
1554 for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1555 iova += 511) {
1556 copy_cmd.src_iova = iova;
1557 if (iova < MOCK_APERTURE_START ||
1558 iova + copy_cmd.length - 1 > last_iova) {
1559 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1560 ©_cmd));
1561 } else {
1562 ASSERT_EQ(0,
1563 ioctl(self->fd, IOMMU_IOAS_COPY, ©_cmd));
1564 test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1565 copy_cmd.length);
1566 }
1567 }
1568
1569 test_ioctl_destroy(dst_ioas_id);
1570 }
1571
TEST_F(iommufd_ioas,dmabuf_simple)1572 TEST_F(iommufd_ioas, dmabuf_simple)
1573 {
1574 size_t buf_size = PAGE_SIZE*4;
1575 __u64 iova;
1576 int dfd;
1577
1578 test_cmd_get_dmabuf(buf_size, &dfd);
1579 test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, 0, &iova);
1580 test_err_ioctl_ioas_map_file(EINVAL, dfd, buf_size, buf_size, &iova);
1581 test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, buf_size + 1, &iova);
1582 test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
1583
1584 close(dfd);
1585 }
1586
TEST_F(iommufd_ioas,dmabuf_revoke)1587 TEST_F(iommufd_ioas, dmabuf_revoke)
1588 {
1589 size_t buf_size = PAGE_SIZE*4;
1590 __u32 hwpt_id;
1591 __u64 iova;
1592 __u64 iova2;
1593 int dfd;
1594
1595 test_cmd_get_dmabuf(buf_size, &dfd);
1596 test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
1597 test_cmd_revoke_dmabuf(dfd, true);
1598
1599 if (variant->mock_domains)
1600 test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
1601 &hwpt_id);
1602
1603 test_err_ioctl_ioas_map_file(ENODEV, dfd, 0, buf_size, &iova2);
1604
1605 test_cmd_revoke_dmabuf(dfd, false);
1606 test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova2);
1607
1608 /* Restore the iova back */
1609 test_ioctl_ioas_unmap(iova, buf_size);
1610 test_ioctl_ioas_map_fixed_file(dfd, 0, buf_size, iova);
1611
1612 close(dfd);
1613 }
1614
FIXTURE(iommufd_mock_domain)
{
	int fd;			/* /dev/iommu */
	uint32_t ioas_id;	/* IOAS all mock domains attach to */
	uint32_t hwpt_id;	/* == hwpt_ids[0], convenience alias */
	uint32_t hwpt_ids[2];	/* one per mock domain created in setup */
	uint32_t stdev_ids[2];
	uint32_t idev_ids[2];
	int mmap_flags;		/* flags for test mmaps (hugetlb-aware) */
	size_t mmap_buf_size;	/* size for test mmaps (hugetlb-aware) */
};
1626
FIXTURE_VARIANT(iommufd_mock_domain)
{
	unsigned int mock_domains;	/* number of mock domains to create */
	bool hugepages;			/* back test buffers with huge pages */
	bool file;			/* map via memfd instead of mmap */
};
1633
FIXTURE_SETUP(iommufd_mock_domain)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);

	for (i = 0; i != variant->mock_domains; i++) {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
				     &self->hwpt_ids[i], &self->idev_ids[i]);
		/*
		 * NOTE(review): checks idev_ids[0] on every iteration rather
		 * than idev_ids[i] - confirm whether only the first device's
		 * cache is intentionally validated here.
		 */
		test_cmd_dev_check_cache_all(self->idev_ids[0],
					     IOMMU_TEST_DEV_CACHE_DEFAULT);
	}
	self->hwpt_id = self->hwpt_ids[0];

	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
	self->mmap_buf_size = PAGE_SIZE * 8;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
	}
}
1663
FIXTURE_TEARDOWN(iommufd_mock_domain)
{
	/* Closes the fd and verifies no leaked objects remain */
	teardown_iommufd(self->fd, _metadata);
}
1668
/* One mock domain, anonymous small-page memory */
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
{
	.mock_domains = 1,
	.hugepages = false,
	.file = false,
};
1675
/* Two mock domains sharing one IOAS, anonymous small-page memory */
FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
{
	.mock_domains = 2,
	.hugepages = false,
	.file = false,
};
1682
/* One mock domain, hugepage-backed memory */
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
	.file = false,
};
1689
/* Two mock domains, hugepage-backed memory */
FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
{
	.mock_domains = 2,
	.hugepages = true,
	.file = false,
};
1696
/* One mock domain, memfd-backed (file) mappings */
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
{
	.mock_domains = 1,
	.hugepages = false,
	.file = true,
};
1703
/* One mock domain, hugepage-sized memfd-backed mappings */
FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
	.file = true,
};
1710
1711
/*
 * Have the kernel check that the user pages made it to the iommu_domain.
 * Verifies against self->hwpt_id (the first domain) and, when a second
 * mock domain exists (self->hwpt_ids[1] is non-zero), against that one
 * as well.
 */
#define check_mock_iova(_ptr, _iova, _length)                                 \
	({                                                                    \
		struct iommu_test_cmd check_map_cmd = {                       \
			.size = sizeof(check_map_cmd),                        \
			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                     \
			.id = self->hwpt_id,                                  \
			.check_map = { .iova = _iova,                         \
				       .length = _length,                     \
				       .uptr = (uintptr_t)(_ptr) },           \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),  \
				&check_map_cmd));                             \
		if (self->hwpt_ids[1]) {                                      \
			check_map_cmd.id = self->hwpt_ids[1];                 \
			ASSERT_EQ(0,                                          \
				  ioctl(self->fd,                             \
					_IOMMU_TEST_CMD(                      \
						IOMMU_TEST_OP_MD_CHECK_MAP),  \
					&check_map_cmd));                     \
		}                                                             \
	})
1736
/*
 * Map anonymous mmap memory into the mock domain, then punch holes in
 * the VMA and confirm mapping the faulted range reports EFAULT.
 */
static void
test_basic_mmap(struct __test_metadata *_metadata,
		struct _test_data_iommufd_mock_domain *self,
		const struct _fixture_variant_iommufd_mock_domain *variant)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;

	/* Simple one page map */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	check_mock_iova(buffer, iova, PAGE_SIZE);

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);

	/* EFAULT half way through mapping */
	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);

	/* EFAULT on first page */
	ASSERT_EQ(0, munmap(buf, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
}
1762
/*
 * Map memfd-backed memory into the mock domain, then confirm that an
 * over-length request and a truncated-to-zero memfd both report EINVAL.
 */
static void
test_basic_file(struct __test_metadata *_metadata,
		struct _test_data_iommufd_mock_domain *self,
		const struct _fixture_variant_iommufd_mock_domain *variant)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;
	int mfd_tmp;
	int prot = PROT_READ | PROT_WRITE;

	/* Simple one page map using the global memfd */
	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
	check_mock_iova(mfd_buffer, iova, PAGE_SIZE);

	buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
	ASSERT_NE(MAP_FAILED, buf);

	/* Request runs past the end of the file */
	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);

	/* Shrink the file to nothing; any map must now fail */
	ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);

	close(mfd_tmp);
}
1788
TEST_F(iommufd_mock_domain, basic)
{
	/* Dispatch to the file- or mmap-backed flavor per variant */
	if (variant->file)
		test_basic_file(_metadata, self, variant);
	else
		test_basic_mmap(_metadata, self, variant);
}
1796
TEST_F(iommufd_mock_domain, ro_unshare)
{
	/*
	 * Map a MAP_PRIVATE file page read-only into the domain, then dirty
	 * it; the CoW must not change which PFN the domain sees.
	 */
	uint8_t *buf;
	__u64 iova;
	int fd;

	fd = open("/proc/self/exe", O_RDONLY);
	ASSERT_NE(-1, fd);

	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	ASSERT_NE(MAP_FAILED, buf);
	close(fd);

	/*
	 * There have been lots of changes to the "unshare" mechanism in
	 * get_user_pages(), make sure it works right. The write to the page
	 * after we map it for reading should not change the assigned PFN.
	 */
	ASSERT_EQ(0,
		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
				       &iova, IOMMU_IOAS_MAP_READABLE));
	check_mock_iova(buf, iova, PAGE_SIZE);
	memset(buf, 1, PAGE_SIZE);
	check_mock_iova(buf, iova, PAGE_SIZE);
	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
}
1823
TEST_F(iommufd_mock_domain, all_aligns)
{
	/*
	 * Map/unmap every (start, end) combination at MOCK_PAGE_SIZE
	 * granularity inside one big buffer, validating the domain mapping
	 * and the per-page refcounts each time.
	 */
	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
						MOCK_PAGE_SIZE;
	size_t buf_size = self->mmap_buf_size;
	unsigned int start;
	unsigned int end;
	uint8_t *buf;
	int prot = PROT_READ | PROT_WRITE;
	int mfd = -1;

	if (variant->file)
		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
	else
		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
	ASSERT_NE(MAP_FAILED, buf);
	if (variant->file)
		ASSERT_GT(mfd, 0);
	check_refs(buf, buf_size, 0);

	/*
	 * Map every combination of page size and alignment within a big region,
	 * less for hugepage case as it takes so long to finish.
	 */
	for (start = 0; start < buf_size; start += test_step) {
		if (variant->hugepages)
			end = buf_size;
		else
			end = start + MOCK_PAGE_SIZE;
		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
			size_t length = end - start;
			__u64 iova;

			if (variant->file) {
				test_ioctl_ioas_map_file(mfd, start, length,
							 &iova);
			} else {
				test_ioctl_ioas_map(buf + start, length, &iova);
			}
			check_mock_iova(buf + start, iova, length);
			/* Refs are tracked per whole CPU page */
			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
				   end / PAGE_SIZE * PAGE_SIZE -
					   start / PAGE_SIZE * PAGE_SIZE,
				   1);

			test_ioctl_ioas_unmap(iova, length);
		}
	}
	/* Everything must be released again */
	check_refs(buf, buf_size, 0);
	ASSERT_EQ(0, munmap(buf, buf_size));
	if (variant->file)
		close(mfd);
}
1877
TEST_F(iommufd_mock_domain, all_aligns_copy)
{
	/*
	 * Same sweep as all_aligns, but while each area exists a throwaway
	 * mock domain is attached and destroyed, forcing the kernel to copy
	 * the mapping into (and back out of) a second domain.
	 */
	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
						MOCK_PAGE_SIZE;
	size_t buf_size = self->mmap_buf_size;
	unsigned int start;
	unsigned int end;
	uint8_t *buf;
	int prot = PROT_READ | PROT_WRITE;
	int mfd = -1;

	if (variant->file)
		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
	else
		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
	ASSERT_NE(MAP_FAILED, buf);
	if (variant->file)
		ASSERT_GT(mfd, 0);
	check_refs(buf, buf_size, 0);

	/*
	 * Map every combination of page size and alignment within a big region,
	 * less for hugepage case as it takes so long to finish.
	 */
	for (start = 0; start < buf_size; start += test_step) {
		if (variant->hugepages)
			end = buf_size;
		else
			end = start + MOCK_PAGE_SIZE;
		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
			size_t length = end - start;
			unsigned int old_id;
			uint32_t mock_stdev_id;
			__u64 iova;

			if (variant->file) {
				test_ioctl_ioas_map_file(mfd, start, length,
							 &iova);
			} else {
				test_ioctl_ioas_map(buf + start, length, &iova);
			}

			/* Add and destroy a domain while the area exists */
			old_id = self->hwpt_ids[1];
			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
					     &self->hwpt_ids[1], NULL);

			/* check_mock_iova also checks hwpt_ids[1] */
			check_mock_iova(buf + start, iova, length);
			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
				   end / PAGE_SIZE * PAGE_SIZE -
					   start / PAGE_SIZE * PAGE_SIZE,
				   1);

			test_ioctl_destroy(mock_stdev_id);
			self->hwpt_ids[1] = old_id;

			test_ioctl_ioas_unmap(iova, length);
		}
	}
	check_refs(buf, buf_size, 0);
	ASSERT_EQ(0, munmap(buf, buf_size));
	if (variant->file)
		close(mfd);
}
1942
/*
 * Pin pages through an access on an IOAS that has no domains, then
 * IOMMU_IOAS_COPY the mapping into the domain-backed IOAS of the fixture.
 * Repeat the cycle after replacing the access's IOAS with a new one to
 * prove the copied pages remain valid across the replace.
 */
TEST_F(iommufd_mock_domain, user_copy)
{
	void *buf = variant->file ? mfd_buffer : buffer;
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buf },
	};
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
		.dst_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	struct iommu_ioas_unmap unmap_cmd = {
		.size = sizeof(unmap_cmd),
		.ioas_id = self->ioas_id,
		.iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	unsigned int new_ioas_id, ioas_id;

	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
	test_ioctl_ioas_alloc(&ioas_id);
	if (variant->file) {
		test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
					    &copy_cmd.src_iova);
	} else {
		test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
				       &copy_cmd.src_iova);
	}
	test_cmd_create_access(ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);

	/* Now replace the ioas with a new one */
	test_ioctl_ioas_alloc(&new_ioas_id);
	if (variant->file) {
		test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
					    &copy_cmd.src_iova);
	} else {
		test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
				       &copy_cmd.src_iova);
	}
	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);

	/* Destroy the old ioas and cleanup copied mapping */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
	test_ioctl_destroy(ioas_id);

	/* Then run the same test again with the new ioas */
	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = new_ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);

	test_ioctl_destroy(new_ioas_id);
}
2017
/*
 * Exercise IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE: bounce the mock device
 * between a fresh IOAS, the auto-created HWPTs, and the fixture IOAS.
 */
TEST_F(iommufd_mock_domain, replace)
{
	uint32_t ioas_id;

	test_ioctl_ioas_alloc(&ioas_id);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);

	/*
	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
	 * should get enoent when we try to use it.
	 */
	if (variant->mock_domains == 1)
		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
					     self->hwpt_ids[0]);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
	if (variant->mock_domains >= 2) {
		/* Replace with a HWPT, idempotent re-replace, then back */
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[0]);
	}

	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
	test_ioctl_destroy(ioas_id);
}
2047
/*
 * Allocate HWPTs on each mock device: invalid flags must fail, attached
 * HWPTs must refuse destruction with EBUSY, and detached ones destroy
 * cleanly.
 */
TEST_F(iommufd_mock_domain, alloc_hwpt)
{
	int i;

	for (i = 0; i != variant->mock_domains; i++) {
		uint32_t hwpt_id[2];
		uint32_t stddev_id;

		/* All bits other than NEST_PARENT are rejected */
		test_err_hwpt_alloc(EOPNOTSUPP,
				    self->idev_ids[i], self->ioas_id,
				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    0, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);

		/* Do a hw_pagetable rotation test */
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
		test_ioctl_destroy(hwpt_id[1]);

		/* A HWPT can also back a new mock domain directly */
		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
		test_ioctl_destroy(stddev_id);
		test_ioctl_destroy(hwpt_id[0]);
	}
}
2077
/* Per-test state for the dirty tracking tests */
FIXTURE(iommufd_dirty_tracking)
{
	int fd;				/* /dev/iommu */
	uint32_t ioas_id;
	uint32_t hwpt_id;
	uint32_t stdev_id;
	uint32_t idev_id;
	unsigned long page_size;	/* granule used for dirty bits */
	unsigned long bitmap_size;	/* number of dirty bits */
	void *bitmap;			/* user bitmap handed to the kernel */
	void *buffer;			/* IOVA-mapped test memory */
};
2090
/* Variants sweep the mapped size and whether hugepages back the buffer */
FIXTURE_VARIANT(iommufd_dirty_tracking)
{
	unsigned long buffer_size;
	bool hugepages;
};
2096
/*
 * Allocate the test buffer (optionally hugepage-backed), the user dirty
 * bitmap, an IOAS, and a mock domain to run dirty tracking against.
 */
FIXTURE_SETUP(iommufd_dirty_tracking)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_SET,
		.val64 = 0,
	};
	size_t mmap_buffer_size;
	unsigned long size;
	int mmap_flags;
	void *vrc;
	int rc;

	if (variant->buffer_size < MOCK_PAGE_SIZE) {
		SKIP(return,
		     "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%u",
		     variant->buffer_size, MOCK_PAGE_SIZE);
	}

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
	mmap_buffer_size = variant->buffer_size;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		mmap_flags |= MAP_HUGETLB | MAP_POPULATE;

		/*
		 * Allocation must be aligned to the HUGEPAGE_SIZE, because the
		 * following mmap() will automatically align the length to be a
		 * multiple of the underlying huge page size. Failing to do the
		 * same at this allocation will result in a memory overwrite by
		 * the mmap().
		 */
		if (mmap_buffer_size < HUGEPAGE_SIZE)
			mmap_buffer_size = HUGEPAGE_SIZE;
	}

	/* posix_memalign reserves an aligned range, mmap(MAP_FIXED) overlays it */
	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
	if (rc || !self->buffer) {
		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
		     mmap_buffer_size, rc);
	}
	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
	vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
		   mmap_flags, -1, 0);
	assert(vrc == self->buffer);

	/* One dirty bit per MOCK_PAGE_SIZE unit of the buffer */
	self->page_size = MOCK_PAGE_SIZE;
	self->bitmap_size = variant->buffer_size / self->page_size;

	/* Provision with an extra (PAGE_SIZE) for the unaligned case */
	size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
	rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
	assert(!rc);
	assert(self->bitmap);
	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);

	test_ioctl_ioas_alloc(&self->ioas_id);

	/*
	 * For dirty testing it is important that the page size fed into
	 * the iommu page tables matches the size the dirty logic
	 * expects, or set_dirty can touch too much stuff.
	 */
	cmd.object_id = self->ioas_id;
	if (!variant->hugepages)
		ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
			     &self->idev_id);
}
2174
FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
	/*
	 * NOTE(review): self->buffer came from posix_memalign() but was then
	 * overlaid by a MAP_FIXED mmap() in setup; free() here matches the
	 * file-wide pattern, though the heap metadata may no longer be valid.
	 */
	free(self->buffer);
	free(self->bitmap);
	teardown_iommufd(self->fd, _metadata);
}
2181
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
{
	/* half of a u8 index bitmap */
	.buffer_size = 8UL * 1024UL,
};
2187
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
{
	/* one full u8 index bitmap */
	.buffer_size = 16UL * 1024UL,
};
2193
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
{
	/* one u32 index bitmap */
	.buffer_size = 64UL * 1024UL,
};
2199
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
{
	/* one u64 index bitmap */
	.buffer_size = 128UL * 1024UL,
};
2205
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
{
	/* two u64 indexes and a trailing end bitmap */
	.buffer_size = 320UL * 1024UL,
};
2211
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
{
	/* 4K bitmap (64M IOVA range) */
	.buffer_size = 64UL * 1024UL * 1024UL,
};
2217
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
{
	/* 4K bitmap (64M IOVA range), hugepage-backed */
	.buffer_size = 64UL * 1024UL * 1024UL,
	.hugepages = true,
};
2224
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
{
	/* 8K bitmap (128M IOVA range) */
	.buffer_size = 128UL * 1024UL * 1024UL,
};
2230
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
{
	/* 8K bitmap (128M IOVA range), hugepage-backed */
	.buffer_size = 128UL * 1024UL * 1024UL,
	.hugepages = true,
};
2237
/*
 * A dirty-tracking HWPT must reject devices that cannot do dirty tracking,
 * and such devices must not be able to allocate a dirty-tracking HWPT.
 */
TEST_F(iommufd_dirty_tracking, enforce_dirty)
{
	uint32_t ioas_id, stddev_id, idev_id;
	uint32_t hwpt_id, _hwpt_id;
	uint32_t dev_flags;

	/* Regular case */
	dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	/* A no-dirty device cannot attach to a dirty-tracking HWPT */
	test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
				   NULL);
	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);

	/* IOMMU device does not support dirty tracking */
	test_ioctl_ioas_alloc(&ioas_id);
	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
				   &idev_id);
	test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_ioctl_destroy(stddev_id);
}
2262
/* Dirty tracking can be toggled on and off on a dirty-capable HWPT */
TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
{
	uint32_t stddev_id;
	uint32_t hwpt_id;

	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_cmd_set_dirty_tracking(hwpt_id, true);
	test_cmd_set_dirty_tracking(hwpt_id, false);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}
2277
/*
 * Same toggle test through a PASID attachment; a no-dirty device must
 * fail to PASID-attach to a dirty-tracking HWPT.
 */
TEST_F(iommufd_dirty_tracking, pasid_set_dirty_tracking)
{
	uint32_t stddev_id, ioas_id, hwpt_id, pasid = 100;
	uint32_t dev_flags = MOCK_FLAGS_DEVICE_PASID;

	/* Regular case */
	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_PASID | IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
			    &hwpt_id);
	test_cmd_mock_domain_flags(hwpt_id, dev_flags, &stddev_id, NULL, NULL);
	ASSERT_EQ(0, _test_cmd_pasid_attach(self->fd, stddev_id, pasid, hwpt_id));
	test_cmd_set_dirty_tracking(hwpt_id, true);
	test_cmd_set_dirty_tracking(hwpt_id, false);
	ASSERT_EQ(0, _test_cmd_pasid_detach(self->fd, stddev_id, pasid));

	test_ioctl_destroy(stddev_id);

	/* IOMMU device does not support dirty tracking */
	dev_flags |= MOCK_FLAGS_DEVICE_NO_DIRTY;
	test_ioctl_ioas_alloc(&ioas_id);
	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, NULL, NULL);
	EXPECT_ERRNO(EINVAL, _test_cmd_pasid_attach(self->fd, stddev_id, pasid, hwpt_id));

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}
2304
/* IOMMU_GET_HW_INFO must report the dirty tracking capability bit */
TEST_F(iommufd_dirty_tracking, device_dirty_capability)
{
	uint32_t caps = 0;
	uint32_t stddev_id;
	uint32_t hwpt_id;

	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_cmd_get_hw_capabilities(self->idev_id, caps);
	ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
		  caps & IOMMU_HW_CAP_DIRTY_TRACKING);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}
2320
/*
 * Read back dirty bitmaps (with the default clear-on-read semantics) for
 * aligned and deliberately misaligned user bitmap pointers.
 */
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
{
	uint32_t page_size = MOCK_PAGE_SIZE;
	uint32_t ioas_id = self->ioas_id;
	uint32_t hwpt_id;

	if (variant->hugepages)
		page_size = MOCK_HUGE_PAGE_SIZE;

	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
				     variant->buffer_size, MOCK_APERTURE_START);

	if (variant->hugepages)
		test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
					    IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
					    MOCK_IOMMUPT_HUGE, &hwpt_id);
	else
		test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
					    IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
					    MOCK_IOMMUPT_DEFAULT, &hwpt_id);

	test_cmd_set_dirty_tracking(hwpt_id, true);

	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap, self->bitmap_size, 0, _metadata);

	/* PAGE_SIZE unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + MOCK_PAGE_SIZE,
				self->bitmap_size, 0, _metadata);

	/* u64 unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + 0xff1, self->bitmap_size, 0,
				_metadata);

	test_ioctl_destroy(hwpt_id);
}
2362
/*
 * Same as get_dirty_bitmap but with IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
 * so repeated reads must keep reporting the same dirty bits.
 */
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
{
	uint32_t page_size = MOCK_PAGE_SIZE;
	uint32_t ioas_id = self->ioas_id;
	uint32_t hwpt_id;

	if (variant->hugepages)
		page_size = MOCK_HUGE_PAGE_SIZE;

	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
				     variant->buffer_size, MOCK_APERTURE_START);


	if (variant->hugepages)
		test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
					    IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
					    MOCK_IOMMUPT_HUGE, &hwpt_id);
	else
		test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
					    IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
					    MOCK_IOMMUPT_DEFAULT, &hwpt_id);

	test_cmd_set_dirty_tracking(hwpt_id, true);

	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap, self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	/* Unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + MOCK_PAGE_SIZE,
				self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	/* u64 unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + 0xff1, self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	test_ioctl_destroy(hwpt_id);
}
2410
2411 /* VFIO compatibility IOCTLs */
2412
/* Basic VFIO compat entry points respond on a bare iommufd */
TEST_F(iommufd, simple_ioctls)
{
	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
}
2418
/* VFIO_IOMMU_UNMAP_DMA argument validation without an attached domain */
TEST_F(iommufd, unmap_cmd)
{
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
	};

	/* argsz smaller than the struct is rejected */
	unmap_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	/* Unknown flag bits are rejected */
	unmap_cmd.argsz = sizeof(unmap_cmd);
	unmap_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	/* Valid args but no compat IOAS attached yet */
	unmap_cmd.flags = 0;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
}
2436
/* VFIO_IOMMU_MAP_DMA argument validation without an attached domain */
TEST_F(iommufd, map_cmd)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
		.vaddr = (__u64)buffer,
	};

	/* argsz smaller than the struct is rejected */
	map_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Unknown flag bits are rejected */
	map_cmd.argsz = sizeof(map_cmd);
	map_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Requires a domain to be attached */
	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
}
2456
/* VFIO_IOMMU_GET_INFO argument validation without an attached domain */
TEST_F(iommufd, info_cmd)
{
	struct vfio_iommu_type1_info info_cmd = {};

	/* Invalid argsz */
	info_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));

	/* Valid argsz but no compat IOAS attached yet */
	info_cmd.argsz = sizeof(info_cmd);
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
}
2468
/* VFIO_SET_IOMMU fails until a compat IOAS with a domain exists */
TEST_F(iommufd, set_iommu_cmd)
{
	/* Requires a domain to be attached */
	EXPECT_ERRNO(ENODEV,
		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
}
2476
/* IOMMU_VFIO_IOAS get/set/clear lifecycle of the compat IOAS */
TEST_F(iommufd, vfio_ioas)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_GET,
	};
	__u32 ioas_id;

	/* ENODEV if there is no compat ioas */
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Invalid id for set */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Valid id for set*/
	test_ioctl_ioas_alloc(&ioas_id);
	vfio_ioas_cmd.ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Same id comes back from get */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);

	/* Clear works */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
}
2508
/* Fixture with a mock domain attached through the VFIO compat layer */
FIXTURE(vfio_compat_mock_domain)
{
	int fd;			/* /dev/iommu */
	uint32_t ioas_id;	/* compat IOAS backing the mock domain */
};
2514
/* Variant selects the VFIO IOMMU type passed to VFIO_SET_IOMMU */
FIXTURE_VARIANT(vfio_compat_mock_domain)
{
	unsigned int version;
};
2519
/* Build the VFIO-compat view: IOAS + mock domain, then VFIO_SET_IOMMU */
FIXTURE_SETUP(vfio_compat_mock_domain)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_SET,
	};

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	/* Create what VFIO would consider a group */
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);

	/* Attach it to the vfio compat */
	vfio_ioas_cmd.ioas_id = self->ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
}
2539
/* Closing the fd tears down all owned objects */
FIXTURE_TEARDOWN(vfio_compat_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}
2544
FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
{
	/* type1v2: no unmap splitting */
	.version = VFIO_TYPE1v2_IOMMU,
};
2549
FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
{
	/* legacy type1: unmap may split mappings */
	.version = VFIO_TYPE1_IOMMU,
};
2554
/* Setup + teardown alone must succeed with no leaks */
TEST_F(vfio_compat_mock_domain, simple_close)
{
}
2558
/*
 * The compat layer disables IOMMU_OPTION_HUGE_PAGES for legacy type1
 * (which can split mappings) and keeps it enabled for type1v2.
 */
TEST_F(vfio_compat_mock_domain, option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	if (variant->version == VFIO_TYPE1_IOMMU) {
		ASSERT_EQ(0, cmd.val64);
	} else {
		ASSERT_EQ(1, cmd.val64);
	}
}
2576
2577 /*
2578 * Execute an ioctl command stored in buffer and check that the result does not
2579 * overflow memory.
2580 */
is_filled(const void * buf,uint8_t c,size_t len)2581 static bool is_filled(const void *buf, uint8_t c, size_t len)
2582 {
2583 const uint8_t *cbuf = buf;
2584
2585 for (; len; cbuf++, len--)
2586 if (*cbuf != c)
2587 return false;
2588 return true;
2589 }
2590
/*
 * Run cmd against the global buffer, then verify the kernel wrote nothing
 * past the command's declared size: every byte after _cmd_len must still
 * hold the 0xAA poison laid down before the ioctl.
 */
#define ioctl_check_buf(fd, cmd)                                         \
	({                                                               \
		size_t _cmd_len = *(__u32 *)buffer;                      \
									 \
		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
					  BUFFER_SIZE - _cmd_len));      \
	})
2600
check_vfio_info_cap_chain(struct __test_metadata * _metadata,struct vfio_iommu_type1_info * info_cmd)2601 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2602 struct vfio_iommu_type1_info *info_cmd)
2603 {
2604 const struct vfio_info_cap_header *cap;
2605
2606 ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2607 cap = buffer + info_cmd->cap_offset;
2608 while (true) {
2609 size_t cap_size;
2610
2611 if (cap->next)
2612 cap_size = (buffer + cap->next) - (void *)cap;
2613 else
2614 cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2615
2616 switch (cap->id) {
2617 case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2618 struct vfio_iommu_type1_info_cap_iova_range *data =
2619 (void *)cap;
2620
2621 ASSERT_EQ(1, data->header.version);
2622 ASSERT_EQ(1, data->nr_iovas);
2623 EXPECT_EQ(MOCK_APERTURE_START,
2624 data->iova_ranges[0].start);
2625 EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2626 break;
2627 }
2628 case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2629 struct vfio_iommu_type1_info_dma_avail *data =
2630 (void *)cap;
2631
2632 ASSERT_EQ(1, data->header.version);
2633 ASSERT_EQ(sizeof(*data), cap_size);
2634 break;
2635 }
2636 default:
2637 ASSERT_EQ(false, true);
2638 break;
2639 }
2640 if (!cap->next)
2641 break;
2642
2643 ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2644 ASSERT_GE(buffer + cap->next, (void *)cap);
2645 cap = buffer + cap->next;
2646 }
2647 }
2648
/*
 * Exercise VFIO_IOMMU_GET_INFO across every argsz from the bare struct up
 * to the full cap chain size, checking the kernel never corrupts the cap
 * chain or writes past argsz (via ioctl_check_buf's poison pattern).
 */
TEST_F(vfio_compat_mock_domain, get_info)
{
	struct vfio_iommu_type1_info *info_cmd = buffer;
	unsigned int i;
	size_t caplen;

	/* Pre-cap ABI */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);

	/* Read the cap chain size */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = sizeof(*info_cmd),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);
	ASSERT_EQ(0, info_cmd->cap_offset);
	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);

	/* Read the caps, kernel should never create a corrupted caps */
	caplen = info_cmd->argsz;
	for (i = sizeof(*info_cmd); i < caplen; i++) {
		*info_cmd = (struct vfio_iommu_type1_info){
			.argsz = i,
		};
		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
			  info_cmd->flags);
		if (!info_cmd->cap_offset)
			continue;
		check_vfio_info_cap_chain(_metadata, info_cmd);
	}
}
2689
shuffle_array(unsigned long * array,size_t nelms)2690 static void shuffle_array(unsigned long *array, size_t nelms)
2691 {
2692 unsigned int i;
2693
2694 /* Shuffle */
2695 for (i = 0; i != nelms; i++) {
2696 unsigned long tmp = array[i];
2697 unsigned int other = rand() % (nelms - i);
2698
2699 array[i] = array[other];
2700 array[other] = tmp;
2701 }
2702 }
2703
/*
 * VFIO compat map/unmap: one large mapping, unmap-of-empty, the
 * UNMAP_FLAG_ALL iova/size constraint, then many page-sized mappings
 * unmapped in random order.
 */
TEST_F(vfio_compat_mock_domain, map)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buffer,
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
	unsigned int i;

	/* Simple map/unmap */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	/* The kernel reports back how much was unmapped */
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
	/* Unmap of empty is success */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	/* UNMAP_FLAG_ALL requires 0 iova/size */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.iova = 0;
	unmap_cmd.size = 0;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* Small pages */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		map_cmd.iova = pages_iova[i] =
			MOCK_APERTURE_START + i * PAGE_SIZE;
		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
		map_cmd.size = PAGE_SIZE;
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	}
	/* Unmap in random order to stress the interval bookkeeping */
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	unmap_cmd.flags = 0;
	unmap_cmd.size = PAGE_SIZE;
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	}
}
2755
TEST_F(vfio_compat_mock_domain,huge_map)2756 TEST_F(vfio_compat_mock_domain, huge_map)
2757 {
2758 size_t buf_size = HUGEPAGE_SIZE * 2;
2759 struct vfio_iommu_type1_dma_map map_cmd = {
2760 .argsz = sizeof(map_cmd),
2761 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2762 .size = buf_size,
2763 .iova = MOCK_APERTURE_START,
2764 };
2765 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2766 .argsz = sizeof(unmap_cmd),
2767 };
2768 unsigned long pages_iova[16];
2769 unsigned int i;
2770 void *buf;
2771
2772 /* Test huge pages and splitting */
2773 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2774 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2775 0);
2776 ASSERT_NE(MAP_FAILED, buf);
2777 map_cmd.vaddr = (uintptr_t)buf;
2778 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2779
2780 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2781 for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2782 pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2783 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2784
2785 /* type1 mode can cut up larger mappings, type1v2 always fails */
2786 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2787 unmap_cmd.iova = pages_iova[i];
2788 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2789 if (variant->version == VFIO_TYPE1_IOMMU) {
2790 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2791 &unmap_cmd));
2792 } else {
2793 EXPECT_ERRNO(ENOENT,
2794 ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2795 &unmap_cmd));
2796 }
2797 }
2798 }
2799
/* Per-test state for the vIOMMU tests */
FIXTURE(iommufd_viommu)
{
	int fd;				/* /dev/iommu */
	uint32_t ioas_id;
	uint32_t stdev_id;
	uint32_t hwpt_id;		/* nesting parent HWPT */
	uint32_t nested_hwpt_id;	/* nested HWPT under the vIOMMU */
	uint32_t device_id;
	uint32_t viommu_id;
};
2810
/* viommu=0 runs the tests without any vIOMMU objects allocated */
FIXTURE_VARIANT(iommufd_viommu)
{
	unsigned int viommu;
};
2815
/*
 * Open iommufd and, for the mock_viommu variant, build the full stack:
 * mock device -> nesting parent HWPT -> vIOMMU -> nested HWPT.
 */
FIXTURE_SETUP(iommufd_viommu)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_ioctl_set_default_memory_limit();

	if (variant->viommu) {
		struct iommu_hwpt_selftest data = {
			.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
		};

		test_cmd_mock_domain(self->ioas_id, &self->stdev_id, NULL,
				     &self->device_id);

		/* Allocate a nesting parent hwpt */
		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT,
				    &self->hwpt_id);

		/* Allocate a vIOMMU taking refcount of the parent hwpt */
		test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
				      &self->viommu_id);

		/* Allocate a regular nested hwpt */
		test_cmd_hwpt_alloc_nested(self->device_id, self->viommu_id, 0,
					   &self->nested_hwpt_id,
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
	}
}
2848
/* Closing the fd tears down the whole vIOMMU object stack */
FIXTURE_TEARDOWN(iommufd_viommu)
{
	teardown_iommufd(self->fd, _metadata);
}
2853
FIXTURE_VARIANT_ADD(iommufd_viommu, no_viommu)
{
	/* no vIOMMU allocated; ids stay 0 */
	.viommu = 0,
};
2858
FIXTURE_VARIANT_ADD(iommufd_viommu, mock_viommu)
{
	/* full selftest vIOMMU stack allocated in setup */
	.viommu = 1,
};
2863
/* Teardown alone must destroy the setup-allocated objects without leaks */
TEST_F(iommufd_viommu, viommu_auto_destroy)
{
}
2867
/* Invalid vIOMMU allocations: bad hwpt, non-parent hwpt, unknown type */
TEST_F(iommufd_viommu, viommu_negative_tests)
{
	uint32_t device_id = self->device_id;
	uint32_t ioas_id = self->ioas_id;
	uint32_t hwpt_id;

	if (self->device_id) {
		/* Negative test -- invalid hwpt (hwpt_id=0) */
		test_err_viommu_alloc(ENOENT, device_id, 0,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
				      NULL);

		/* Negative test -- not a nesting parent hwpt */
		test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
		test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
				      NULL);
		test_ioctl_destroy(hwpt_id);

		/* Negative test -- unsupported viommu type */
		test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
				      0xdead, NULL, 0, NULL);
		/* The setup-allocated objects are still referenced */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->hwpt_id));
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->viommu_id));
	} else {
		/* no_viommu variant: ids are 0 so the alloc must fail */
		test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
				      NULL);
	}
}
2900
/*
 * Allocate an IOPF-capable nested HWPT under the vIOMMU, attach after
 * creating a vdevice, and trigger/deliver an I/O page fault through the
 * fault fd.
 */
TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
{
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t iopf_hwpt_id;
	uint32_t fault_id;
	uint32_t fault_fd;
	uint32_t vdev_id;

	if (!dev_id)
		SKIP(return, "Skipping test for variant no_viommu");

	test_ioctl_fault_alloc(&fault_id, &fault_fd);
	/* Bogus fault_id and undefined flag bits are rejected */
	test_err_hwpt_alloc_iopf(ENOENT, dev_id, viommu_id, UINT32_MAX,
				 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
				 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
	test_err_hwpt_alloc_iopf(EOPNOTSUPP, dev_id, viommu_id, fault_id,
				 IOMMU_HWPT_FAULT_ID_VALID | (1 << 31),
				 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
				 sizeof(data));
	test_cmd_hwpt_alloc_iopf(dev_id, viommu_id, fault_id,
				 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
				 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));

	/* Must allocate vdevice before attaching to a nested hwpt */
	test_err_mock_domain_replace(ENOENT, self->stdev_id, iopf_hwpt_id);
	test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
	test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
	/* Attached HWPT cannot be destroyed */
	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, iopf_hwpt_id));
	test_cmd_trigger_iopf(dev_id, fault_fd);

	test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
	test_ioctl_destroy(iopf_hwpt_id);
	close(fault_fd);
	test_ioctl_destroy(fault_id);
}
2940
/*
 * Allocate a vIOMMU with driver-specific data and loop the data back
 * through the mmap window the driver exposes.
 */
TEST_F(iommufd_viommu, viommu_alloc_with_data)
{
	struct iommu_viommu_selftest data = {
		.in_data = 0xbeef,
	};
	uint32_t *test;

	if (!self->device_id)
		SKIP(return, "Skipping test for variant no_viommu");

	/*
	 * NOTE(review): this overwrites self->viommu_id from setup; the old
	 * vIOMMU is reclaimed at teardown via the fd, not destroyed here.
	 */
	test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
			      IOMMU_VIOMMU_TYPE_SELFTEST, &data, sizeof(data),
			      &self->viommu_id);
	ASSERT_EQ(data.out_data, data.in_data);

	/* Negative mmap tests -- offset and length cannot be changed */
	test_err_mmap(ENXIO, data.out_mmap_length,
		      data.out_mmap_offset + PAGE_SIZE);
	test_err_mmap(ENXIO, data.out_mmap_length,
		      data.out_mmap_offset + PAGE_SIZE * 2);
	test_err_mmap(ENXIO, data.out_mmap_length / 2, data.out_mmap_offset);
	test_err_mmap(ENXIO, data.out_mmap_length * 2, data.out_mmap_offset);

	/* Now do a correct mmap for a loopback test */
	test = mmap(NULL, data.out_mmap_length, PROT_READ | PROT_WRITE,
		    MAP_SHARED, self->fd, data.out_mmap_offset);
	ASSERT_NE(MAP_FAILED, test);
	ASSERT_EQ(data.in_data, *test);

	/* The owner of the mmap region should be blocked */
	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, self->viommu_id));
	munmap(test, data.out_mmap_length);
}
2974
/*
 * Exercise vDEVICE allocation lifecycle together with a vEVENTQ: set a
 * virtual device id (0x99), read events for it, destroy, then repeat with a
 * different id (0x88) including a deliberate event-queue overflow.
 */
TEST_F(iommufd_viommu, vdevice_alloc)
{
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t vdev_id = 0;
	uint32_t veventq_id;
	uint32_t veventq_fd;
	int prev_seq = -1;

	if (dev_id) {
		/* Must allocate vdevice before attaching to a nested hwpt */
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     self->nested_hwpt_id);

		/* Allocate a vEVENTQ with veventq_depth=2 */
		test_cmd_veventq_alloc(viommu_id, IOMMU_VEVENTQ_TYPE_SELFTEST,
				       &veventq_id, &veventq_fd);
		/* Only one vEVENTQ of this type may exist per vIOMMU */
		test_err_veventq_alloc(EEXIST, viommu_id,
				       IOMMU_VEVENTQ_TYPE_SELFTEST, NULL, NULL);
		/* Set vdev_id to 0x99, unset it, and set to 0x88 */
		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
		test_cmd_mock_domain_replace(self->stdev_id,
					     self->nested_hwpt_id);
		test_cmd_trigger_vevents(dev_id, 1);
		test_cmd_read_vevents(veventq_fd, 1, 0x99, &prev_seq);
		/* A device can only have one vdevice per vIOMMU */
		test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
				       &vdev_id);
		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(vdev_id);

		/* Try again with 0x88 */
		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
		test_cmd_mock_domain_replace(self->stdev_id,
					     self->nested_hwpt_id);
		/* Trigger an overflow with three events */
		test_cmd_trigger_vevents(dev_id, 3);
		test_err_read_vevents(EOVERFLOW, veventq_fd, 3, 0x88,
				      &prev_seq);
		/* Overflow must be gone after the previous reads */
		test_cmd_trigger_vevents(dev_id, 1);
		test_cmd_read_vevents(veventq_fd, 1, 0x88, &prev_seq);
		close(veventq_fd);
		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(vdev_id);
		test_ioctl_destroy(veventq_id);
	} else {
		/* no_viommu variant: allocation must fail with ENOENT */
		test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
	}
}
3024
/*
 * Exercise IOMMU_VIOMMU_INVALIDATE against the mock device caches via a
 * vDEVICE (virtual id 0x99): negative cases for data_type, entry length,
 * flags, user pointer, cache_id and vdev_id, then partial-failure batches,
 * and finally full invalidation verified entry by entry.
 */
TEST_F(iommufd_viommu, vdevice_cache)
{
	struct iommu_viommu_invalidate_selftest inv_reqs[2] = {};
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t vdev_id = 0;
	uint32_t num_inv;

	if (!dev_id)
		SKIP(return, "Skipping test for variant no_viommu");

	test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);

	/* All mock caches start out with the default value */
	test_cmd_dev_check_cache_all(dev_id, IOMMU_TEST_DEV_CACHE_DEFAULT);

	/* Check data_type by passing zero-length array */
	num_inv = 0;
	test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
				   &num_inv);
	assert(!num_inv);

	/* Negative test: Invalid data_type */
	num_inv = 1;
	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
				   sizeof(*inv_reqs), &num_inv);
	assert(!num_inv);

	/* Negative test: structure size sanity */
	num_inv = 1;
	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
				   sizeof(*inv_reqs) + 1, &num_inv);
	assert(!num_inv);

	num_inv = 1;
	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 1,
				   &num_inv);
	assert(!num_inv);

	/* Negative test: invalid flag is passed */
	num_inv = 1;
	inv_reqs[0].flags = 0xffffffff;
	inv_reqs[0].vdev_id = 0x99;
	test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
				   sizeof(*inv_reqs), &num_inv);
	assert(!num_inv);

	/* Negative test: invalid data_uptr when array is not empty */
	num_inv = 1;
	inv_reqs[0].flags = 0;
	inv_reqs[0].vdev_id = 0x99;
	test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
				   sizeof(*inv_reqs), &num_inv);
	assert(!num_inv);

	/* Negative test: invalid entry_len when array is not empty */
	num_inv = 1;
	inv_reqs[0].flags = 0;
	inv_reqs[0].vdev_id = 0x99;
	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 0,
				   &num_inv);
	assert(!num_inv);

	/* Negative test: invalid cache_id */
	num_inv = 1;
	inv_reqs[0].flags = 0;
	inv_reqs[0].vdev_id = 0x99;
	inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
				   sizeof(*inv_reqs), &num_inv);
	assert(!num_inv);

	/* Negative test: invalid vdev_id (0x9 was never allocated) */
	num_inv = 1;
	inv_reqs[0].flags = 0;
	inv_reqs[0].vdev_id = 0x9;
	inv_reqs[0].cache_id = 0;
	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
				   sizeof(*inv_reqs), &num_inv);
	assert(!num_inv);

	/*
	 * Invalidate the 1st cache entry but fail the 2nd request
	 * due to invalid flags configuration in the 2nd request.
	 */
	num_inv = 2;
	inv_reqs[0].flags = 0;
	inv_reqs[0].vdev_id = 0x99;
	inv_reqs[0].cache_id = 0;
	inv_reqs[1].flags = 0xffffffff;
	inv_reqs[1].vdev_id = 0x99;
	inv_reqs[1].cache_id = 1;
	test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
				   sizeof(*inv_reqs), &num_inv);
	/* num_inv reports how many requests were consumed before failing */
	assert(num_inv == 1);
	test_cmd_dev_check_cache(dev_id, 0, 0);
	test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
	test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
	test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);

	/*
	 * Invalidate the 1st cache entry but fail the 2nd request
	 * due to invalid cache_id configuration in the 2nd request.
	 */
	num_inv = 2;
	inv_reqs[0].flags = 0;
	inv_reqs[0].vdev_id = 0x99;
	inv_reqs[0].cache_id = 0;
	inv_reqs[1].flags = 0;
	inv_reqs[1].vdev_id = 0x99;
	inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
	test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
				   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
				   sizeof(*inv_reqs), &num_inv);
	assert(num_inv == 1);
	test_cmd_dev_check_cache(dev_id, 0, 0);
	test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
	test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
	test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);

	/* Invalidate the 2nd cache entry and verify */
	num_inv = 1;
	inv_reqs[0].flags = 0;
	inv_reqs[0].vdev_id = 0x99;
	inv_reqs[0].cache_id = 1;
	test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
				   &num_inv);
	assert(num_inv == 1);
	test_cmd_dev_check_cache(dev_id, 0, 0);
	test_cmd_dev_check_cache(dev_id, 1, 0);
	test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
	test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);

	/* Invalidate the 3rd and 4th cache entries and verify */
	num_inv = 2;
	inv_reqs[0].flags = 0;
	inv_reqs[0].vdev_id = 0x99;
	inv_reqs[0].cache_id = 2;
	inv_reqs[1].flags = 0;
	inv_reqs[1].vdev_id = 0x99;
	inv_reqs[1].cache_id = 3;
	test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
				   &num_inv);
	assert(num_inv == 2);
	test_cmd_dev_check_cache_all(dev_id, 0);

	/* Invalidate all cache entries for vdev_id 0x99 and verify */
	num_inv = 1;
	inv_reqs[0].vdev_id = 0x99;
	inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
	test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
				   &num_inv);
	assert(num_inv == 1);
	test_cmd_dev_check_cache_all(dev_id, 0);
	test_ioctl_destroy(vdev_id);
}
3189
/*
 * Exercise IOMMU_HW_QUEUE allocation on a vIOMMU: unsupported type, bad
 * address/length, unmapped iova, index ordering (must start at 0), iova
 * pinning semantics, and destruction order (dependent queues first).
 */
TEST_F(iommufd_viommu, hw_queue)
{
	__u64 iova = MOCK_APERTURE_START, iova2;
	uint32_t viommu_id = self->viommu_id;
	uint32_t hw_queue_id[2];

	if (!viommu_id)
		SKIP(return, "Skipping test for variant no_viommu");

	/* Fail IOMMU_HW_QUEUE_TYPE_DEFAULT */
	test_err_hw_queue_alloc(EOPNOTSUPP, viommu_id,
				IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
				&hw_queue_id[0]);
	/* Fail queue addr and length */
	test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
				0, iova, 0, &hw_queue_id[0]);
	test_err_hw_queue_alloc(EOVERFLOW, viommu_id,
				IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, ~(uint64_t)0,
				PAGE_SIZE, &hw_queue_id[0]);
	/* Fail missing iova (nothing mapped at MOCK_APERTURE_START yet) */
	test_err_hw_queue_alloc(ENOENT, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
				0, iova, PAGE_SIZE, &hw_queue_id[0]);

	/* Map iova */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);

	/* Fail index=1 and =MAX; must start from index=0 */
	test_err_hw_queue_alloc(EIO, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
				iova, PAGE_SIZE, &hw_queue_id[0]);
	test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
				IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
				&hw_queue_id[0]);

	/* Allocate index=0, declare ownership of the iova */
	test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 0,
				iova, PAGE_SIZE, &hw_queue_id[0]);
	/* Fail duplicated index */
	test_err_hw_queue_alloc(EEXIST, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
				0, iova, PAGE_SIZE, &hw_queue_id[0]);
	/* Fail unmap, due to iova ownership */
	test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
	/* The 2nd page is not pinned, so it can be unmapped */
	test_ioctl_ioas_unmap(iova2, PAGE_SIZE);

	/* Allocate index=1, with an unaligned case */
	test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
				iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
				&hw_queue_id[1]);
	/* Fail to destroy, due to dependency */
	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hw_queue_id[0]));

	/* Destroy in descending order */
	test_ioctl_destroy(hw_queue_id[1]);
	test_ioctl_destroy(hw_queue_id[0]);
	/* Now it can unmap the first page */
	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
}
3248
/*
 * Destroying the backing mock device (stdev) must tombstone its vDEVICE:
 * a later destroy of the vdevice id is expected to fail with ENOENT.
 */
TEST_F(iommufd_viommu, vdevice_tombstone)
{
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t vdev_id = 0;

	if (!dev_id)
		SKIP(return, "Skipping test for variant no_viommu");

	test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
	test_ioctl_destroy(self->stdev_id);
	/* vdevice object is gone along with the stdev */
	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, vdev_id));
}
3262
/* Fixture state for the PASID attach/replace/detach tests */
FIXTURE(iommufd_device_pasid)
{
	int fd;				/* /dev/iommu file descriptor */
	uint32_t ioas_id;		/* IOAS shared by both mock devices */
	uint32_t hwpt_id;		/* hwpt of the PASID-capable device */
	uint32_t stdev_id;		/* PASID-capable mock device */
	uint32_t device_id;
	uint32_t no_pasid_stdev_id;	/* extra non-PASID device (no_pasid variant) */
	uint32_t no_pasid_device_id;
};
3273
/* Variant knob: whether the primary mock device supports PASID */
FIXTURE_VARIANT(iommufd_device_pasid)
{
	bool pasid_capable;
};
3278
/*
 * Open iommufd, allocate an IOAS and a PASID-capable mock domain; for the
 * no_pasid variant also create a second, non-PASID-capable mock device.
 */
FIXTURE_SETUP(iommufd_device_pasid)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	test_cmd_mock_domain_flags(self->ioas_id,
				   MOCK_FLAGS_DEVICE_PASID,
				   &self->stdev_id, &self->hwpt_id,
				   &self->device_id);
	if (!variant->pasid_capable)
		test_cmd_mock_domain_flags(self->ioas_id, 0,
					   &self->no_pasid_stdev_id, NULL,
					   &self->no_pasid_device_id);
}
3294
/* Common teardown: closes the fd and checks for leaked objects */
FIXTURE_TEARDOWN(iommufd_device_pasid)
{
	teardown_iommufd(self->fd, _metadata);
}
3299
/* Variant: primary device without PASID support */
FIXTURE_VARIANT_ADD(iommufd_device_pasid, no_pasid)
{
	.pasid_capable = false,
};
3304
/* Variant: primary device with PASID support */
FIXTURE_VARIANT_ADD(iommufd_device_pasid, has_pasid)
{
	.pasid_capable = true,
};
3309
/*
 * Full PASID attach/replace/detach matrix: PASID-compat vs non-compat
 * domains, RID vs PASID interaction, attach-over-attach (EBUSY),
 * replace-without-attach (EINVAL), a designed replace failure (ENOMEM),
 * and IOPF delivery on a PASID attachment.
 */
TEST_F(iommufd_device_pasid, pasid_attach)
{
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	uint32_t nested_hwpt_id[3] = {};
	uint32_t parent_hwpt_id = 0;
	uint32_t fault_id, fault_fd;
	uint32_t s2_hwpt_id = 0;
	uint32_t iopf_hwpt_id;
	uint32_t pasid = 100;
	uint32_t viommu_id;

	/*
	 * Negative, detach pasid without attaching, this is not expected.
	 * But it should not result in failure anyway.
	 */
	test_cmd_pasid_detach(pasid);

	/* Allocate two nested hwpts sharing one common parent hwpt */
	test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_NEST_PARENT,
			    &parent_hwpt_id);
	test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
				   IOMMU_HWPT_ALLOC_PASID,
				   &nested_hwpt_id[0],
				   IOMMU_HWPT_DATA_SELFTEST,
				   &data, sizeof(data));
	test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
				   IOMMU_HWPT_ALLOC_PASID,
				   &nested_hwpt_id[1],
				   IOMMU_HWPT_DATA_SELFTEST,
				   &data, sizeof(data));

	/* Fault related preparation */
	test_ioctl_fault_alloc(&fault_id, &fault_fd);
	test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
				 IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID,
				 &iopf_hwpt_id,
				 IOMMU_HWPT_DATA_SELFTEST, &data,
				 sizeof(data));

	/* Allocate a regular nested hwpt based on viommu */
	test_cmd_viommu_alloc(self->device_id, parent_hwpt_id,
			      IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0, &viommu_id);
	test_cmd_hwpt_alloc_nested(self->device_id, viommu_id,
				   IOMMU_HWPT_ALLOC_PASID,
				   &nested_hwpt_id[2],
				   IOMMU_HWPT_DATA_SELFTEST, &data,
				   sizeof(data));

	/* s2 hwpt allocated with the PASID flag, usable on PASID paths */
	test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_PASID,
			    &s2_hwpt_id);

	/* Attach RID to non-pasid compat domain, */
	test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
	/* then attach to pasid should fail */
	test_err_pasid_attach(EINVAL, pasid, s2_hwpt_id);

	/* Attach RID to pasid compat domain, */
	test_cmd_mock_domain_replace(self->stdev_id, s2_hwpt_id);
	/* then attach to pasid should succeed, */
	test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
	/* but attach RID to non-pasid compat domain should fail now. */
	test_err_mock_domain_replace(EINVAL, self->stdev_id, parent_hwpt_id);
	/*
	 * Detach hwpt from pasid 100, and check if the pasid 100
	 * has null domain.
	 */
	test_cmd_pasid_detach(pasid);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, 0));
	/* RID is attached to pasid-compat domain, pasid path is not used */

	if (!variant->pasid_capable) {
		/*
		 * PASID-compatible domain can be used by non-PASID-capable
		 * device.
		 */
		test_cmd_mock_domain_replace(self->no_pasid_stdev_id, nested_hwpt_id[0]);
		test_cmd_mock_domain_replace(self->no_pasid_stdev_id, self->ioas_id);
		/*
		 * Attach hwpt to pasid 100 of non-PASID-capable device,
		 * should fail, no matter domain is pasid-compat or not.
		 */
		EXPECT_ERRNO(EINVAL,
			     _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
						    pasid, parent_hwpt_id));
		EXPECT_ERRNO(EINVAL,
			     _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
						    pasid, s2_hwpt_id));
	}

	/*
	 * Attach non pasid compat hwpt to pasid-capable device, should
	 * fail, and have null domain.
	 */
	test_err_pasid_attach(EINVAL, pasid, parent_hwpt_id);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, 0));

	/*
	 * Attach ioas to pasid 100, should fail, domain should
	 * be null.
	 */
	test_err_pasid_attach(EINVAL, pasid, self->ioas_id);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, 0));

	/*
	 * Attach the s2_hwpt to pasid 100, should succeed, domain should
	 * be valid.
	 */
	test_cmd_pasid_attach(pasid, s2_hwpt_id);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, s2_hwpt_id));

	/*
	 * Try attach pasid 100 with another hwpt, should FAIL
	 * as attach does not allow overwrite, use REPLACE instead.
	 */
	test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);

	/*
	 * Detach hwpt from pasid 100 for next test, should succeed,
	 * and have null domain.
	 */
	test_cmd_pasid_detach(pasid);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, 0));

	/*
	 * Attach nested hwpt to pasid 100, should succeed, domain
	 * should be valid.
	 */
	test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, nested_hwpt_id[0]));

	/* Attach to pasid 100 which has been attached, should fail. */
	test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);

	/* cleanup pasid 100 */
	test_cmd_pasid_detach(pasid);

	/* Replace tests */

	pasid = 200;
	/*
	 * Replace pasid 200 without attaching it, should fail
	 * with -EINVAL.
	 */
	test_err_pasid_replace(EINVAL, pasid, s2_hwpt_id);

	/*
	 * Attach the s2 hwpt to pasid 200, should succeed, domain should
	 * be valid.
	 */
	test_cmd_pasid_attach(pasid, s2_hwpt_id);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, s2_hwpt_id));

	/*
	 * Replace pasid 200 with self->ioas_id, should fail
	 * and domain should be the prior s2 hwpt.
	 */
	test_err_pasid_replace(EINVAL, pasid, self->ioas_id);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, s2_hwpt_id));

	/*
	 * Replace a nested hwpt for pasid 200, should succeed,
	 * and have valid domain.
	 */
	test_cmd_pasid_replace(pasid, nested_hwpt_id[0]);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, nested_hwpt_id[0]));

	/*
	 * Replace with another nested hwpt for pasid 200, should
	 * succeed, and have valid domain.
	 */
	test_cmd_pasid_replace(pasid, nested_hwpt_id[1]);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, nested_hwpt_id[1]));

	/* cleanup pasid 200 */
	test_cmd_pasid_detach(pasid);

	/* Negative Tests for pasid replace, use pasid 1024 */

	/*
	 * Attach the s2 hwpt to pasid 1024, should succeed, domain should
	 * be valid.
	 */
	pasid = 1024;
	test_cmd_pasid_attach(pasid, s2_hwpt_id);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, s2_hwpt_id));

	/*
	 * Replace pasid 1024 with nested_hwpt_id[0], should fail,
	 * but have the old valid domain. This is a designed
	 * negative case. Normally, this shall succeed.
	 */
	test_err_pasid_replace(ENOMEM, pasid, nested_hwpt_id[0]);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, s2_hwpt_id));

	/* cleanup pasid 1024 */
	test_cmd_pasid_detach(pasid);

	/* Attach to iopf-capable hwpt */

	/*
	 * Attach an iopf hwpt to pasid 2048, should succeed, domain should
	 * be valid.
	 */
	pasid = 2048;
	test_cmd_pasid_attach(pasid, iopf_hwpt_id);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, iopf_hwpt_id));

	test_cmd_trigger_iopf_pasid(self->device_id, pasid, fault_fd);

	/*
	 * Replace with s2_hwpt_id for pasid 2048, should
	 * succeed, and have valid domain.
	 */
	test_cmd_pasid_replace(pasid, s2_hwpt_id);
	ASSERT_EQ(0,
		  test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					    pasid, s2_hwpt_id));

	/* cleanup pasid 2048 */
	test_cmd_pasid_detach(pasid);

	test_ioctl_destroy(iopf_hwpt_id);
	close(fault_fd);
	test_ioctl_destroy(fault_id);

	/* Detach the s2_hwpt_id from RID */
	test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
}
3568
3569 TEST_HARNESS_MAIN
3570