1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #ifndef __SELFTEST_IOMMUFD_UTILS
4 #define __SELFTEST_IOMMUFD_UTILS
5 
6 #include <unistd.h>
7 #include <stddef.h>
8 #include <sys/fcntl.h>
9 #include <sys/ioctl.h>
10 #include <stdint.h>
11 #include <assert.h>
12 #include <poll.h>
13 
14 #include "../kselftest_harness.h"
15 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
16 
/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)

/*
 * Page accounting modes; values presumably must match the kernel's
 * iopt_pages definitions in drivers/iommu/iommufd -- confirm on update.
 */
enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
};

/* Integer division rounding up, e.g. DIV_ROUND_UP(5, 4) == 2 */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
33 
/* Non-atomic helper: set bit @nr in the bitmap starting at @addr. */
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	const unsigned int bits_per_word = 8 * sizeof(unsigned long);

	addr[nr / bits_per_word] |= 1UL << (nr % bits_per_word);
}
41 
/* Non-atomic helper: report whether bit @nr is set in the bitmap at @addr. */
static inline bool test_bit(unsigned int nr, unsigned long *addr)
{
	const unsigned int bits_per_word = 8 * sizeof(unsigned long);

	return (addr[nr / bits_per_word] >> (nr % bits_per_word)) & 1UL;
}
46 
/* Primary test buffer mapped into the IOAS; sized at runtime by the harness */
static void *buffer;
static unsigned long BUFFER_SIZE;

/* memfd-backed buffer and its file descriptor (see memfd_mmap()) */
static void *mfd_buffer;
static int mfd;

/* Host page size; set by harness setup -- presumably sysconf(_SC_PAGESIZE) */
static unsigned long PAGE_SIZE;

/* Size of a struct member without needing an object of that type */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/* Offset of the first byte past MEMBER within TYPE */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
58 
memfd_mmap(size_t length,int prot,int flags,int * mfd_p)59 static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
60 {
61 	int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
62 	int mfd = memfd_create("buffer", mfd_flags);
63 
64 	if (mfd <= 0)
65 		return MAP_FAILED;
66 	if (ftruncate(mfd, length))
67 		return MAP_FAILED;
68 	*mfd_p = mfd;
69 	return mmap(0, length, prot, flags, mfd, 0);
70 }
71 
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
/* ASSERTs every page in [_ptr, _ptr + _length) has a refcount of _refs */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
90 
/*
 * Create a mock domain on @ioas_id. Each out-pointer (@stdev_id, @hwpt_id,
 * @idev_id) is optional and filled only on success. Returns 0 or the raw
 * ioctl() error.
 */
static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &cmd);

	if (rc)
		return rc;
	/* The kernel must have written back a non-zero object id */
	assert(cmd.id != 0);
	if (stdev_id)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
/* ASSERT that creating a mock domain on ioas_id succeeds */
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
/* EXPECT that creating a mock domain fails with _errno */
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
120 
/*
 * Like _test_cmd_mock_domain() but passes @stdev_flags as the selftest
 * device flags. Out-pointers are optional; returns 0 or the ioctl() error.
 */
static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &cmd);

	if (rc)
		return rc;
	/* The kernel must have written back a non-zero object id */
	assert(cmd.id != 0);
	if (stdev_id)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
/* ASSERT that creating a flagged mock domain on ioas_id succeeds */
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
/* EXPECT that creating a flagged mock domain fails with _errno */
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))
152 
/*
 * Replace the page table attached to mock device @stdev_id with @pt_id.
 * On success, optionally returns the now-attached hwpt id through @hwpt_id.
 */
static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = { .pt_id = pt_id },
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &cmd);

	if (rc)
		return rc;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}
173 
/* ASSERT that replacing stdev_id's page table with pt_id succeeds */
#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
/* EXPECT that replacing stdev_id's page table fails with _errno */
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))
180 
/*
 * Allocate a hw page table for @device_id on top of @pt_id via
 * IOMMU_HWPT_ALLOC. @ft_id optionally attaches a fault queue, and
 * @data/@data_len carry driver-specific (nesting) data of @data_type.
 * The new hwpt id is returned through @hwpt_id when non-NULL.
 */
static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
				__u32 flags, __u32 *hwpt_id, __u32 data_type,
				void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.fault_id = ft_id,
	};
	int rc = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);

	if (rc)
		return rc;
	if (hwpt_id)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}
204 
/* Plain hwpt allocation (no nesting data, no fault queue) */
#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                  \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags,   \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
					  0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)   \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                      \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

/* Nested hwpt allocation carrying driver-specific data */
#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,         \
				   data_type, data, data_len)                \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
				   data_type, data, data_len)                \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))

/* hwpt allocation additionally bound to an IO page fault queue */
#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id,    \
				   data_type, data, data_len)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))
#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags,     \
				 hwpt_id, data_type, data, data_len)            \
	EXPECT_ERRNO(_errno,                                                    \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))
235 
/* ASSERT that mock IOTLB slot iotlb_id of hwpt_id holds the value expected */
#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
			.id = hwpt_id,                                         \
			.check_iotlb = {                                       \
				.id = iotlb_id,                                \
				.iotlb = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0,                                                   \
			  ioctl(self->fd,                                      \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd));                                   \
	})

/* Check every mock IOTLB slot of hwpt_id against the same expected value */
#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)                       \
	({                                                                     \
		int i;                                                         \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)             \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected);       \
	})

/* ASSERT that mock device cache slot cache_id of device_id holds expected */
#define test_cmd_dev_check_cache(device_id, cache_id, expected)                \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_DEV_CHECK_CACHE,                   \
			.id = device_id,                                       \
			.check_dev_cache = {                                   \
				.id = cache_id,                                \
				.cache = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0, ioctl(self->fd,                                   \
				   _IOMMU_TEST_CMD(                            \
					   IOMMU_TEST_OP_DEV_CHECK_CACHE),     \
				   &test_cmd));                                \
	})

/* Check every mock device cache slot against the same expected value */
#define test_cmd_dev_check_cache_all(device_id, expected)                      \
	({                                                                     \
		int c;                                                         \
		for (c = 0; c < MOCK_DEV_CACHE_NUM; c++)                       \
			test_cmd_dev_check_cache(device_id, c, expected);      \
	})
283 
/*
 * Issue IOMMU_HWPT_INVALIDATE against @hwpt_id with *@nreqs requests of
 * @lreq bytes each at @reqs. On return *@nreqs holds the number of entries
 * the kernel consumed; it is written back even when the ioctl fails.
 */
static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc;

	rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	/* Report how many entries were processed, even on error */
	*nreqs = cmd.entry_num;
	return rc;
}
300 
/* ASSERT that invalidating hwpt_id with the given request array succeeds */
#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)       \
	({                                                                    \
		ASSERT_EQ(0,                                                  \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs,  \
						    data_type, lreq, nreqs)); \
	})
/* EXPECT that invalidating hwpt_id fails with _errno */
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs)                                  \
	({                                                               \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
					     self->fd, hwpt_id, reqs,    \
					     data_type, lreq, nreqs));   \
	})
314 
/*
 * Issue IOMMU_HWPT_INVALIDATE targeting a vIOMMU object @viommu_id.
 * Semantics mirror _test_cmd_hwpt_invalidate(): *@nreqs is the request
 * count in, consumed count out (written back even on failure).
 */
static int _test_cmd_viommu_invalidate(int fd, __u32 viommu_id, void *reqs,
				       uint32_t data_type, uint32_t lreq,
				       uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc;

	rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	/* Report how many entries were processed, even on error */
	*nreqs = cmd.entry_num;
	return rc;
}
331 
/* ASSERT a selftest-format vIOMMU invalidation succeeds */
#define test_cmd_viommu_invalidate(viommu, reqs, lreq, nreqs)                  \
	({                                                                     \
		ASSERT_EQ(0,                                                   \
			  _test_cmd_viommu_invalidate(self->fd, viommu, reqs,  \
					IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, \
					lreq, nreqs));                         \
	})
/* EXPECT a vIOMMU invalidation to fail with _errno */
#define test_err_viommu_invalidate(_errno, viommu_id, reqs, data_type, lreq,   \
				 nreqs)                                        \
	({                                                                     \
		EXPECT_ERRNO(_errno, _test_cmd_viommu_invalidate(              \
					     self->fd, viommu_id, reqs,        \
					     data_type, lreq, nreqs));         \
	})
346 
/* Point the existing access object @access_id at a new IOAS @ioas_id. */
static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};

	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
/* ASSERT that re-pointing access_id at ioas_id succeeds */
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
365 
/*
 * Enable or disable dirty tracking on @hwpt_id.
 * Unlike most helpers in this file, failure is reported as -errno rather
 * than the raw ioctl() return.
 */
static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};

	if (ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd))
		return -errno;
	return 0;
}
/* ASSERT that toggling dirty tracking on hwpt_id succeeds */
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))
382 
/*
 * Read the dirty bitmap of [iova, iova + length) from @hwpt_id into
 * @bitmap, one bit per @page_size page. @flags may carry
 * IOMMU_HWPT_GET_DIRTY_BITMAP_* modifiers.
 */
static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};

	return ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
}
403 
/* ASSERT that fetching the dirty bitmap for hwpt_id succeeds */
#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,    \
				  bitmap, flags)                           \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
						page_size, bitmap, flags))
408 
/*
 * Mark pages of @hwpt_id's mock domain dirty: each bit set in @bitmap
 * dirties the corresponding @page_size page of [iova, iova + length).
 * On success the number of pages actually marked is optionally returned
 * through @dirty.
 */
static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		}
	};
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
	if (ret)
		/*
		 * Return the raw ioctl error like every other helper here;
		 * the old "-ret" negation turned -1 into +1 and discarded
		 * the file's error convention.
		 */
		return ret;
	if (dirty)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}
433 
/* ASSERT that dirtying pages in the mock domain succeeds */
#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))
439 
/*
 * End-to-end dirty tracking check: dirty every "pteset"-th page in the mock
 * domain, read the bitmap back and verify the expected pattern, then read
 * again to verify clear-on-read semantics (unless
 * IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR is in @flags).
 *
 * @page_size is the bitmap granularity, @pte_page_size the size covered by
 * one IOPTE; npte = IOPTEs per bitmap page, and marking one bit dirties a
 * run of npte bits in the result.
 */
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 nbits, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	/* GNU "?:": at least one page is always expected to be dirtied */
	unsigned long j, i, nr = nbits / pteset ?: 1;
	unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		/* Each dirtied IOPTE shows up as npte consecutive set bits */
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It as read already -- expect all zeroes */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			/* ...unless NO_CLEAR kept the first read from clearing */
			ASSERT_EQ(
				(j < npte) &&
					(flags &
					 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
/* ASSERT the full dirty-bitmap round trip above succeeds */
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\
				bitmap, bitmap_size, flags, _metadata)     \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap,     \
					      bitmap_size, flags, _metadata))
495 
/*
 * Create an access object on @ioas_id. On success the access's fd-based id
 * is stored in *@access_id (destroyed later by closing it).
 */
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &cmd);

	if (rc)
		return rc;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
/* ASSERT that creating an access object on ioas_id succeeds */
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))
516 
/* An access object is fd-backed; destroying it is just closing the fd */
static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
523 
/* Drop the pinned-pages range @access_pages_id from access object @access_id */
static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
					  unsigned int access_pages_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
		.id = access_id,
		.destroy_access_pages = { .access_pages_id = access_pages_id },
	};
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))
541 
/* Destroy any iommufd object by id via IOMMU_DESTROY */
static int _test_ioctl_destroy(int fd, unsigned int id)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),
		.id = id,
	};
	return ioctl(fd, IOMMU_DESTROY, &cmd);
}
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))
551 
/* Allocate a fresh IOAS; on success its id is returned through @id. */
static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = {
		.size = sizeof(cmd),
	};
	int rc = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);

	if (rc)
		return rc;
	*id = cmd.out_ioas_id;
	return 0;
}
/* ASSERT IOAS allocation succeeds and yields a non-zero id */
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})
570 
/*
 * Map @length bytes at @buffer into IOAS @ioas_id. With
 * IOMMU_IOAS_MAP_FIXED_IOVA, *@iova is the requested address; in either
 * case *@iova receives the IOVA the kernel chose/confirmed, even on error.
 */
static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};
	int rc;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	rc = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
	*iova = cmd.iova;
	return rc;
}
/* Map into the fixture's IOAS at a kernel-chosen IOVA (returned via iova_p) */
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

/* Same as test_ioctl_ioas_map() but against an explicit ioas_id */
#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)              \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p,                            \
					  IOMMU_IOAS_MAP_WRITEABLE |         \
						  IOMMU_IOAS_MAP_READABLE))

/* Map at a caller-fixed IOVA (FIXED_IOVA); iova is a value, not a pointer */
#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})
642 
_test_ioctl_ioas_unmap(int fd,unsigned int ioas_id,uint64_t iova,size_t length,uint64_t * out_len)643 static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
644 				  size_t length, uint64_t *out_len)
645 {
646 	struct iommu_ioas_unmap cmd = {
647 		.size = sizeof(cmd),
648 		.ioas_id = ioas_id,
649 		.iova = iova,
650 		.length = length,
651 	};
652 	int ret;
653 
654 	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
655 	if (out_len)
656 		*out_len = cmd.length;
657 	return ret;
658 }
/* ASSERT unmapping from the fixture's IOAS succeeds */
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

/* Same but against an explicit ioas_id */
#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
670 
/*
 * Map @length bytes at offset @start of file @mfd into IOAS @ioas_id via
 * IOMMU_IOAS_MAP_FILE. IOVA in/out semantics match _test_ioctl_ioas_map():
 * *@iova is consumed with FIXED_IOVA and always written back.
 */
static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
				     size_t start, size_t length, __u64 *iova,
				     unsigned int flags)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.fd = mfd,
		.start = start,
		.length = length,
	};
	int rc;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	rc = ioctl(fd, IOMMU_IOAS_MAP_FILE, &cmd);
	*iova = cmd.iova;
	return rc;
}
692 
/* ASSERT a file-backed mapping into the fixture's IOAS succeeds */
#define test_ioctl_ioas_map_file(mfd, start, length, iova_p)                   \
	ASSERT_EQ(0,                                                           \
		  _test_ioctl_ioas_map_file(                                   \
			  self->fd, self->ioas_id, mfd, start, length, iova_p, \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map_file(_errno, mfd, start, length, iova_p)     \
	EXPECT_ERRNO(                                                        \
		_errno,                                                      \
		_test_ioctl_ioas_map_file(                                   \
			self->fd, self->ioas_id, mfd, start, length, iova_p, \
			IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

/* Same but against an explicit ioas_id */
#define test_ioctl_ioas_map_id_file(ioas_id, mfd, start, length, iova_p)     \
	ASSERT_EQ(0,                                                         \
		  _test_ioctl_ioas_map_file(                                 \
			  self->fd, ioas_id, mfd, start, length, iova_p,     \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
711 
/* Cap iommufd's temporary allocation size to exercise splitting paths */
static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

/* 65536 restores the kernel's normal limit -- keep in sync with iommufd */
#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
729 
/*
 * Close the test's iommufd and verify no page references leaked: reopen
 * /dev/iommu and have the kernel confirm every page of the global buffer
 * is back to its baseline refcount (refs == 0 in the cmd means baseline).
 */
static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	if (fd == -1)
		return;

	/* Closing the fd must release all references the test took */
	EXPECT_EQ(0, close(fd));

	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}
750 
/* Assert @cmd returns -1 and left @expected_errno in errno */
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})

/*
 * NOTE(review): everything below this #endif sits outside the
 * __SELFTEST_IOMMUFD_UTILS include guard and would be redefined on double
 * inclusion -- confirm whether the guard should instead close at EOF.
 */
#endif
758 
759 /* @data can be NULL */
_test_cmd_get_hw_info(int fd,__u32 device_id,void * data,size_t data_len,uint32_t * capabilities,uint8_t * max_pasid)760 static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
761 				 size_t data_len, uint32_t *capabilities,
762 				 uint8_t *max_pasid)
763 {
764 	struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
765 	struct iommu_hw_info cmd = {
766 		.size = sizeof(cmd),
767 		.dev_id = device_id,
768 		.data_len = data_len,
769 		.data_uptr = (uint64_t)data,
770 		.out_capabilities = 0,
771 	};
772 	int ret;
773 
774 	ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
775 	if (ret)
776 		return ret;
777 
778 	assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);
779 
780 	/*
781 	 * The struct iommu_test_hw_info should be the one defined
782 	 * by the current kernel.
783 	 */
784 	assert(cmd.data_len == sizeof(struct iommu_test_hw_info));
785 
786 	/*
787 	 * Trailing bytes should be 0 if user buffer is larger than
788 	 * the data that kernel reports.
789 	 */
790 	if (data_len > cmd.data_len) {
791 		char *ptr = (char *)(data + cmd.data_len);
792 		int idx = 0;
793 
794 		while (idx < data_len - cmd.data_len) {
795 			assert(!*(ptr + idx));
796 			idx++;
797 		}
798 	}
799 
800 	if (info) {
801 		if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
802 			assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
803 		if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
804 			assert(!info->flags);
805 	}
806 
807 	if (max_pasid)
808 		*max_pasid = cmd.out_max_pasid_log2;
809 
810 	if (capabilities)
811 		*capabilities = cmd.out_capabilities;
812 
813 	return 0;
814 }
815 
/* Assert IOMMU_GET_HW_INFO succeeds for @device_id with @data/@data_len */
#define test_cmd_get_hw_info(device_id, data, data_len)               \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
					   data_len, NULL, NULL))

/* Assert IOMMU_GET_HW_INFO fails and errno equals @_errno */
#define test_err_get_hw_info(_errno, device_id, data, data_len)               \
	EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
						   data_len, NULL, NULL))

/* Fetch only out_capabilities into @caps; note @mask is currently unused */
#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, \
					   0, &caps, NULL))

/* Fetch only out_max_pasid_log2 into *@max_pasid */
#define test_cmd_get_hw_info_pasid(device_id, max_pasid)              \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, \
					   0, NULL, max_pasid))
831 
/*
 * Allocate an IOPF queue, returning its iommufd object ID in *@fault_id
 * and the file descriptor used to read faults / write responses in
 * *@fault_fd.
 */
static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
	struct iommu_fault_alloc cmd = {
		.size = sizeof(cmd),
	};
	int rc;

	rc = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
	if (rc)
		return rc;

	*fault_fd = cmd.out_fault_fd;
	*fault_id = cmd.out_fault_id;
	return 0;
}
846 
/*
 * Allocate a fault queue and assert that both the returned object ID and
 * file descriptor are non-zero.
 */
#define test_ioctl_fault_alloc(fault_id, fault_fd)                       \
	({                                                               \
		ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
						     fault_fd));         \
		ASSERT_NE(0, *(fault_id));                               \
		ASSERT_NE(0, *(fault_fd));                               \
	})
854 
/*
 * Inject an I/O page fault for @device_id/@pasid via the mock driver,
 * then consume it from @fault_fd and send back a success response.
 * Returns 0 on success, the ioctl error, or -EIO on a short/failed
 * read or write of the fixed-size fault/response records.
 */
static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 pasid,
				  __u32 fault_fd)
{
	struct iommu_test_cmd trigger_iopf_cmd = {
		.size = sizeof(trigger_iopf_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
		.trigger_iopf = {
			.dev_id = device_id,
			.pasid = pasid,
			.grpid = 0x2,
			.perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
			.addr = 0xdeadbeaf,
		},
	};
	struct iommu_hwpt_page_response response = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	struct iommu_hwpt_pgfault fault = {};
	ssize_t bytes;
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
	if (ret)
		return ret;

	/*
	 * The fault FD transfers whole records; a partial read would leave
	 * fault.cookie invalid, so require the full struct.
	 */
	bytes = read(fault_fd, &fault, sizeof(fault));
	if (bytes != sizeof(fault))
		return -EIO;

	response.cookie = fault.cookie;

	bytes = write(fault_fd, &response, sizeof(response));
	if (bytes != sizeof(response))
		return -EIO;

	return 0;
}
892 
/* Trigger and handle one IOPF on the default PASID 0x1 */
#define test_cmd_trigger_iopf(device_id, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, 0x1, fault_fd))
/* Trigger and handle one IOPF on an explicit @pasid */
#define test_cmd_trigger_iopf_pasid(device_id, pasid, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, \
					    pasid, fault_fd))
898 
/*
 * Allocate a vIOMMU object of @type over @hwpt_id for @device_id.
 * On success, store the new object ID in *@viommu_id when non-NULL.
 */
static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
				  __u32 type, __u32 flags, __u32 *viommu_id)
{
	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.type = type,
		.dev_id = device_id,
		.hwpt_id = hwpt_id,
	};
	int rc;

	rc = ioctl(fd, IOMMU_VIOMMU_ALLOC, &cmd);
	if (rc)
		return rc;

	if (viommu_id)
		*viommu_id = cmd.out_viommu_id;
	return 0;
}
918 
/* Assert vIOMMU allocation succeeds (flags fixed to 0) */
#define test_cmd_viommu_alloc(device_id, hwpt_id, type, viommu_id)        \
	ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, \
					    type, 0, viommu_id))
/* Assert vIOMMU allocation fails with errno @_errno (flags fixed to 0) */
#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, viommu_id) \
	EXPECT_ERRNO(_errno,                                               \
		     _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id,  \
					    type, 0, viommu_id))
926 
/*
 * Create a virtual device on @viommu_id binding @idev_id to the guest
 * visible @virt_id. On success, store the new object ID in *@vdev_id
 * when non-NULL.
 */
static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
				   __u64 virt_id, __u32 *vdev_id)
{
	struct iommu_vdevice_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = idev_id,
		.viommu_id = viommu_id,
		.virt_id = virt_id,
	};
	int rc;

	rc = ioctl(fd, IOMMU_VDEVICE_ALLOC, &cmd);
	if (rc)
		return rc;

	if (vdev_id)
		*vdev_id = cmd.out_vdevice_id;
	return 0;
}
945 
/* Assert vdevice allocation succeeds */
#define test_cmd_vdevice_alloc(viommu_id, idev_id, virt_id, vdev_id)       \
	ASSERT_EQ(0, _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
					     virt_id, vdev_id))
/* Assert vdevice allocation fails with errno @_errno */
#define test_err_vdevice_alloc(_errno, viommu_id, idev_id, virt_id, vdev_id) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
					     virt_id, vdev_id))
953 
/*
 * Allocate a virtual event queue of @type on @viommu_id with a fixed
 * depth of 2 entries. The object ID and read FD are returned through
 * the optional *@veventq_id and *@veventq_fd.
 */
static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
				   __u32 *veventq_id, __u32 *veventq_fd)
{
	struct iommu_veventq_alloc cmd = {
		.size = sizeof(cmd),
		.type = type,
		.veventq_depth = 2,
		.viommu_id = viommu_id,
	};
	int rc;

	rc = ioctl(fd, IOMMU_VEVENTQ_ALLOC, &cmd);
	if (rc)
		return rc;

	if (veventq_fd)
		*veventq_fd = cmd.out_veventq_fd;
	if (veventq_id)
		*veventq_id = cmd.out_veventq_id;
	return 0;
}
974 
/* Assert vEVENTQ allocation succeeds */
#define test_cmd_veventq_alloc(viommu_id, type, veventq_id, veventq_fd) \
	ASSERT_EQ(0, _test_cmd_veventq_alloc(self->fd, viommu_id, type, \
					     veventq_id, veventq_fd))
/* Assert vEVENTQ allocation fails with errno @_errno */
#define test_err_veventq_alloc(_errno, viommu_id, type, veventq_id,     \
			       veventq_fd)                              \
	EXPECT_ERRNO(_errno,                                            \
		     _test_cmd_veventq_alloc(self->fd, viommu_id, type, \
					     veventq_id, veventq_fd))
983 
/*
 * Ask the mock driver to inject @nvevents vIOMMU events for @dev_id.
 * Returns 0 on success (including when @nvevents is 0) or -1 on the
 * first failing ioctl.
 *
 * Fix: the original returned an uninitialized local when @nvevents was
 * 0, which is undefined behavior; a successful loop always left it 0.
 */
static int _test_cmd_trigger_vevents(int fd, __u32 dev_id, __u32 nvevents)
{
	struct iommu_test_cmd trigger_vevent_cmd = {
		.size = sizeof(trigger_vevent_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_VEVENT,
		.trigger_vevent = {
			.dev_id = dev_id,
		},
	};

	while (nvevents--) {
		if (ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
			  &trigger_vevent_cmd) < 0)
			return -1;
	}
	return 0;
}
1003 
/* Assert that @nvevents vEVENTs are injected successfully for @dev_id */
#define test_cmd_trigger_vevents(dev_id, nvevents) \
	ASSERT_EQ(0, _test_cmd_trigger_vevents(self->fd, dev_id, nvevents))
1006 
/*
 * Read and validate @nvevents records from @event_fd. Each record is a
 * struct iommufd_vevent_header followed by the selftest event payload.
 * Fails with EOVERFLOW on lost events or a sequence gap > 1, and with
 * EINVAL if a payload's virt_id does not match @virt_id. *@prev_seq
 * carries the last-seen sequence number across calls.
 * NOTE(review): @fd is unused here; kept for signature symmetry with
 * the other helpers.
 *
 * Fix: the payload pointer was previously computed from the start of
 * the buffer ('data + sizeof(*hdr)'), so every iteration re-checked the
 * FIRST event's virt_id; it must follow the current record's header.
 */
static int _test_cmd_read_vevents(int fd, __u32 event_fd, __u32 nvevents,
				  __u32 virt_id, int *prev_seq)
{
	struct pollfd pollfd = { .fd = event_fd, .events = POLLIN };
	struct iommu_viommu_event_selftest *event;
	struct iommufd_vevent_header *hdr;
	ssize_t bytes;
	void *data;
	int ret, i;

	/* Wait up to 1s for the events to arrive */
	ret = poll(&pollfd, 1, 1000);
	if (ret < 0)
		return -1;

	data = calloc(nvevents, sizeof(*hdr) + sizeof(*event));
	if (!data) {
		errno = ENOMEM;
		return -1;
	}

	bytes = read(event_fd, data,
		     nvevents * (sizeof(*hdr) + sizeof(*event)));
	if (bytes <= 0) {
		errno = EFAULT;
		ret = -1;
		goto out_free;
	}

	for (i = 0; i < nvevents; i++) {
		hdr = data + i * (sizeof(*hdr) + sizeof(*event));

		if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS ||
		    hdr->sequence - *prev_seq > 1) {
			*prev_seq = hdr->sequence;
			errno = EOVERFLOW;
			ret = -1;
			goto out_free;
		}
		*prev_seq = hdr->sequence;
		/* The payload immediately follows its own header */
		event = (void *)hdr + sizeof(*hdr);
		if (event->virt_id != virt_id) {
			errno = EINVAL;
			ret = -1;
			goto out_free;
		}
	}

	ret = 0;
out_free:
	free(data);
	return ret;
}
1059 
/* Assert @nvevents events are read and validated from @event_fd */
#define test_cmd_read_vevents(event_fd, nvevents, virt_id, prev_seq)      \
	ASSERT_EQ(0, _test_cmd_read_vevents(self->fd, event_fd, nvevents, \
					    virt_id, prev_seq))
/* Assert reading events fails with errno @_errno */
#define test_err_read_vevents(_errno, event_fd, nvevents, virt_id, prev_seq) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_read_vevents(self->fd, event_fd, nvevents,    \
					    virt_id, prev_seq))
1067 
/*
 * Attach @pasid of mock device @stdev_id to the page table object
 * @pt_id. Returns the raw ioctl result.
 */
static int _test_cmd_pasid_attach(int fd, __u32 stdev_id, __u32 pasid,
				  __u32 pt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_ATTACH,
		.id = stdev_id,
		.pasid_attach = {
			.pasid = pasid,
			.pt_id = pt_id,
		},
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_ATTACH), &cmd);
}
1084 
/* Assert PASID attach to @hwpt_id succeeds on self->stdev_id */
#define test_cmd_pasid_attach(pasid, hwpt_id) \
	ASSERT_EQ(0, _test_cmd_pasid_attach(self->fd, self->stdev_id, \
					    pasid, hwpt_id))

/* Assert PASID attach fails with errno @_errno */
#define test_err_pasid_attach(_errno, pasid, hwpt_id) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_pasid_attach(self->fd, self->stdev_id, \
					    pasid, hwpt_id))
1093 
/*
 * Replace the page table attached to @pasid of mock device @stdev_id
 * with @pt_id. Returns the raw ioctl result.
 */
static int _test_cmd_pasid_replace(int fd, __u32 stdev_id, __u32 pasid,
				   __u32 pt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_REPLACE,
		.id = stdev_id,
		.pasid_replace = {
			.pasid = pasid,
			.pt_id = pt_id,
		},
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_REPLACE), &cmd);
}
1110 
/* Assert PASID replace with @hwpt_id succeeds on self->stdev_id */
#define test_cmd_pasid_replace(pasid, hwpt_id) \
	ASSERT_EQ(0, _test_cmd_pasid_replace(self->fd, self->stdev_id, \
					     pasid, hwpt_id))

/* Assert PASID replace fails with errno @_errno */
#define test_err_pasid_replace(_errno, pasid, hwpt_id) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_pasid_replace(self->fd, self->stdev_id, \
					     pasid, hwpt_id))
1119 
/*
 * Detach @pasid of mock device @stdev_id from whatever page table it is
 * attached to. Returns the raw ioctl result.
 */
static int _test_cmd_pasid_detach(int fd, __u32 stdev_id, __u32 pasid)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_DETACH,
		.id = stdev_id,
		.pasid_detach = {
			.pasid = pasid,
		},
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_DETACH), &cmd);
}
1134 
/* Assert PASID detach succeeds on self->stdev_id */
#define test_cmd_pasid_detach(pasid) \
	ASSERT_EQ(0, _test_cmd_pasid_detach(self->fd, self->stdev_id, pasid))
1137 
/*
 * Ask the mock driver to verify that @pasid of @stdev_id is currently
 * attached to @hwpt_id. Returns the raw ioctl result.
 */
static int test_cmd_pasid_check_hwpt(int fd, __u32 stdev_id, __u32 pasid,
				     __u32 hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_CHECK_HWPT,
		.id = stdev_id,
		.pasid_check = {
			.pasid = pasid,
			.hwpt_id = hwpt_id,
		},
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_CHECK_HWPT), &cmd);
}
1154