// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2024 Google LLC.
 */
#include <kunit/test.h>
#include <linux/io-pgtable.h>

#include "arm-smmu-v3.h"

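/*
 * Wraps the driver's generic entry writer so the test can observe every sync
 * point: it counts syncs and records whether the entry under test was ever
 * left invalid while being moved from init_entry to target_entry.
 */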
struct arm_smmu_test_writer {
	struct arm_smmu_entry_writer writer;
	struct kunit *test;
	const __le64 *init_entry;
	const __le64 *target_entry;
	__le64 *entry;

	bool invalid_entry_written;
	unsigned int num_syncs;
};

#define NUM_ENTRY_QWORDS 8
#define NUM_EXPECTED_SYNCS(x) x

static struct arm_smmu_ste bypass_ste;
static struct arm_smmu_ste abort_ste;
static struct arm_smmu_device smmu = {
	.features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR
};
static struct mm_struct sva_mm = {
	.pgd = (void *)0xdaedbeefdeadbeefULL,
};

enum arm_smmu_test_master_feat {
	ARM_SMMU_MASTER_TEST_ATS = BIT(0),
	ARM_SMMU_MASTER_TEST_STALL = BIT(1),
	ARM_SMMU_MASTER_TEST_NESTED = BIT(2),
};

static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
				      enum arm_smmu_test_master_feat feat);

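/*
 * Returns true if @entry and @target disagree in any bit that @used_bits
 * marks as used and that is not excluded by the @safe mask.
 */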
static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
						const __le64 *used_bits,
						const __le64 *target,
						const __le64 *safe,
						unsigned int length)
{
	bool differs = false;
	unsigned int i;

	for (i = 0; i < length; i++) {
		__le64 used = used_bits[i] & ~safe[i];

		if ((entry[i] & used) != (target[i] & used))
			differs = true;
	}
	return differs;
}

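/*
 * Stand-in for the driver's sync callback. Each call marks a point at which
 * the SMMU could observe the in-progress entry, so the entry must either be
 * invalid (tracked via invalid_entry_written) or match the initial or target
 * value in all currently-used bits.
 */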
static void
arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
{
	struct arm_smmu_test_writer *test_writer =
		container_of(writer, struct arm_smmu_test_writer, writer);
	__le64 *entry_used_bits;
	__le64 *safe_target;
	__le64 *safe_init;

	entry_used_bits = kunit_kzalloc(
		test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
		GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);

	safe_target = kunit_kzalloc(test_writer->test,
				    sizeof(*safe_target) * NUM_ENTRY_QWORDS,
				    GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test_writer->test, safe_target);

	safe_init = kunit_kzalloc(test_writer->test,
				  sizeof(*safe_init) * NUM_ENTRY_QWORDS,
				  GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test_writer->test, safe_init);

	pr_debug("STE value is now set to: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8,
			     test_writer->entry,
			     NUM_ENTRY_QWORDS * sizeof(*test_writer->entry),
			     false);

	test_writer->num_syncs += 1;
	if (!test_writer->entry[0]) {
		test_writer->invalid_entry_written = true;
	} else {
		/*
		 * At any stage in a hitless transition, the entry must be
		 * equivalent to either the initial entry or the target entry
		 * when only considering the bits used by the current
		 * configuration.
		 */
		writer->ops->get_used(test_writer->entry, entry_used_bits);
		if (writer->ops->get_update_safe) {
			writer->ops->get_update_safe(test_writer->entry,
						     test_writer->init_entry,
						     safe_init);
			writer->ops->get_update_safe(test_writer->entry,
						     test_writer->target_entry,
						     safe_target);
		}
		KUNIT_EXPECT_FALSE(
			test_writer->test,
			arm_smmu_entry_differs_in_used_bits(
				test_writer->entry, entry_used_bits,
				test_writer->init_entry, safe_init,
				NUM_ENTRY_QWORDS) &&
				arm_smmu_entry_differs_in_used_bits(
					test_writer->entry, entry_used_bits,
					test_writer->target_entry, safe_target,
					NUM_ENTRY_QWORDS));
	}
}

static void
arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
				       const __le64 *ste)
{
	__le64 used_bits[NUM_ENTRY_QWORDS] = {};

	writer->ops->get_used(ste, used_bits);
	pr_debug("Entry used bits: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, used_bits,
			     sizeof(used_bits), false);
}

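/*
 * Ops tables wiring the recording sync callback into the driver's shared
 * entry-writing algorithm; get_used (and, for STEs, get_update_safe) are the
 * driver's own helpers.
 */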
static const struct arm_smmu_entry_writer_ops test_ste_ops = {
	.sync = arm_smmu_test_writer_record_syncs,
	.get_used = arm_smmu_get_ste_used,
	.get_update_safe = arm_smmu_get_ste_update_safe,
};

static const struct arm_smmu_entry_writer_ops test_cd_ops = {
	.sync = arm_smmu_test_writer_record_syncs,
	.get_used = arm_smmu_get_cd_used,
};

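/*
 * Drive arm_smmu_write_entry() from @cur to @target and verify that the
 * transition used the expected number of syncs, was (or was not) hitless,
 * and left the entry exactly equal to @target.
 */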
static void arm_smmu_v3_test_ste_expect_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected,
	bool hitless)
{
	struct arm_smmu_ste cur_copy = *cur;
	struct arm_smmu_test_writer test_writer = {
		.writer = {
			.ops = &test_ste_ops,
		},
		.test = test,
		.init_entry = cur->data,
		.target_entry = target->data,
		.entry = cur_copy.data,
		.num_syncs = 0,
		.invalid_entry_written = false,
	};

	pr_debug("STE initial value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
	pr_debug("STE target value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, target->data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
					       target->data);

	arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);

	KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
	KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}

static void arm_smmu_v3_test_ste_expect_non_hitless_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_ste_expect_transition(test, cur, target,
					       num_syncs_expected, false);
}

static void arm_smmu_v3_test_ste_expect_hitless_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_ste_expect_transition(test, cur, target,
					       num_syncs_expected, true);
}

static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;

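/*
 * Build a stage-1 CD-table STE for a fake master. With
 * ARM_SMMU_MASTER_TEST_NESTED the upper qwords are merged from an S2 STE so
 * the result looks like a nested (S1-over-S2) configuration.
 */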
static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
					   unsigned int s1dss,
					   const dma_addr_t dma_addr,
					   enum arm_smmu_test_master_feat feat)
{
	bool ats_enabled = feat & ARM_SMMU_MASTER_TEST_ATS;
	bool stall_enabled = feat & ARM_SMMU_MASTER_TEST_STALL;

	struct arm_smmu_master master = {
		.ats_enabled = ats_enabled,
		.cd_table.cdtab_dma = dma_addr,
		.cd_table.s1cdmax = 0xFF,
		.cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2,
		.smmu = &smmu,
		.stall_enabled = stall_enabled,
	};

	arm_smmu_make_cdtable_ste(ste, &master, ats_enabled, s1dss);
	if (feat & ARM_SMMU_MASTER_TEST_NESTED) {
		struct arm_smmu_ste s2ste;
		int i;

		arm_smmu_test_make_s2_ste(&s2ste,
					  feat & ~ARM_SMMU_MASTER_TEST_NESTED);
		ste->data[0] |= cpu_to_le64(
			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_NESTED));
		ste->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
		for (i = 2; i < NUM_ENTRY_QWORDS; i++)
			ste->data[i] = s2ste.data[i];
	}
}

static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
{
	/*
	 * Bypass STEs have used bits in the first two qwords, while abort STEs
	 * only have used bits in the first qword. Transitioning from bypass to
	 * abort requires two syncs: the first to set the first qword and make
	 * the STE into an abort, the second to clean up the second qword.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test)
{
	/*
	 * Transitioning from abort to bypass also requires two syncs: the first
	 * to set the second qword data required by the bypass STE, and the
	 * second to set the first qword and switch to bypass.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test)
{
	struct arm_smmu_ste ste;
	struct arm_smmu_ste s1dss_bypass;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);

	/*
	 * Flipping s1dss on a CD table STE only involves changes to the second
	 * qword of an STE and can be done in a single write.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(1));
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &s1dss_bypass, &ste, NUM_EXPECTED_SYNCS(1));
}

static void
arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test)
{
	struct arm_smmu_ste s1dss_bypass;

	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2));
}

static void
arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test)
{
	struct arm_smmu_ste s1dss_bypass;

	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2));
}

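/*
 * Build a stage-2 STE for a fake master. The VTCR/VTTBR values are arbitrary
 * non-zero patterns so that every relevant field participates in the
 * transition checks.
 */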
static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
				      enum arm_smmu_test_master_feat feat)
{
	bool ats_enabled = feat & ARM_SMMU_MASTER_TEST_ATS;
	bool stall_enabled = feat & ARM_SMMU_MASTER_TEST_STALL;
	struct arm_smmu_master master = {
		.ats_enabled = ats_enabled,
		.smmu = &smmu,
		.stall_enabled = stall_enabled,
	};
	struct io_pgtable io_pgtable = {};
	struct arm_smmu_domain smmu_domain = {
		.pgtbl_ops = &io_pgtable.ops,
	};

	io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;

	arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain, ats_enabled);
}

static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test)
{
	struct arm_smmu_ste ste;
	struct arm_smmu_ste ste_2;

	/*
	 * Although no flow resembles this in practice, one way to force an STE
	 * update to be non-hitless is to change its CD table pointer as well
	 * as its S1DSS field in the same update.
	 */
	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS,
				       0x4B4B4b4B4B, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_non_hitless_transition(
		test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3));
}

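/* Same flow as the STE helpers above, but exercising CD writes. */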
static void arm_smmu_v3_test_cd_expect_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
	bool hitless)
{
	struct arm_smmu_cd cur_copy = *cur;
	struct arm_smmu_test_writer test_writer = {
		.writer = {
			.ops = &test_cd_ops,
		},
		.test = test,
		.init_entry = cur->data,
		.target_entry = target->data,
		.entry = cur_copy.data,
		.num_syncs = 0,
		.invalid_entry_written = false,
	};

	pr_debug("CD initial value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
	pr_debug("CD target value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, target->data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
					       target->data);

	arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);

	KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
	KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}

static void arm_smmu_v3_test_cd_expect_non_hitless_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_cd_expect_transition(test, cur, target,
					      num_syncs_expected, false);
}

static void arm_smmu_v3_test_cd_expect_hitless_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_cd_expect_transition(test, cur, target,
					      num_syncs_expected, true);
}

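/*
 * Build a stage-1 CD from fake io-pgtable state; the TTBR/TCR/MAIR values
 * are arbitrary non-zero patterns.
 */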
static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};
	struct io_pgtable io_pgtable = {};
	struct arm_smmu_domain smmu_domain = {
		.pgtbl_ops = &io_pgtable.ops,
		.cd = {
			.asid = asid,
		},
	};

	io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4;
	io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL;

	arm_smmu_make_s1_cd(cd, &master, &smmu_domain);
}

static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_s1_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_s1_cd(&cd, 778);
	arm_smmu_test_make_s1_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
						      NUM_EXPECTED_SYNCS(1));
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
						      NUM_EXPECTED_SYNCS(1));
}

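/*
 * SVA CDs are built from an mm_struct; the release variant passes a NULL mm
 * to build the CD used once the address space has exited.
 */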
static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};

	arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid);
}

static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd,
					      unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};

	arm_smmu_make_sva_cd(cd, &master, NULL, asid);
}

static void arm_smmu_v3_write_ste_test_s1_to_s2_stall(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_STALL);
	arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_STALL);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_s2_to_s1_stall(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_STALL);
	arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_STALL);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void
arm_smmu_v3_write_ste_test_nested_s1dssbypass_to_s1bypass(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(
		&s1_ste, STRTAB_STE_1_S1DSS_BYPASS, fake_cdtab_dma_addr,
		ARM_SMMU_MASTER_TEST_ATS | ARM_SMMU_MASTER_TEST_NESTED);
	arm_smmu_test_make_s2_ste(&s2_ste, 0);
	/* Expect an additional sync to unset ignored bits: EATS and MEV */
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void
arm_smmu_v3_write_ste_test_nested_s1bypass_to_s1dssbypass(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(
		&s1_ste, STRTAB_STE_1_S1DSS_BYPASS, fake_cdtab_dma_addr,
		ARM_SMMU_MASTER_TEST_ATS | ARM_SMMU_MASTER_TEST_NESTED);
	arm_smmu_test_make_s2_ste(&s2_ste, 0);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_sva_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test)
{
	struct arm_smmu_cd cd;
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_sva_cd(&cd, 1997);
	arm_smmu_test_make_sva_release_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
						      NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
						      NUM_EXPECTED_SYNCS(2));
}

static struct kunit_case arm_smmu_v3_test_cases[] = {
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_s1dss_change),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2_stall),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1_stall),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_nested_s1dssbypass_to_s1bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_nested_s1bypass_to_s1dssbypass),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release),
	{},
};

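/* Build the shared bypass and abort STEs once for the whole suite. */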
static int arm_smmu_v3_test_suite_init(struct kunit_suite *test)
{
	arm_smmu_make_bypass_ste(&smmu, &bypass_ste);
	arm_smmu_make_abort_ste(&abort_ste);
	return 0;
}

static struct kunit_suite arm_smmu_v3_test_module = {
	.name = "arm-smmu-v3-kunit-test",
	.suite_init = arm_smmu_v3_test_suite_init,
	.test_cases = arm_smmu_v3_test_cases,
};
kunit_test_suites(&arm_smmu_v3_test_module);

MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
MODULE_DESCRIPTION("KUnit tests for arm-smmu-v3 driver");
MODULE_LICENSE("GPL v2");