/*
 * GIC tests
 *
 * GICv2
 *   + test sending/receiving IPIs
 *   + MMIO access tests
 * GICv3
 *   + test sending/receiving IPIs
 *
 * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <libcflat.h>
#include <errata.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/gic.h>
#include <asm/gic-v3-its.h>
#include <asm/smp.h>
#include <asm/barrier.h>
#include <asm/io.h>

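/* The IPI tests use SGI INTID 1, sent by CPU1 */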
#define IPI_SENDER	1
#define IPI_IRQ		1

struct gic {
	struct {
		void (*send_self)(void);
		void (*send_broadcast)(void);
	} ipi;
};

static struct gic *gic;
static int acked[NR_CPUS], spurious[NR_CPUS];
static int bad_sender[NR_CPUS], bad_irq[NR_CPUS];
static cpumask_t ready;

static void nr_cpu_check(int nr)
{
	if (nr_cpus < nr)
		report_abort("At least %d cpus required", nr);
}

static void wait_on_ready(void)
{
	cpumask_set_cpu(smp_processor_id(), &ready);
	while (!cpumask_full(&ready))
		cpu_relax();
}

static void stats_reset(void)
{
	int i;

	for (i = 0; i < nr_cpus; ++i) {
		acked[i] = 0;
		bad_sender[i] = -1;
		bad_irq[i] = -1;
	}
	smp_wmb();
}

static void check_acked(const char *testname, cpumask_t *mask)
{
	int missing = 0, extra = 0, unexpected = 0;
	int nr_pass, cpu, i;
	bool bad = false;

	/* Wait up to 5s for all interrupts to be delivered */
	for (i = 0; i < 50; ++i) {
		mdelay(100);
		nr_pass = 0;
		for_each_present_cpu(cpu) {
			smp_rmb();
			nr_pass += cpumask_test_cpu(cpu, mask) ?
				acked[cpu] == 1 : acked[cpu] == 0;

			if (bad_sender[cpu] != -1) {
				printf("cpu%d received IPI from wrong sender %d\n",
					cpu, bad_sender[cpu]);
				bad = true;
			}

			if (bad_irq[cpu] != -1) {
				printf("cpu%d received wrong irq %d\n",
					cpu, bad_irq[cpu]);
				bad = true;
			}
		}
		if (nr_pass == nr_cpus) {
			report(!bad, "%s", testname);
			if (i)
				report_info("took more than %d ms", i * 100);
			return;
		}
	}

	for_each_present_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask)) {
			if (!acked[cpu])
				++missing;
			else if (acked[cpu] > 1)
				++extra;
		} else {
			if (acked[cpu])
				++unexpected;
		}
	}

	report(false, "%s", testname);
	report_info("Timed out (5s). ACKS: missing=%d extra=%d unexpected=%d",
		    missing, extra, unexpected);
}

static void check_spurious(void)
{
	int cpu;

	smp_rmb();
	for_each_present_cpu(cpu) {
		if (spurious[cpu])
			report_info("WARN: cpu%d got %d spurious interrupts",
				cpu, spurious[cpu]);
	}
}

static void check_ipi_sender(u32 irqstat)
{
	if (gic_version() == 2) {
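		/*
		 * For SGIs, GICv2 GICC_IAR bits [12:10] hold the CPU ID of
		 * the requesting processor.
		 */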
		int src = (irqstat >> 10) & 7;

		if (src != IPI_SENDER)
			bad_sender[smp_processor_id()] = src;
	}
}

static void check_irqnr(u32 irqnr)
{
	if (irqnr != IPI_IRQ)
		bad_irq[smp_processor_id()] = irqnr;
}

static void ipi_handler(struct pt_regs *regs __unused)
{
	u32 irqstat = gic_read_iar();
	u32 irqnr = gic_iar_irqnr(irqstat);

	if (irqnr != GICC_INT_SPURIOUS) {
		gic_write_eoir(irqstat);
		smp_rmb(); /* pairs with wmb in stats_reset */
		++acked[smp_processor_id()];
		check_ipi_sender(irqstat);
		check_irqnr(irqnr);
		smp_wmb(); /* pairs with rmb in check_acked */
	} else {
		++spurious[smp_processor_id()];
		smp_wmb();
	}
}

static void setup_irq(irq_handler_fn handler)
{
	gic_enable_defaults();
#ifdef __arm__
	install_exception_handler(EXCPTN_IRQ, handler);
#else
	install_irq_handler(EL1H_IRQ, handler);
#endif
	local_irq_enable();
}

#if defined(__aarch64__)
struct its_event {
	int cpu_id;
	int lpi_id;
};

struct its_stats {
	struct its_event expected;
	struct its_event observed;
};

static struct its_stats lpi_stats;

static void lpi_handler(struct pt_regs *regs __unused)
{
	u32 irqstat = gic_read_iar();
	int irqnr = gic_iar_irqnr(irqstat);

	gic_write_eoir(irqstat);
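	/* LPI INTIDs start at 8192; lower INTIDs cannot be LPIs */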
	assert(irqnr >= 8192);
	smp_rmb(); /* pairs with wmb in lpi_stats_expect */
	lpi_stats.observed.cpu_id = smp_processor_id();
	lpi_stats.observed.lpi_id = irqnr;
	acked[lpi_stats.observed.cpu_id]++;
	smp_wmb(); /* pairs with rmb in check_lpi_stats */
}

static void lpi_stats_expect(int exp_cpu_id, int exp_lpi_id)
{
	lpi_stats.expected.cpu_id = exp_cpu_id;
	lpi_stats.expected.lpi_id = exp_lpi_id;
	lpi_stats.observed.cpu_id = -1;
	lpi_stats.observed.lpi_id = -1;
	smp_wmb(); /* pairs with rmb in handler */
}

static void check_lpi_stats(const char *msg)
{
	int i;

	for (i = 0; i < 50; i++) {
		mdelay(100);
		smp_rmb(); /* pairs with wmb in lpi_handler */
		if (lpi_stats.observed.cpu_id == lpi_stats.expected.cpu_id &&
		    lpi_stats.observed.lpi_id == lpi_stats.expected.lpi_id) {
			report(true, "%s", msg);
			return;
		}
	}

	if (lpi_stats.observed.cpu_id == -1 && lpi_stats.observed.lpi_id == -1) {
		report_info("No LPI received, but (cpuid=%d, intid=%d) "
			    "was expected", lpi_stats.expected.cpu_id,
			    lpi_stats.expected.lpi_id);
	} else {
		report_info("Unexpected LPI (cpuid=%d, intid=%d)",
			    lpi_stats.observed.cpu_id,
			    lpi_stats.observed.lpi_id);
	}
	report(false, "%s", msg);
}

static void secondary_lpi_test(void)
{
	setup_irq(lpi_handler);
	cpumask_set_cpu(smp_processor_id(), &ready);
	while (1)
		wfi();
}

static void check_lpi_hits(int *expected, const char *msg)
{
	bool pass = true;
	int i;

	for_each_present_cpu(i) {
		if (acked[i] != expected[i]) {
			report_info("expected %d LPIs on PE #%d, %d observed",
				    expected[i], i, acked[i]);
			pass = false;
			break;
		}
	}
	report(pass, "%s", msg);
}
#endif

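/*
 * GICD_SGIR layout: bits [25:24] are the TargetListFilter (0b01 = all
 * CPUs except the requester, 0b10 = the requester only), bits [23:16]
 * the CPUTargetList and bits [3:0] the SGI INTID.
 */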
static void gicv2_ipi_send_self(void)
{
	writel(2 << 24 | IPI_IRQ, gicv2_dist_base() + GICD_SGIR);
}

static void gicv2_ipi_send_broadcast(void)
{
	writel(1 << 24 | IPI_IRQ, gicv2_dist_base() + GICD_SGIR);
}

static void gicv3_ipi_send_self(void)
{
	gic_ipi_send_single(IPI_IRQ, smp_processor_id());
}

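/*
 * Setting IRM (bit 40) in ICC_SGI1R_EL1 broadcasts the SGI to all PEs
 * except the sender; bits [27:24] hold the SGI INTID.
 */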
static void gicv3_ipi_send_broadcast(void)
{
	gicv3_write_sgi1r(1ULL << 40 | IPI_IRQ << 24);
	isb();
}

static void ipi_test_self(void)
{
	cpumask_t mask;

	report_prefix_push("self");
	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(smp_processor_id(), &mask);
	gic->ipi.send_self();
	check_acked("IPI: self", &mask);
	report_prefix_pop();
}

static void ipi_test_smp(void)
{
	cpumask_t mask;
	int i;

	report_prefix_push("target-list");
	stats_reset();
	cpumask_copy(&mask, &cpu_present_mask);
	for (i = smp_processor_id() & 1; i < nr_cpus; i += 2)
		cpumask_clear_cpu(i, &mask);
	gic_ipi_send_mask(IPI_IRQ, &mask);
	check_acked("IPI: directed", &mask);
	report_prefix_pop();

	report_prefix_push("broadcast");
	stats_reset();
	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	gic->ipi.send_broadcast();
	check_acked("IPI: broadcast", &mask);
	report_prefix_pop();
}

static void ipi_send(void)
{
	setup_irq(ipi_handler);
	wait_on_ready();
	ipi_test_self();
	ipi_test_smp();
	check_spurious();
	exit(report_summary());
}

static void ipi_recv(void)
{
	setup_irq(ipi_handler);
	cpumask_set_cpu(smp_processor_id(), &ready);
	while (1)
		wfi();
}

static void ipi_test(void *data __unused)
{
	if (smp_processor_id() == IPI_SENDER)
		ipi_send();
	else
		ipi_recv();
}

static struct gic gicv2 = {
	.ipi = {
		.send_self = gicv2_ipi_send_self,
		.send_broadcast = gicv2_ipi_send_broadcast,
	},
};

static struct gic gicv3 = {
	.ipi = {
		.send_self = gicv3_ipi_send_self,
		.send_broadcast = gicv3_ipi_send_broadcast,
	},
};

static void ipi_clear_active_handler(struct pt_regs *regs __unused)
{
	u32 irqstat = gic_read_iar();
	u32 irqnr = gic_iar_irqnr(irqstat);

	if (irqnr != GICC_INT_SPURIOUS) {
		void *base;
		u32 val = 1 << IPI_IRQ;

		if (gic_version() == 2)
			base = gicv2_dist_base();
		else
			base = gicv3_sgi_base();

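		/*
		 * Deactivate the IPI by writing its bit to ICACTIVER
		 * (write-one-to-clear) instead of doing an EOI.
		 */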
		writel(val, base + GICD_ICACTIVER);

		smp_rmb(); /* pairs with wmb in stats_reset */
		++acked[smp_processor_id()];
		check_irqnr(irqnr);
		smp_wmb(); /* pairs with rmb in check_acked */
	} else {
		++spurious[smp_processor_id()];
		smp_wmb();
	}
}

static void run_active_clear_test(void)
{
	report_prefix_push("active");
	setup_irq(ipi_clear_active_handler);
	ipi_test_self();
	report_prefix_pop();
}

static bool test_ro_pattern_32(void *address, u32 pattern, u32 orig)
{
	u32 reg;

	writel(pattern, address);
	reg = readl(address);

	if (reg != orig)
		writel(orig, address);

	return reg == orig;
}

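/*
 * Check that a register is read-only by writing several patterns; with
 * @razwi, additionally require that it reads as zero.
 */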
static bool test_readonly_32(void *address, bool razwi)
{
	u32 orig, pattern;

	orig = readl(address);
	if (razwi && orig)
		return false;

	pattern = 0xffffffff;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	pattern = 0xa5a55a5a;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	pattern = 0;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	return true;
}

static void test_typer_v2(uint32_t reg)
{
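	/* CPUNumber, bits [7:5]: number of implemented CPU interfaces minus one */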
	int nr_gic_cpus = ((reg >> 5) & 0x7) + 1;

	report_info("nr_cpus=%d", nr_cpus);
	report(nr_cpus == nr_gic_cpus, "all CPUs have interrupts");
}

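/* Extract byte @byte of @reg32, or replace it with @new. */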
#define BYTE(reg32, byte) (((reg32) >> ((byte) * 8)) & 0xff)
#define REPLACE_BYTE(reg32, byte, new) (((reg32) & ~(0xffU << ((byte) * 8))) |\
					((u32)(new) << ((byte) * 8)))

/*
 * Some registers are byte accessible; do a byte-wide read and write of
 * known content to check for this.
 * Apply a @mask to cater for special register properties.
 * @pattern contains the value already in the register.
 */
static void test_byte_access(void *base_addr, u32 pattern, u32 mask)
{
	u32 reg = readb(base_addr + 1);
	bool res;

	res = (reg == (BYTE(pattern, 1) & (mask >> 8)));
	report(res, "byte reads successful");
	if (!res)
		report_info("byte 1 of 0x%08"PRIx32" => 0x%02"PRIx32, pattern & mask, reg);

	pattern = REPLACE_BYTE(pattern, 2, 0x1f);
	writeb(BYTE(pattern, 2), base_addr + 2);
	reg = readl(base_addr);
	res = (reg == (pattern & mask));
	report(res, "byte writes successful");
	if (!res)
		report_info("writing 0x%02"PRIx32" into byte 2 => 0x%08"PRIx32,
			    BYTE(pattern, 2), reg);
}

static void test_priorities(int nr_irqs, void *priptr)
{
	u32 orig_prio, reg, pri_bits;
	u32 pri_mask, pattern;
	void *first_spi = priptr + GIC_FIRST_SPI;

	orig_prio = readl(first_spi);
	report_prefix_push("IPRIORITYR");

	/*
	 * Determine implemented number of priority bits by writing all 1's
	 * and checking the number of cleared bits in the value read back.
	 */
	writel(0xffffffff, first_spi);
	pri_mask = readl(first_spi);

	reg = ~pri_mask;
	report((((reg >> 16) == (reg & 0xffff)) &&
	        ((reg & 0xff) == ((reg >> 8) & 0xff))),
	       "consistent priority masking");
	report_info("priority mask is 0x%08"PRIx32, pri_mask);

	reg = reg & 0xff;
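	/* Priority fields are 8 bits wide; unimplemented low-order bits are RAZ */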
	for (pri_bits = 8; reg & 1; reg >>= 1, pri_bits--)
		;
	report(pri_bits >= 4, "implements at least 4 priority bits");
	report_info("%"PRIu32" priority bits implemented", pri_bits);

	pattern = 0;
	writel(pattern, first_spi);
	report(readl(first_spi) == pattern, "clearing priorities");

	/* Setting all priorities to their maximum value was tested above. */

	report(test_readonly_32(priptr + nr_irqs, true),
	       "accesses beyond limit RAZ/WI");

	writel(pattern, priptr + nr_irqs - 4);
	report(readl(priptr + nr_irqs - 4) == (pattern & pri_mask),
	       "accessing last SPIs");

	pattern = 0xff7fbf3f;
	writel(pattern, first_spi);
	report(readl(first_spi) == (pattern & pri_mask),
	       "priorities are preserved");

	/* The PRIORITY registers are byte accessible. */
	test_byte_access(first_spi, pattern, pri_mask);

	report_prefix_pop();
	writel(orig_prio, first_spi);
}

/* GICD_ITARGETSR is only used by GICv2. */
static void test_targets(int nr_irqs)
{
	void *targetsptr = gicv2_dist_base() + GICD_ITARGETSR;
	u32 orig_targets;
	u32 cpu_mask;
	u32 pattern, reg;

	orig_targets = readl(targetsptr + GIC_FIRST_SPI);
	report_prefix_push("ITARGETSR");

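	/* ITARGETSR holds one byte per INTID, one bit per CPU interface */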
	cpu_mask = (1 << nr_cpus) - 1;
	cpu_mask |= cpu_mask << 8;
	cpu_mask |= cpu_mask << 16;

	/* Check that the bits for non-implemented CPUs are RAZ/WI. */
	if (nr_cpus < 8) {
		writel(0xffffffff, targetsptr + GIC_FIRST_SPI);
		report(!(readl(targetsptr + GIC_FIRST_SPI) & ~cpu_mask),
		       "bits for non-existent CPUs masked");
		report_info("%d non-existent CPUs", 8 - nr_cpus);
	} else {
		report_skip("CPU masking (all CPUs implemented)");
	}

	report(test_readonly_32(targetsptr + nr_irqs, true),
	       "accesses beyond limit RAZ/WI");

	pattern = 0x0103020f;
	writel(pattern, targetsptr + GIC_FIRST_SPI);
	reg = readl(targetsptr + GIC_FIRST_SPI);
	report(reg == (pattern & cpu_mask), "register content preserved");
	if (reg != (pattern & cpu_mask))
		report_info("writing %08"PRIx32" reads back as %08"PRIx32,
			    pattern & cpu_mask, reg);

	/* The TARGETS registers are byte accessible. */
	test_byte_access(targetsptr + GIC_FIRST_SPI, pattern, cpu_mask);

	writel(orig_targets, targetsptr + GIC_FIRST_SPI);

	report_prefix_pop();
}

static void gic_test_mmio(void)
{
	u32 reg;
	int nr_irqs;
	void *gic_dist_base, *idreg;

	switch (gic_version()) {
	case 0x2:
		gic_dist_base = gicv2_dist_base();
		idreg = gic_dist_base + GICD_ICPIDR2;
		break;
	case 0x3:
		report_abort("GICv3 MMIO tests NYI");
	default:
		report_abort("GIC version %d not supported", gic_version());
	}

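	/* ITLinesNumber, bits [4:0]: the GIC implements 32 * (N + 1) INTIDs */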
	reg = readl(gic_dist_base + GICD_TYPER);
	nr_irqs = GICD_TYPER_IRQS(reg);
	report_info("number of implemented SPIs: %d", nr_irqs - GIC_FIRST_SPI);

	test_typer_v2(reg);

	report_info("IIDR: 0x%08"PRIx32, readl(gic_dist_base + GICD_IIDR));

	report(test_readonly_32(gic_dist_base + GICD_TYPER, false),
	       "GICD_TYPER is read-only");
	report(test_readonly_32(gic_dist_base + GICD_IIDR, false),
	       "GICD_IIDR is read-only");

	reg = readl(idreg);
	report(test_readonly_32(idreg, false), "ICPIDR2 is read-only");
	report_info("value of ICPIDR2: 0x%08"PRIx32, reg);

	test_priorities(nr_irqs, gic_dist_base + GICD_IPRIORITYR);

	if (gic_version() == 2)
		test_targets(nr_irqs);
}

#if defined(__arm__)

static void test_its_introspection(void) {}
static void test_its_trigger(void) {}
static void test_its_migration(void) {}
static void test_its_pending_migration(void) {}
static void test_migrate_unmapped_collection(void) {}

#else /* __aarch64__ */

static void test_its_introspection(void)
{
	struct its_baser *dev_baser = &its_data.device_baser;
	struct its_baser *coll_baser = &its_data.coll_baser;
	struct its_typer *typer = &its_data.typer;

	if (!gicv3_its_base()) {
		report_skip("No ITS, skip ...");
		return;
	}

	/* IIDR */
	report(test_readonly_32(gicv3_its_base() + GITS_IIDR, false),
	       "GITS_IIDR is read-only");

	/* TYPER */
	report(test_readonly_32(gicv3_its_base() + GITS_TYPER, false),
	       "GITS_TYPER is read-only");

	report(typer->phys_lpi, "ITS supports physical LPIs");
	report_info("vLPI support: %s", typer->virt_lpi ? "yes" : "no");
	report_info("ITT entry size = 0x%x", typer->ite_size);
	report_info("Bit Count: EventID=%d DeviceId=%d CollId=%d",
		    typer->eventid_bits, typer->deviceid_bits,
		    typer->collid_bits);
	report(typer->eventid_bits && typer->deviceid_bits &&
	       typer->collid_bits, "ID spaces");
	report_info("Target address format %s",
			typer->pta ? "Redist base address" : "PE #");

	report(dev_baser && coll_baser, "detect device and collection BASER");
	report_info("device table entry_size = 0x%x", dev_baser->esz);
	report_info("collection table entry_size = 0x%x", coll_baser->esz);
}

static int its_prerequisites(int nb_cpus)
{
	int cpu;

	if (!gicv3_its_base()) {
		report_skip("No ITS, skip ...");
		return -1;
	}

	if (nr_cpus < nb_cpus) {
		report_skip("Test requires at least %d vcpus", nb_cpus);
		return -1;
	}

	stats_reset();

	setup_irq(lpi_handler);

	for_each_present_cpu(cpu) {
		if (cpu == 0)
			continue;
		smp_boot_secondary(cpu, secondary_lpi_test);
	}
	wait_on_ready();

	its_enable_defaults();

	return 0;
}

/*
 * Set up the configuration for these mappings:
 * dev_id=2 event=20 -> vcpu 3, intid=8195
 * dev_id=7 event=255 -> vcpu 2, intid=8196
 * LPIs ready to hit
 */
static int its_setup1(void)
{
	struct its_collection *col3, *col2;
	struct its_device *dev2, *dev7;

	if (its_prerequisites(4))
		return -1;

	dev2 = its_create_device(2 /* dev id */, 8 /* nb_ites */);
	dev7 = its_create_device(7 /* dev id */, 8 /* nb_ites */);

	col3 = its_create_collection(3 /* col id */, 3 /* target PE */);
	col2 = its_create_collection(2 /* col id */, 2 /* target PE */);

	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT);
	gicv3_lpi_set_config(8196, LPI_PROP_DEFAULT);

	/*
	 * dev=2, eventid=20  -> lpi= 8195, col=3
	 * dev=7, eventid=255 -> lpi= 8196, col=2
	 */
	its_send_mapd(dev2, true);
	its_send_mapd(dev7, true);

	its_send_mapc(col3, true);
	its_send_mapc(col2, true);

	its_send_invall(col2);
	its_send_invall(col3);

	its_send_mapti(dev2, 8195 /* lpi id */, 20 /* event id */, col3);
	its_send_mapti(dev7, 8196 /* lpi id */, 255 /* event id */, col2);
	return 0;
}

static void test_its_trigger(void)
{
	struct its_collection *col3;
	struct its_device *dev2, *dev7;

	if (its_setup1())
		return;

	col3 = its_get_collection(3);
	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

	report_prefix_push("int");

	lpi_stats_expect(3, 8195);
	its_send_int(dev2, 20);
	check_lpi_stats("dev=2, eventid=20  -> lpi= 8195, col=3");

	lpi_stats_expect(2, 8196);
	its_send_int(dev7, 255);
	check_lpi_stats("dev=7, eventid=255 -> lpi= 8196, col=2");

	report_prefix_pop();

	report_prefix_push("inv/invall");

	/*
	 * Disable LPI 8195 and check that dev2/eventid=20 no longer
	 * triggers the corresponding LPI.
	 */
	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT & ~LPI_PROP_ENABLED);
	its_send_inv(dev2, 20);

	lpi_stats_expect(-1, -1);
	its_send_int(dev2, 20);
	check_lpi_stats("dev2/eventid=20 does not trigger any LPI");

	/*
	 * Re-enable the LPI but deliberately do not send an INVALL, so
	 * the configuration change is not taken into account.
	 * The LPI should not hit.
	 */
	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT);
	lpi_stats_expect(-1, -1);
	its_send_int(dev2, 20);
	check_lpi_stats("dev2/eventid=20 still does not trigger any LPI");

	/* Now call the invall and check the LPI hits */
	its_send_invall(col3);
	lpi_stats_expect(3, 8195);
	its_send_int(dev2, 20);
	check_lpi_stats("dev2/eventid=20 now triggers an LPI");

	report_prefix_pop();

	report_prefix_push("mapd valid=false");
	/*
	 * Unmap device 2 and check that eventid 20, formerly attached
	 * to it, no longer triggers an LPI.
	 */

	its_send_mapd(dev2, false);
	lpi_stats_expect(-1, -1);
	its_send_int(dev2, 20);
	check_lpi_stats("no LPI after device unmap");
	report_prefix_pop();
}

static void test_its_migration(void)
{
	struct its_device *dev2, *dev7;
	bool test_skipped = false;

	if (its_setup1()) {
		test_skipped = true;
		goto do_migrate;
	}

	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

do_migrate:
	puts("Now migrate the VM, then press a key to continue...\n");
	(void)getchar();
	report_info("Migration complete");
	if (test_skipped)
		return;

	lpi_stats_expect(3, 8195);
	its_send_int(dev2, 20);
	check_lpi_stats("dev2/eventid=20 triggers LPI 8195 on PE #3 after migration");

	lpi_stats_expect(2, 8196);
	its_send_int(dev7, 255);
	check_lpi_stats("dev7/eventid=255 triggers LPI 8196 on PE #2 after migration");
}

#define ERRATA_UNMAPPED_COLLECTIONS "ERRATA_8c58be34494b"

static void test_migrate_unmapped_collection(void)
{
	struct its_collection *col = NULL;
	struct its_device *dev2 = NULL, *dev7 = NULL;
	bool test_skipped = false;
	int pe0 = 0;
	u8 config;

	if (its_setup1()) {
		test_skipped = true;
		goto do_migrate;
	}

	if (!errata(ERRATA_UNMAPPED_COLLECTIONS)) {
		report_skip("Skipping test, as this test hangs without the fix. "
			    "Set %s=y to enable.", ERRATA_UNMAPPED_COLLECTIONS);
		test_skipped = true;
		goto do_migrate;
	}

	col = its_create_collection(pe0, pe0);
	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

	/* MAPTI with the collection unmapped */
	its_send_mapti(dev2, 8192, 0, col);
	gicv3_lpi_set_config(8192, LPI_PROP_DEFAULT);

do_migrate:
	puts("Now migrate the VM, then press a key to continue...\n");
	(void)getchar();
	report_info("Migration complete");
	if (test_skipped)
		return;

	/* On the destination, map the collection */
	its_send_mapc(col, true);
	its_send_invall(col);

	lpi_stats_expect(2, 8196);
	its_send_int(dev7, 255);
	check_lpi_stats("dev7/eventid=255 triggered LPI 8196 on PE #2");

	config = gicv3_lpi_get_config(8192);
	report(config == LPI_PROP_DEFAULT,
	       "Config of LPI 8192 was properly migrated");

	lpi_stats_expect(pe0, 8192);
	its_send_int(dev2, 0);
	check_lpi_stats("dev2/eventid=0 triggered LPI 8192 on PE #0");
}

static void test_its_pending_migration(void)
{
	struct its_device *dev;
	struct its_collection *collection[2];
	int *expected = calloc(nr_cpus, sizeof(int));
	int pe0 = nr_cpus - 1, pe1 = nr_cpus - 2;
	bool test_skipped = false;
	u64 pendbaser;
	void *ptr;
	int i;

	if (its_prerequisites(4)) {
		test_skipped = true;
		goto do_migrate;
	}

	dev = its_create_device(2 /* dev id */, 8 /* nb_ites */);
	its_send_mapd(dev, true);

	collection[0] = its_create_collection(pe0, pe0);
	collection[1] = its_create_collection(pe1, pe1);
	its_send_mapc(collection[0], true);
	its_send_mapc(collection[1], true);

	/* Disable LPIs at the redistributor level */
	gicv3_lpi_rdist_disable(pe0);
	gicv3_lpi_rdist_disable(pe1);

	/* LPIs are interleaved between the two PEs */
	for (i = 0; i < 256; i++) {
		struct its_collection *col = i % 2 ? collection[0] :
						     collection[1];
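		/* target_address encodes the PE number shifted by 16 (PTA == 0) */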
		int vcpu = col->target_address >> 16;

		its_send_mapti(dev, LPI(i), i, col);
		gicv3_lpi_set_config(LPI(i), LPI_PROP_DEFAULT);
		gicv3_lpi_set_clr_pending(vcpu, LPI(i), true);
	}
	its_send_invall(collection[0]);
	its_send_invall(collection[1]);

	/*
	 * Clear the PTZ (Pending Table Zero) bit on each PENDBASER so the
	 * redistributor reads the pending bits set above from memory.
	 */

	expected[pe0] = 128;
	expected[pe1] = 128;

	ptr = gicv3_data.redist_base[pe0] + GICR_PENDBASER;
	pendbaser = readq(ptr);
	writeq(pendbaser & ~GICR_PENDBASER_PTZ, ptr);

	ptr = gicv3_data.redist_base[pe1] + GICR_PENDBASER;
	pendbaser = readq(ptr);
	writeq(pendbaser & ~GICR_PENDBASER_PTZ, ptr);

	gicv3_lpi_rdist_enable(pe0);
	gicv3_lpi_rdist_enable(pe1);

do_migrate:
	puts("Now migrate the VM, then press a key to continue...\n");
	(void)getchar();
	report_info("Migration complete");
	if (test_skipped)
		return;

	/* Wait for the 256 LPIs to be handled */
	mdelay(1000);

	check_lpi_hits(expected, "128 LPIs on both PE0 and PE1 after migration");
}
#endif

int main(int argc, char **argv)
{
	if (!gic_init()) {
		printf("No supported gic present, skipping tests...\n");
		return report_summary();
	}

	report_prefix_pushf("gicv%d", gic_version());

	switch (gic_version()) {
	case 2:
		gic = &gicv2;
		break;
	case 3:
		gic = &gicv3;
		break;
	}

	if (argc < 2)
		report_abort("no test specified");

	if (strcmp(argv[1], "ipi") == 0) {
		report_prefix_push(argv[1]);
		nr_cpu_check(2);
		on_cpus(ipi_test, NULL);
	} else if (strcmp(argv[1], "active") == 0) {
		run_active_clear_test();
	} else if (strcmp(argv[1], "mmio") == 0) {
		report_prefix_push(argv[1]);
		gic_test_mmio();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-trigger")) {
		report_prefix_push(argv[1]);
		test_its_trigger();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-migration")) {
		report_prefix_push(argv[1]);
		test_its_migration();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-pending-migration")) {
		report_prefix_push(argv[1]);
		test_its_pending_migration();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-migrate-unmapped-collection")) {
		report_prefix_push(argv[1]);
		test_migrate_unmapped_collection();
		report_prefix_pop();
	} else if (strcmp(argv[1], "its-introspection") == 0) {
		report_prefix_push(argv[1]);
		test_its_introspection();
		report_prefix_pop();
	} else {
		report_abort("Unknown subtest '%s'", argv[1]);
	}

	return report_summary();
}