xref: /kvm-unit-tests/arm/gic.c (revision c604fa931a1cb70c3649ac1b7223178fc79eab6a)
/*
 * GIC tests
 *
 * GICv2
 *   + test sending/receiving IPIs
 *   + MMIO access tests
 * GICv3
 *   + test sending/receiving IPIs
 *
 * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <libcflat.h>
#include <errata.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/gic.h>
#include <asm/gic-v3-its.h>
#include <asm/smp.h>
#include <asm/barrier.h>
#include <asm/io.h>

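/*
 * CPU IPI_SENDER drives the IPI tests while the other CPUs receive; SGI
 * number IPI_IRQ is used so the handler can verify the expected INTID.
 */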
#define IPI_SENDER	1
#define IPI_IRQ		1

struct gic {
	struct {
		void (*send_self)(void);
		void (*send_broadcast)(void);
	} ipi;
};

static struct gic *gic;
static int acked[NR_CPUS], spurious[NR_CPUS];
static int irq_sender[NR_CPUS], irq_number[NR_CPUS];
static cpumask_t ready;

static void nr_cpu_check(int nr)
{
	if (nr_cpus < nr)
		report_abort("At least %d cpus required", nr);
}

static void wait_on_ready(void)
{
	cpumask_set_cpu(smp_processor_id(), &ready);
	while (!cpumask_full(&ready))
		cpu_relax();
}

static void stats_reset(void)
{
	int i;

	for (i = 0; i < nr_cpus; ++i) {
		acked[i] = 0;
		irq_sender[i] = -1;
		irq_number[i] = -1;
	}
}

static void wait_for_interrupts(cpumask_t *mask)
{
	int nr_pass, cpu, i;

	/* Wait up to 5s for all interrupts to be delivered */
	for (i = 0; i < 50; i++) {
		mdelay(100);
		nr_pass = 0;
		for_each_present_cpu(cpu) {
			/*
			 * A CPU that has received more than one interrupt
			 * will be flagged by check_acked(), and no matter how
			 * long we wait it cannot un-receive them. Consider a
			 * CPU with at least one interrupt as passing here.
			 */
			nr_pass += cpumask_test_cpu(cpu, mask) ?
				acked[cpu] >= 1 : acked[cpu] == 0;
		}

		if (nr_pass == nr_cpus) {
			if (i)
				report_info("interrupts took more than %d ms", i * 100);
			/* Wait for unexpected interrupts to fire */
			mdelay(100);
			return;
		}
	}

	report_info("interrupts timed out (5s)");
}

static bool check_acked(cpumask_t *mask, int sender, int irqnum)
{
	int missing = 0, extra = 0, unexpected = 0;
	bool has_gicv2 = (gic_version() == 2);
	bool pass = true;
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask)) {
			if (!acked[cpu])
				++missing;
			else if (acked[cpu] > 1)
				++extra;
		} else if (acked[cpu]) {
			++unexpected;
		}
		if (!acked[cpu])
			continue;
		smp_rmb(); /* pairs with smp_wmb in irq_handler */

		if (has_gicv2 && irq_sender[cpu] != sender) {
			report_info("cpu%d received IPI from wrong sender %d",
					cpu, irq_sender[cpu]);
			pass = false;
		}

		if (irq_number[cpu] != irqnum) {
			report_info("cpu%d received wrong irq %d",
					cpu, irq_number[cpu]);
			pass = false;
		}
	}

	if (missing || extra || unexpected) {
		report_info("ACKS: missing=%d extra=%d unexpected=%d",
				missing, extra, unexpected);
		pass = false;
	}

	return pass;
}

static void check_spurious(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (spurious[cpu])
			report_info("WARN: cpu%d got %d spurious interrupts",
				cpu, spurious[cpu]);
	}
}

static int gic_get_sender(int irqstat)
{
	if (gic_version() == 2)
		/* GICC_IAR.CPUID is RAZ for non-SGIs */
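		/* For SGIs, the source CPU ID is GICC_IAR bits [12:10] */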
		return (irqstat >> 10) & 7;
	return -1;
}

static void irq_handler(struct pt_regs *regs __unused)
{
	u32 irqstat = gic_read_iar();
	u32 irqnr = gic_iar_irqnr(irqstat);
	int this_cpu = smp_processor_id();

	if (irqnr != GICC_INT_SPURIOUS) {
		gic_write_eoir(irqstat);
		irq_sender[this_cpu] = gic_get_sender(irqstat);
		irq_number[this_cpu] = irqnr;
		smp_wmb(); /* pairs with smp_rmb in check_acked */
		++acked[this_cpu];
	} else {
		++spurious[this_cpu];
	}

	/* Wait for writes to acked/spurious to complete */
	dsb(ishst);
}

static void setup_irq(irq_handler_fn handler)
{
	gic_enable_defaults();
#ifdef __arm__
	install_exception_handler(EXCPTN_IRQ, handler);
#else
	install_irq_handler(EL1H_IRQ, handler);
#endif
	local_irq_enable();
}

#if defined(__aarch64__)
static void check_lpi_hits(int *expected, const char *msg)
{
	bool pass = true;
	int i;

	for_each_present_cpu(i) {
		if (acked[i] != expected[i]) {
			report_info("expected %d LPIs on PE #%d, %d observed",
				    expected[i], i, acked[i]);
			pass = false;
			break;
		}
	}
	report(pass, "%s", msg);
}
#endif

static void gicv2_ipi_send_self(void)
{
	/*
	 * The wmb() in writel and rmb() when acknowledging the interrupt are
	 * sufficient for ensuring that writes that happen in program order
	 * before the interrupt are observed in the interrupt handler after
	 * acknowledging the interrupt.
	 */
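	/* GICD_SGIR.TargetListFilter = 0b10: deliver only to the requesting CPU */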
	writel(2 << 24 | IPI_IRQ, gicv2_dist_base() + GICD_SGIR);
}

static void gicv2_ipi_send_broadcast(void)
{
	/* No barriers are needed, same situation as gicv2_ipi_send_self() */
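	/* GICD_SGIR.TargetListFilter = 0b01: deliver to all CPUs except the requester */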
	writel(1 << 24 | IPI_IRQ, gicv2_dist_base() + GICD_SGIR);
}

static void gicv3_ipi_send_self(void)
{
	gic_ipi_send_single(IPI_IRQ, smp_processor_id());
}

static void gicv3_ipi_send_broadcast(void)
{
	/*
	 * Ensure stores to Normal memory are visible to other CPUs before
	 * sending the IPI
	 */
	wmb();
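	/* ICC_SGI1R_EL1.IRM (bit 40) set: route to all PEs other than self */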
	gicv3_write_sgi1r(1ULL << 40 | IPI_IRQ << 24);
	isb();
}

static void ipi_test_self(void)
{
	int this_cpu = smp_processor_id();
	cpumask_t mask;

	report_prefix_push("self");
	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(this_cpu, &mask);
	gic->ipi.send_self();
	wait_for_interrupts(&mask);
	report(check_acked(&mask, this_cpu, IPI_IRQ), "Interrupts received");
	report_prefix_pop();
}

static void ipi_test_smp(void)
{
	int this_cpu = smp_processor_id();
	cpumask_t mask;
	int i;

	report_prefix_push("target-list");
	stats_reset();
	cpumask_copy(&mask, &cpu_present_mask);
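	/* Drop every CPU sharing the sender's parity (the sender included) */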
	for (i = this_cpu & 1; i < nr_cpus; i += 2)
		cpumask_clear_cpu(i, &mask);
	gic_ipi_send_mask(IPI_IRQ, &mask);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, this_cpu, IPI_IRQ), "Interrupts received");
	report_prefix_pop();

	report_prefix_push("broadcast");
	stats_reset();
	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(this_cpu, &mask);
	gic->ipi.send_broadcast();
	wait_for_interrupts(&mask);
	report(check_acked(&mask, this_cpu, IPI_IRQ), "Interrupts received");
	report_prefix_pop();
}

static void ipi_send(void)
{
	setup_irq(irq_handler);
	wait_on_ready();
	ipi_test_self();
	ipi_test_smp();
	check_spurious();
	exit(report_summary());
}

static void irq_recv(void)
{
	setup_irq(irq_handler);
	cpumask_set_cpu(smp_processor_id(), &ready);
	while (1)
		wfi();
}

static void ipi_test(void *data __unused)
{
	if (smp_processor_id() == IPI_SENDER)
		ipi_send();
	else
		irq_recv();
}

static struct gic gicv2 = {
	.ipi = {
		.send_self = gicv2_ipi_send_self,
		.send_broadcast = gicv2_ipi_send_broadcast,
	},
};

static struct gic gicv3 = {
	.ipi = {
		.send_self = gicv3_ipi_send_self,
		.send_broadcast = gicv3_ipi_send_broadcast,
	},
};

/* Runs on the same CPU as the sender, no need for memory synchronization */
static void ipi_clear_active_handler(struct pt_regs *regs __unused)
{
	u32 irqstat = gic_read_iar();
	u32 irqnr = gic_iar_irqnr(irqstat);
	int this_cpu = smp_processor_id();

	if (irqnr != GICC_INT_SPURIOUS) {
		void *base;
		u32 val = 1 << IPI_IRQ;

		if (gic_version() == 2)
			base = gicv2_dist_base();
		else
			base = gicv3_sgi_base();

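		/* Deactivate the SGI by writing its bit to ICACTIVER rather than EOIing it */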
		writel(val, base + GICD_ICACTIVER);

		irq_sender[this_cpu] = gic_get_sender(irqstat);
		irq_number[this_cpu] = irqnr;
		++acked[this_cpu];
	} else {
		++spurious[this_cpu];
	}
}

static void run_active_clear_test(void)
{
	report_prefix_push("active");
	setup_irq(ipi_clear_active_handler);
	ipi_test_self();
	check_spurious();
	report_prefix_pop();
}

static bool test_ro_pattern_32(void *address, u32 pattern, u32 orig)
{
	u32 reg;

	writel(pattern, address);
	reg = readl(address);

	if (reg != orig)
		writel(orig, address);

	return reg == orig;
}

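/*
 * Check that a register is read-only by writing several patterns and
 * verifying the content is unchanged. With @razwi set, the register must
 * additionally read as zero to begin with (RAZ/WI semantics).
 */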
static bool test_readonly_32(void *address, bool razwi)
{
	u32 orig, pattern;

	orig = readl(address);
	if (razwi && orig)
		return false;

	pattern = 0xffffffff;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	pattern = 0xa5a55a5a;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	pattern = 0;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	return true;
}

static void test_typer_v2(uint32_t reg)
{
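	/* GICD_TYPER.CPUNumber (bits [7:5]) holds the number of CPU interfaces minus one */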
	int nr_gic_cpus = ((reg >> 5) & 0x7) + 1;

	report_info("nr_cpus=%d", nr_cpus);
	report(nr_cpus == nr_gic_cpus, "all CPUs have interrupts");
}

#define BYTE(reg32, byte) (((reg32) >> ((byte) * 8)) & 0xff)
#define REPLACE_BYTE(reg32, byte, new) (((reg32) & ~(0xff << ((byte) * 8))) |\
					((new) << ((byte) * 8)))

/*
 * Some registers are byte accessible; do a byte-wide read and write of known
 * content to check for this.
 * Apply a @mask to cater for special register properties.
 * @pattern contains the value already in the register.
 */
static void test_byte_access(void *base_addr, u32 pattern, u32 mask)
{
	u32 reg = readb(base_addr + 1);
	bool res;

	res = (reg == (BYTE(pattern, 1) & (mask >> 8)));
	report(res, "byte reads successful");
	if (!res)
		report_info("byte 1 of 0x%08"PRIx32" => 0x%02"PRIx32, pattern & mask, reg);

	pattern = REPLACE_BYTE(pattern, 2, 0x1f);
	writeb(BYTE(pattern, 2), base_addr + 2);
	reg = readl(base_addr);
	res = (reg == (pattern & mask));
	report(res, "byte writes successful");
	if (!res)
		report_info("writing 0x%02"PRIx32" into byte 2 => 0x%08"PRIx32,
			    BYTE(pattern, 2), reg);
}

static void test_priorities(int nr_irqs, void *priptr)
{
	u32 orig_prio, reg, pri_bits;
	u32 pri_mask, pattern;
	void *first_spi = priptr + GIC_FIRST_SPI;

	orig_prio = readl(first_spi);
	report_prefix_push("IPRIORITYR");

	/*
	 * Determine the implemented number of priority bits by writing all
	 * 1's and checking the number of cleared bits in the value read back.
	 */
	writel(0xffffffff, first_spi);
	pri_mask = readl(first_spi);

	reg = ~pri_mask;
	report((((reg >> 16) == (reg & 0xffff)) &&
	        ((reg & 0xff) == ((reg >> 8) & 0xff))),
	       "consistent priority masking");
	report_info("priority mask is 0x%08"PRIx32, pri_mask);

	reg = reg & 0xff;
	for (pri_bits = 8; reg & 1; reg >>= 1, pri_bits--)
		;
	report(pri_bits >= 4, "implements at least 4 priority bits");
	report_info("%"PRIu32" priority bits implemented", pri_bits);

	pattern = 0;
	writel(pattern, first_spi);
	report(readl(first_spi) == pattern, "clearing priorities");

	/* setting all priorities to their max values was tested above */

	report(test_readonly_32(priptr + nr_irqs, true),
	       "accesses beyond limit RAZ/WI");

	writel(pattern, priptr + nr_irqs - 4);
	report(readl(priptr + nr_irqs - 4) == (pattern & pri_mask),
	       "accessing last SPIs");

	pattern = 0xff7fbf3f;
	writel(pattern, first_spi);
	report(readl(first_spi) == (pattern & pri_mask),
	       "priorities are preserved");

	/* The PRIORITY registers are byte accessible. */
	test_byte_access(first_spi, pattern, pri_mask);

	report_prefix_pop();
	writel(orig_prio, first_spi);
}

/* GICD_ITARGETSR is only used by GICv2. */
static void test_targets(int nr_irqs)
{
	void *targetsptr = gicv2_dist_base() + GICD_ITARGETSR;
	u32 orig_targets;
	u32 cpu_mask;
	u32 pattern, reg;

	orig_targets = readl(targetsptr + GIC_FIRST_SPI);
	report_prefix_push("ITARGETSR");

	cpu_mask = (1 << nr_cpus) - 1;
	cpu_mask |= cpu_mask << 8;
	cpu_mask |= cpu_mask << 16;

	/* Check that bits for non-implemented CPUs are RAZ/WI. */
	if (nr_cpus < 8) {
		writel(0xffffffff, targetsptr + GIC_FIRST_SPI);
		report(!(readl(targetsptr + GIC_FIRST_SPI) & ~cpu_mask),
		       "bits for non-existent CPUs masked");
		report_info("%d non-existent CPUs", 8 - nr_cpus);
	} else {
		report_skip("CPU masking (all CPUs implemented)");
	}

	report(test_readonly_32(targetsptr + nr_irqs, true),
	       "accesses beyond limit RAZ/WI");

	pattern = 0x0103020f;
	writel(pattern, targetsptr + GIC_FIRST_SPI);
	reg = readl(targetsptr + GIC_FIRST_SPI);
	report(reg == (pattern & cpu_mask), "register content preserved");
	if (reg != (pattern & cpu_mask))
		report_info("writing %08"PRIx32" reads back as %08"PRIx32,
			    pattern & cpu_mask, reg);

	/* The TARGETS registers are byte accessible. */
	test_byte_access(targetsptr + GIC_FIRST_SPI, pattern, cpu_mask);

	writel(orig_targets, targetsptr + GIC_FIRST_SPI);

	report_prefix_pop();
}

static void gic_test_mmio(void)
{
	u32 reg;
	int nr_irqs;
	void *gic_dist_base, *idreg;

	switch (gic_version()) {
	case 0x2:
		gic_dist_base = gicv2_dist_base();
		idreg = gic_dist_base + GICD_ICPIDR2;
		break;
	case 0x3:
		report_abort("GICv3 MMIO tests NYI");
	default:
		report_abort("GIC version %d not supported", gic_version());
	}

	reg = readl(gic_dist_base + GICD_TYPER);
	nr_irqs = GICD_TYPER_IRQS(reg);
	report_info("number of implemented SPIs: %d", nr_irqs - GIC_FIRST_SPI);

	test_typer_v2(reg);

	report_info("IIDR: 0x%08"PRIx32, readl(gic_dist_base + GICD_IIDR));

	report(test_readonly_32(gic_dist_base + GICD_TYPER, false),
	       "GICD_TYPER is read-only");
	report(test_readonly_32(gic_dist_base + GICD_IIDR, false),
	       "GICD_IIDR is read-only");

	reg = readl(idreg);
	report(test_readonly_32(idreg, false), "ICPIDR2 is read-only");
	report_info("value of ICPIDR2: 0x%08"PRIx32, reg);

	test_priorities(nr_irqs, gic_dist_base + GICD_IPRIORITYR);

	if (gic_version() == 2)
		test_targets(nr_irqs);
}

#if defined(__arm__)

static void test_its_introspection(void) {}
static void test_its_trigger(void) {}
static void test_its_migration(void) {}
static void test_its_pending_migration(void) {}
static void test_migrate_unmapped_collection(void) {}

#else /* __aarch64__ */

static void test_its_introspection(void)
{
	struct its_baser *dev_baser = &its_data.device_baser;
	struct its_baser *coll_baser = &its_data.coll_baser;
	struct its_typer *typer = &its_data.typer;

	if (!gicv3_its_base()) {
		report_skip("No ITS, skip ...");
		return;
	}

	/* IIDR */
	report(test_readonly_32(gicv3_its_base() + GITS_IIDR, false),
	       "GITS_IIDR is read-only");

	/* TYPER */
	report(test_readonly_32(gicv3_its_base() + GITS_TYPER, false),
	       "GITS_TYPER is read-only");

	report(typer->phys_lpi, "ITS supports physical LPIs");
	report_info("vLPI support: %s", typer->virt_lpi ? "yes" : "no");
	report_info("ITT entry size = 0x%x", typer->ite_size);
	report_info("Bit Count: EventID=%d DeviceId=%d CollId=%d",
		    typer->eventid_bits, typer->deviceid_bits,
		    typer->collid_bits);
	report(typer->eventid_bits && typer->deviceid_bits &&
	       typer->collid_bits, "ID spaces");
	report_info("Target address format %s",
			typer->pta ? "Redist base address" : "PE #");

	report(dev_baser && coll_baser, "detect device and collection BASER");
	report_info("device table entry_size = 0x%x", dev_baser->esz);
	report_info("collection table entry_size = 0x%x", coll_baser->esz);
}

static int its_prerequisites(int nb_cpus)
{
	int cpu;

	if (!gicv3_its_base()) {
		report_skip("No ITS, skip ...");
		return -1;
	}

	if (nr_cpus < nb_cpus) {
		report_skip("Test requires at least %d vcpus", nb_cpus);
		return -1;
	}

	setup_irq(irq_handler);

	for_each_present_cpu(cpu) {
		if (cpu == 0)
			continue;
		smp_boot_secondary(cpu, irq_recv);
	}
	wait_on_ready();

	its_enable_defaults();

	return 0;
}

/*
 * Set up the configuration for these mappings:
 * dev_id=2 event=20 -> vcpu 3, intid=8195
 * dev_id=7 event=255 -> vcpu 2, intid=8196
 * LPIs ready to hit
 */
static int its_setup1(void)
{
	struct its_collection *col3, *col2;
	struct its_device *dev2, *dev7;

	if (its_prerequisites(4))
		return -1;

	dev2 = its_create_device(2 /* dev id */, 8 /* nb_ites */);
	dev7 = its_create_device(7 /* dev id */, 8 /* nb_ites */);

	col3 = its_create_collection(3 /* col id */, 3 /* target PE */);
	col2 = its_create_collection(2 /* col id */, 2 /* target PE */);

	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT);
	gicv3_lpi_set_config(8196, LPI_PROP_DEFAULT);

	/*
	 * dev=2, eventid=20  -> lpi= 8195, col=3
	 * dev=7, eventid=255 -> lpi= 8196, col=2
	 */
	its_send_mapd(dev2, true);
	its_send_mapd(dev7, true);

	its_send_mapc(col3, true);
	its_send_mapc(col2, true);

	its_send_invall(col2);
	its_send_invall(col3);

	its_send_mapti(dev2, 8195 /* lpi id */, 20 /* event id */, col3);
	its_send_mapti(dev7, 8196 /* lpi id */, 255 /* event id */, col2);
	return 0;
}

static void test_its_trigger(void)
{
	struct its_collection *col3;
	struct its_device *dev2, *dev7;
	cpumask_t mask;

	if (its_setup1())
		return;

	col3 = its_get_collection(3);
	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

	report_prefix_push("int");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(3, &mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8195),
			"dev=2, eventid=20  -> lpi= 8195, col=3");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(2, &mask);
	its_send_int(dev7, 255);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8196),
			"dev=7, eventid=255 -> lpi= 8196, col=2");

	report_prefix_pop();

	report_prefix_push("inv/invall");

	/*
	 * Disable LPI 8195 and check that dev2/eventid=20 no longer triggers
	 * the corresponding LPI.
	 */
	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT & ~LPI_PROP_ENABLED);
	its_send_inv(dev2, 20);

	stats_reset();
	cpumask_clear(&mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, -1, -1),
			"dev2/eventid=20 does not trigger any LPI");

	/*
	 * Re-enable the LPI. While "A change to the LPI configuration
	 * is not guaranteed to be visible until an appropriate
	 * invalidation operation has completed", hardware that doesn't
	 * implement caches may have delivered the event at any point
	 * after the enabling. Check that the LPI has hit by the time the
	 * INVALL is done.
	 */
	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT);
	stats_reset();
	cpumask_clear(&mask);
	its_send_int(dev2, 20);
	cpumask_set_cpu(3, &mask);
	its_send_invall(col3);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8195),
			"dev2/eventid=20 pending LPI is received");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(3, &mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8195),
			"dev2/eventid=20 now triggers an LPI");

	report_prefix_pop();

	report_prefix_push("mapd valid=false");
	/*
	 * Unmap device 2 and check that eventid 20, formerly attached to
	 * it, does not hit anymore.
	 */
	its_send_mapd(dev2, false);
	stats_reset();
	cpumask_clear(&mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, -1, -1), "no LPI after device unmap");

	check_spurious();
	report_prefix_pop();
}

static void test_its_migration(void)
{
	struct its_device *dev2, *dev7;
	bool test_skipped = false;
	cpumask_t mask;

	if (its_setup1()) {
		test_skipped = true;
		goto do_migrate;
	}

	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

do_migrate:
	puts("Now migrate the VM, then press a key to continue...\n");
	(void)getchar();
	report_info("Migration complete");
	if (test_skipped)
		return;

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(3, &mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8195),
			"dev2/eventid=20 triggers LPI 8195 on PE #3 after migration");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(2, &mask);
	its_send_int(dev7, 255);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8196),
			"dev7/eventid=255 triggers LPI 8196 on PE #2 after migration");

	check_spurious();
}

#define ERRATA_UNMAPPED_COLLECTIONS "ERRATA_8c58be34494b"

static void test_migrate_unmapped_collection(void)
{
	struct its_collection *col = NULL;
	struct its_device *dev2 = NULL, *dev7 = NULL;
	bool test_skipped = false;
	cpumask_t mask;
	int pe0 = 0;
	u8 config;

	if (its_setup1()) {
		test_skipped = true;
		goto do_migrate;
	}

	if (!errata(ERRATA_UNMAPPED_COLLECTIONS)) {
		report_skip("Skipping test, as this test hangs without the fix. "
			    "Set %s=y to enable.", ERRATA_UNMAPPED_COLLECTIONS);
		test_skipped = true;
		goto do_migrate;
	}

	col = its_create_collection(pe0, pe0);
	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

	/* MAPTI with the collection unmapped */
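	/* The translation for the still-unmapped collection must survive migration */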
	its_send_mapti(dev2, 8192, 0, col);
	gicv3_lpi_set_config(8192, LPI_PROP_DEFAULT);

do_migrate:
	puts("Now migrate the VM, then press a key to continue...\n");
	(void)getchar();
	report_info("Migration complete");
	if (test_skipped)
		return;

	/* on the destination, map the collection */
	its_send_mapc(col, true);
	its_send_invall(col);

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(2, &mask);
	its_send_int(dev7, 255);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8196),
			"dev7/eventid=255 triggered LPI 8196 on PE #2");

	config = gicv3_lpi_get_config(8192);
	report(config == LPI_PROP_DEFAULT,
	       "Config of LPI 8192 was properly migrated");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(pe0, &mask);
	its_send_int(dev2, 0);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8192),
			"dev2/eventid=0 triggered LPI 8192 on PE #0");

	check_spurious();
}

static void test_its_pending_migration(void)
{
	struct its_device *dev;
	struct its_collection *collection[2];
	int *expected = calloc(nr_cpus, sizeof(int));
	int pe0 = nr_cpus - 1, pe1 = nr_cpus - 2;
	bool test_skipped = false;
	u64 pendbaser;
	void *ptr;
	int i;

	if (its_prerequisites(4)) {
		test_skipped = true;
		goto do_migrate;
	}

	dev = its_create_device(2 /* dev id */, 8 /* nb_ites */);
	its_send_mapd(dev, true);

	collection[0] = its_create_collection(pe0, pe0);
	collection[1] = its_create_collection(pe1, pe1);
	its_send_mapc(collection[0], true);
	its_send_mapc(collection[1], true);

	/* disable LPIs at the redistributor level */
	gicv3_lpi_rdist_disable(pe0);
	gicv3_lpi_rdist_disable(pe1);

	/* LPIs are interleaved between the 2 PEs */
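	/* LPI(i) is assumed to yield INTID 8192 + i, 8192 being the first LPI INTID */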
	for (i = 0; i < 256; i++) {
		struct its_collection *col = i % 2 ? collection[0] :
						     collection[1];
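		/* its_create_collection() encodes the target PE as (pe << 16) in target_address */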
		int vcpu = col->target_address >> 16;

		its_send_mapti(dev, LPI(i), i, col);
		gicv3_lpi_set_config(LPI(i), LPI_PROP_DEFAULT);
		gicv3_lpi_set_clr_pending(vcpu, LPI(i), true);
	}
	its_send_invall(collection[0]);
	its_send_invall(collection[1]);

	/* Clear the PTZ (Pending Table Zero) bit on each PENDBASER */

	expected[pe0] = 128;
	expected[pe1] = 128;

	ptr = gicv3_data.redist_base[pe0] + GICR_PENDBASER;
	pendbaser = readq(ptr);
	writeq(pendbaser & ~GICR_PENDBASER_PTZ, ptr);

	ptr = gicv3_data.redist_base[pe1] + GICR_PENDBASER;
	pendbaser = readq(ptr);
	writeq(pendbaser & ~GICR_PENDBASER_PTZ, ptr);

	/*
	 * Reset and initialization values for acked are the same, so we don't
	 * need to explicitly call stats_reset().
	 */
	gicv3_lpi_rdist_enable(pe0);
	gicv3_lpi_rdist_enable(pe1);

do_migrate:
	puts("Now migrate the VM, then press a key to continue...\n");
	(void)getchar();
	report_info("Migration complete");
	if (test_skipped)
		return;

	/* let's wait for the 256 LPIs to be handled */
	mdelay(1000);

	check_lpi_hits(expected, "128 LPIs on both PE0 and PE1 after migration");
}
#endif

int main(int argc, char **argv)
{
	if (!gic_init()) {
		printf("No supported GIC present, skipping tests...\n");
		return report_summary();
	}

	report_prefix_pushf("gicv%d", gic_version());

	switch (gic_version()) {
	case 2:
		gic = &gicv2;
		break;
	case 3:
		gic = &gicv3;
		break;
	}

	if (argc < 2)
		report_abort("no test specified");

	if (strcmp(argv[1], "ipi") == 0) {
		report_prefix_push(argv[1]);
		nr_cpu_check(2);
		on_cpus(ipi_test, NULL);
	} else if (strcmp(argv[1], "active") == 0) {
		run_active_clear_test();
	} else if (strcmp(argv[1], "mmio") == 0) {
		report_prefix_push(argv[1]);
		gic_test_mmio();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-trigger")) {
		report_prefix_push(argv[1]);
		test_its_trigger();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-migration")) {
		report_prefix_push(argv[1]);
		test_its_migration();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-pending-migration")) {
		report_prefix_push(argv[1]);
		test_its_pending_migration();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-migrate-unmapped-collection")) {
		report_prefix_push(argv[1]);
		test_migrate_unmapped_collection();
		report_prefix_pop();
	} else if (strcmp(argv[1], "its-introspection") == 0) {
		report_prefix_push(argv[1]);
		test_its_introspection();
		report_prefix_pop();
	} else {
		report_abort("Unknown subtest '%s'", argv[1]);
	}

	return report_summary();
}