/*
 * GIC tests
 *
 * GICv2
 *   + test sending/receiving IPIs
 *   + MMIO access tests
 * GICv3
 *   + test sending/receiving IPIs
 *
 * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <libcflat.h>
#include <migrate.h>
#include <errata.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/gic.h>
#include <asm/gic-v3-its.h>
#include <asm/smp.h>
#include <asm/barrier.h>
#include <asm/io.h>

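/*
 * CPU IPI_SENDER drives the IPI tests; SGI number IPI_IRQ is the interrupt
 * used by all of them.
 */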
#define IPI_SENDER 1
#define IPI_IRQ 1

struct gic {
	struct {
		void (*send_self)(void);
		void (*send_broadcast)(void);
	} ipi;
};

static struct gic *gic;
static int acked[NR_CPUS], spurious[NR_CPUS];
static int irq_sender[NR_CPUS], irq_number[NR_CPUS];
static cpumask_t ready;

static void nr_cpu_check(int nr)
{
	if (nr_cpus < nr)
		report_abort("At least %d cpus required", nr);
}

static void wait_on_ready(void)
{
	cpumask_set_cpu(smp_processor_id(), &ready);
	while (!cpumask_full(&ready))
		cpu_relax();
}

static void stats_reset(void)
{
	int i;

	for (i = 0; i < nr_cpus; ++i) {
		acked[i] = 0;
		irq_sender[i] = -1;
		irq_number[i] = -1;
	}
}

static void wait_for_interrupts(cpumask_t *mask)
{
	int nr_pass, cpu, i;

	/* Wait up to 5s for all interrupts to be delivered */
	for (i = 0; i < 50; i++) {
		mdelay(100);
		nr_pass = 0;
		for_each_present_cpu(cpu) {
			/*
			 * A CPU that has received more than one interrupt
			 * will show up in check_acked(), and no matter how
			 * long we wait it cannot un-receive it. Consider at
			 * least one interrupt as a pass.
			 */
			nr_pass += cpumask_test_cpu(cpu, mask) ?
				acked[cpu] >= 1 : acked[cpu] == 0;
		}

		if (nr_pass == nr_cpus) {
			if (i)
				report_info("interrupts took more than %d ms", i * 100);
			/* Wait for unexpected interrupts to fire */
			mdelay(100);
			return;
		}
	}

	report_info("interrupts timed out (5s)");
}

static bool check_acked(cpumask_t *mask, int sender, int irqnum)
{
	int missing = 0, extra = 0, unexpected = 0;
	bool has_gicv2 = (gic_version() == 2);
	bool pass = true;
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask)) {
			if (!acked[cpu])
				++missing;
			else if (acked[cpu] > 1)
				++extra;
		} else if (acked[cpu]) {
			++unexpected;
		}
		if (!acked[cpu])
			continue;
		smp_rmb(); /* pairs with smp_wmb in irq_handler */

		if (has_gicv2 && irq_sender[cpu] != sender) {
			report_info("cpu%d received IPI from wrong sender %d",
					cpu, irq_sender[cpu]);
			pass = false;
		}

		if (irq_number[cpu] != irqnum) {
			report_info("cpu%d received wrong irq %d",
					cpu, irq_number[cpu]);
			pass = false;
		}
	}

	if (missing || extra || unexpected) {
		report_info("ACKS: missing=%d extra=%d unexpected=%d",
				missing, extra, unexpected);
		pass = false;
	}

	return pass;
}

static void check_spurious(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (spurious[cpu])
			report_info("WARN: cpu%d got %d spurious interrupts",
					cpu, spurious[cpu]);
	}
}

static int gic_get_sender(int irqstat)
{
	if (gic_version() == 2)
		/* GICC_IAR.CPUID is RAZ for non-SGIs */
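		/* for SGIs, bits [12:10] of GICC_IAR hold the requesting CPU */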
		return (irqstat >> 10) & 7;
	return -1;
}

static void irq_handler(struct pt_regs *regs __unused)
{
	u32 irqstat = gic_read_iar();
	u32 irqnr = gic_iar_irqnr(irqstat);
	int this_cpu = smp_processor_id();

	if (irqnr != GICC_INT_SPURIOUS) {
		gic_write_eoir(irqstat);
		irq_sender[this_cpu] = gic_get_sender(irqstat);
		irq_number[this_cpu] = irqnr;
		smp_wmb(); /* pairs with smp_rmb in check_acked */
		++acked[this_cpu];
	} else {
		++spurious[this_cpu];
	}

	/* Wait for writes to acked/spurious to complete */
	dsb(ishst);
}

static void setup_irq(irq_handler_fn handler)
{
	gic_enable_defaults();
#ifdef __arm__
	install_exception_handler(EXCPTN_IRQ, handler);
#else
	install_irq_handler(EL1H_IRQ, handler);
#endif
	local_irq_enable();
}

#if defined(__aarch64__)
static void check_lpi_hits(int *expected, const char *msg)
{
	bool pass = true;
	int i;

	for_each_present_cpu(i) {
		if (acked[i] != expected[i]) {
			report_info("expected %d LPIs on PE #%d, %d observed",
					expected[i], i, acked[i]);
			pass = false;
			break;
		}
	}
	report(pass, "%s", msg);
}
#endif

static void gicv2_ipi_send_self(void)
{
	/*
	 * The wmb() in writel and rmb() when acknowledging the interrupt are
	 * sufficient for ensuring that writes that happen in program order
	 * before the interrupt are observed in the interrupt handler after
	 * acknowledging the interrupt.
	 */
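	/*
	 * GICD_SGIR: TargetListFilter (bits [25:24]) = 0b10 forwards the SGI
	 * only to the requesting CPU; the SGI INTID goes in bits [3:0].
	 */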
	writel(2 << 24 | IPI_IRQ, gicv2_dist_base() + GICD_SGIR);
}

static void gicv2_ipi_send_broadcast(void)
{
	/* No barriers are needed, same situation as gicv2_ipi_send_self() */
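	/* TargetListFilter = 0b01: forward to all CPUs except the requester */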
	writel(1 << 24 | IPI_IRQ, gicv2_dist_base() + GICD_SGIR);
}

static void gicv3_ipi_send_self(void)
{
	gic_ipi_send_single(IPI_IRQ, smp_processor_id());
}

static void gicv3_ipi_send_broadcast(void)
{
	/*
	 * Ensure stores to Normal memory are visible to other CPUs before
	 * sending the IPI.
	 */
	wmb();
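	/*
	 * ICC_SGI1R_EL1: setting IRM (bit 40) broadcasts the SGI to all PEs
	 * other than the sender; the SGI INTID goes in bits [27:24].
	 */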
	gicv3_write_sgi1r(1ULL << 40 | IPI_IRQ << 24);
	isb();
}

static void ipi_test_self(void)
{
	int this_cpu = smp_processor_id();
	cpumask_t mask;

	report_prefix_push("self");
	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(this_cpu, &mask);
	gic->ipi.send_self();
	wait_for_interrupts(&mask);
	report(check_acked(&mask, this_cpu, IPI_IRQ), "Interrupts received");
	report_prefix_pop();
}

static void ipi_test_smp(void)
{
	int this_cpu = smp_processor_id();
	cpumask_t mask;
	int i;

	report_prefix_push("target-list");
	stats_reset();
	cpumask_copy(&mask, &cpu_present_mask);
	for (i = this_cpu & 1; i < nr_cpus; i += 2)
		cpumask_clear_cpu(i, &mask);
	gic_ipi_send_mask(IPI_IRQ, &mask);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, this_cpu, IPI_IRQ), "Interrupts received");
	report_prefix_pop();

	report_prefix_push("broadcast");
	stats_reset();
	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(this_cpu, &mask);
	gic->ipi.send_broadcast();
	wait_for_interrupts(&mask);
	report(check_acked(&mask, this_cpu, IPI_IRQ), "Interrupts received");
	report_prefix_pop();
}

static void ipi_send(void)
{
	setup_irq(irq_handler);
	wait_on_ready();
	ipi_test_self();
	ipi_test_smp();
	check_spurious();
	exit(report_summary());
}

static void irq_recv(void)
{
	setup_irq(irq_handler);
	cpumask_set_cpu(smp_processor_id(), &ready);
	while (1)
		wfi();
}

static void ipi_test(void *data __unused)
{
	if (smp_processor_id() == IPI_SENDER)
		ipi_send();
	else
		irq_recv();
}

static struct gic gicv2 = {
	.ipi = {
		.send_self = gicv2_ipi_send_self,
		.send_broadcast = gicv2_ipi_send_broadcast,
	},
};

static struct gic gicv3 = {
	.ipi = {
		.send_self = gicv3_ipi_send_self,
		.send_broadcast = gicv3_ipi_send_broadcast,
	},
};

/* Runs on the same CPU as the sender, no need for memory synchronization */
static void ipi_clear_active_handler(struct pt_regs *regs __unused)
{
	u32 irqstat = gic_read_iar();
	u32 irqnr = gic_iar_irqnr(irqstat);
	int this_cpu = smp_processor_id();

	if (irqnr != GICC_INT_SPURIOUS) {
		void *base;
		u32 val = 1 << IPI_IRQ;

		if (gic_version() == 2)
			base = gicv2_dist_base();
		else
			base = gicv3_sgi_base();

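		/*
		 * Writing 1 to an interrupt's ICACTIVER bit deactivates it,
		 * so the IPI is cleared without issuing an EOI.
		 */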
		writel(val, base + GICD_ICACTIVER);

		irq_sender[this_cpu] = gic_get_sender(irqstat);
		irq_number[this_cpu] = irqnr;
		++acked[this_cpu];
	} else {
		++spurious[this_cpu];
	}
}

static void run_active_clear_test(void)
{
	report_prefix_push("active");
	setup_irq(ipi_clear_active_handler);
	ipi_test_self();
	check_spurious();
	report_prefix_pop();
}

static bool test_ro_pattern_32(void *address, u32 pattern, u32 orig)
{
	u32 reg;

	writel(pattern, address);
	reg = readl(address);

	if (reg != orig)
		writel(orig, address);

	return reg == orig;
}

static bool test_readonly_32(void *address, bool razwi)
{
	u32 orig, pattern;

	orig = readl(address);
	if (razwi && orig)
		return false;

	pattern = 0xffffffff;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	pattern = 0xa5a55a5a;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	pattern = 0;
	if (orig != pattern) {
		if (!test_ro_pattern_32(address, pattern, orig))
			return false;
	}

	return true;
}

static void test_typer_v2(uint32_t reg)
{
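	/* GICD_TYPER.CPUNumber (bits [7:5]) = implemented CPU interfaces - 1 */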
	int nr_gic_cpus = ((reg >> 5) & 0x7) + 1;

	report_info("nr_cpus=%d", nr_cpus);
	report(nr_cpus == nr_gic_cpus, "all CPUs have interrupts");
}

#define BYTE(reg32, byte) (((reg32) >> ((byte) * 8)) & 0xff)
#define REPLACE_BYTE(reg32, byte, new) (((reg32) & ~(0xff << ((byte) * 8))) |\
					((new) << ((byte) * 8)))

/*
 * Some registers are byte accessible; do a byte-wide read and write of known
 * content to check for this.
 * Apply a @mask to cater for special register properties.
 * @pattern contains the value already in the register.
 */
static void test_byte_access(void *base_addr, u32 pattern, u32 mask)
{
	u32 reg = readb(base_addr + 1);
	bool res;

	res = (reg == (BYTE(pattern, 1) & (mask >> 8)));
	report(res, "byte reads successful");
	if (!res)
		report_info("byte 1 of 0x%08"PRIx32" => 0x%02"PRIx32,
				pattern & mask, reg);

	pattern = REPLACE_BYTE(pattern, 2, 0x1f);
	writeb(BYTE(pattern, 2), base_addr + 2);
	reg = readl(base_addr);
	res = (reg == (pattern & mask));
	report(res, "byte writes successful");
	if (!res)
		report_info("writing 0x%02"PRIx32" into byte 2 => 0x%08"PRIx32,
				BYTE(pattern, 2), reg);
}

static void test_priorities(int nr_irqs, void *priptr)
{
	u32 orig_prio, reg, pri_bits;
	u32 pri_mask, pattern;
	void *first_spi = priptr + GIC_FIRST_SPI;

	orig_prio = readl(first_spi);
	report_prefix_push("IPRIORITYR");

	/*
	 * Determine the implemented number of priority bits by writing all
	 * ones and checking the number of cleared bits in the value read
	 * back.
	 */
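	/* e.g. a read-back of 0xf0f0f0f0 means 4 priority bits per field */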
	writel(0xffffffff, first_spi);
	pri_mask = readl(first_spi);

	reg = ~pri_mask;
	report((((reg >> 16) == (reg & 0xffff)) &&
	        ((reg & 0xff) == ((reg >> 8) & 0xff))),
	       "consistent priority masking");
	report_info("priority mask is 0x%08"PRIx32, pri_mask);

	reg = reg & 0xff;
	for (pri_bits = 8; reg & 1; reg >>= 1, pri_bits--)
		;
	report(pri_bits >= 4, "implements at least 4 priority bits");
	report_info("%"PRIu32" priority bits implemented", pri_bits);

	pattern = 0;
	writel(pattern, first_spi);
	report(readl(first_spi) == pattern, "clearing priorities");

	/* setting all priorities to their max values was tested above */

	report(test_readonly_32(priptr + nr_irqs, true),
	       "accesses beyond limit RAZ/WI");

	writel(pattern, priptr + nr_irqs - 4);
	report(readl(priptr + nr_irqs - 4) == (pattern & pri_mask),
	       "accessing last SPIs");

	pattern = 0xff7fbf3f;
	writel(pattern, first_spi);
	report(readl(first_spi) == (pattern & pri_mask),
	       "priorities are preserved");

	/* The PRIORITY registers are byte accessible. */
	test_byte_access(first_spi, pattern, pri_mask);

	report_prefix_pop();
	writel(orig_prio, first_spi);
}

/* GICD_ITARGETSR is only used by GICv2. */
static void test_targets(int nr_irqs)
{
	void *targetsptr = gicv2_dist_base() + GICD_ITARGETSR;
	u32 orig_targets;
	u32 cpu_mask;
	u32 pattern, reg;

	orig_targets = readl(targetsptr + GIC_FIRST_SPI);
	report_prefix_push("ITARGETSR");

	cpu_mask = (1 << nr_cpus) - 1;
	cpu_mask |= cpu_mask << 8;
	cpu_mask |= cpu_mask << 16;
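	/* cpu_mask now has the valid CPU bits replicated in all four bytes */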

	/* Check that bits for non-implemented CPUs are RAZ/WI. */
	if (nr_cpus < 8) {
		writel(0xffffffff, targetsptr + GIC_FIRST_SPI);
		report(!(readl(targetsptr + GIC_FIRST_SPI) & ~cpu_mask),
		       "bits for non-existent CPUs masked");
		report_info("%d non-existent CPUs", 8 - nr_cpus);
	} else {
		report_skip("CPU masking (all CPUs implemented)");
	}

	report(test_readonly_32(targetsptr + nr_irqs, true),
	       "accesses beyond limit RAZ/WI");

	pattern = 0x0103020f;
	writel(pattern, targetsptr + GIC_FIRST_SPI);
	reg = readl(targetsptr + GIC_FIRST_SPI);
	report(reg == (pattern & cpu_mask), "register content preserved");
	if (reg != (pattern & cpu_mask))
		report_info("writing %08"PRIx32" reads back as %08"PRIx32,
				pattern & cpu_mask, reg);

	/* The TARGETS registers are byte accessible. */
	test_byte_access(targetsptr + GIC_FIRST_SPI, pattern, cpu_mask);

	writel(orig_targets, targetsptr + GIC_FIRST_SPI);

	report_prefix_pop();
}

static void gic_test_mmio(void)
{
	u32 reg;
	int nr_irqs;
	void *gic_dist_base, *idreg;

	switch (gic_version()) {
	case 0x2:
		gic_dist_base = gicv2_dist_base();
		idreg = gic_dist_base + GICD_ICPIDR2;
		break;
	case 0x3:
		report_abort("GICv3 MMIO tests NYI");
	default:
		report_abort("GIC version %d not supported", gic_version());
	}

	reg = readl(gic_dist_base + GICD_TYPER);
	nr_irqs = GICD_TYPER_IRQS(reg);
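	/* GICD_TYPER.ITLinesNumber = N means 32 * (N + 1) supported INTIDs */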
	report_info("number of implemented SPIs: %d", nr_irqs - GIC_FIRST_SPI);

	test_typer_v2(reg);

	report_info("IIDR: 0x%08"PRIx32, readl(gic_dist_base + GICD_IIDR));

	report(test_readonly_32(gic_dist_base + GICD_TYPER, false),
	       "GICD_TYPER is read-only");
	report(test_readonly_32(gic_dist_base + GICD_IIDR, false),
	       "GICD_IIDR is read-only");

	reg = readl(idreg);
	report(test_readonly_32(idreg, false), "ICPIDR2 is read-only");
	report_info("value of ICPIDR2: 0x%08"PRIx32, reg);

	test_priorities(nr_irqs, gic_dist_base + GICD_IPRIORITYR);

	if (gic_version() == 2)
		test_targets(nr_irqs);
}

#if defined(__arm__)

static void test_its_introspection(void) {}
static void test_its_trigger(void) {}
static void test_its_migration(void) {}
static void test_its_pending_migration(void) {}
static void test_migrate_unmapped_collection(void) {}

#else /* __aarch64__ */

static void test_its_introspection(void)
{
	struct its_baser *dev_baser = &its_data.device_baser;
	struct its_baser *coll_baser = &its_data.coll_baser;
	struct its_typer *typer = &its_data.typer;

	if (!gicv3_its_base()) {
		report_skip("No ITS, skip ...");
		return;
	}

	/* IIDR */
	report(test_readonly_32(gicv3_its_base() + GITS_IIDR, false),
	       "GITS_IIDR is read-only");

	/* TYPER */
	report(test_readonly_32(gicv3_its_base() + GITS_TYPER, false),
	       "GITS_TYPER is read-only");

	report(typer->phys_lpi, "ITS supports physical LPIs");
	report_info("vLPI support: %s", typer->virt_lpi ? "yes" : "no");
	report_info("ITT entry size = 0x%x", typer->ite_size);
	report_info("Bit Count: EventID=%d DeviceId=%d CollId=%d",
			typer->eventid_bits, typer->deviceid_bits,
			typer->collid_bits);
	report(typer->eventid_bits && typer->deviceid_bits &&
	       typer->collid_bits, "ID spaces");
	report_info("Target address format %s",
			typer->pta ? "Redist base address" : "PE #");

	report(dev_baser && coll_baser, "detect device and collection BASER");
	report_info("device table entry_size = 0x%x", dev_baser->esz);
	report_info("collection table entry_size = 0x%x", coll_baser->esz);
}

static int its_prerequisites(int nb_cpus)
{
	int cpu;

	if (!gicv3_its_base()) {
		report_skip("No ITS, skip ...");
		return -1;
	}

	if (nr_cpus < nb_cpus) {
		report_skip("Test requires at least %d vcpus", nb_cpus);
		return -1;
	}

	setup_irq(irq_handler);

	for_each_present_cpu(cpu) {
		if (cpu == 0)
			continue;
		smp_boot_secondary(cpu, irq_recv);
	}
	wait_on_ready();

	its_enable_defaults();

	return 0;
}

/*
 * Set up the configuration for these mappings:
 * dev_id=2 event=20  -> vcpu 3, intid=8195
 * dev_id=7 event=255 -> vcpu 2, intid=8196
 * LPIs ready to hit
 */
static int its_setup1(void)
{
	struct its_collection *col3, *col2;
	struct its_device *dev2, *dev7;

	if (its_prerequisites(4))
		return -1;

	dev2 = its_create_device(2 /* dev id */, 8 /* nb_ites */);
	dev7 = its_create_device(7 /* dev id */, 8 /* nb_ites */);

	col3 = its_create_collection(3 /* col id */, 3 /* target PE */);
	col2 = its_create_collection(2 /* col id */, 2 /* target PE */);

	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT);
	gicv3_lpi_set_config(8196, LPI_PROP_DEFAULT);

	/*
	 * dev=2, eventid=20  -> lpi=8195, col=3
	 * dev=7, eventid=255 -> lpi=8196, col=2
	 */
	its_send_mapd(dev2, true);
	its_send_mapd(dev7, true);

	its_send_mapc(col3, true);
	its_send_mapc(col2, true);

	its_send_invall(col2);
	its_send_invall(col3);

	its_send_mapti(dev2, 8195 /* lpi id */, 20 /* event id */, col3);
	its_send_mapti(dev7, 8196 /* lpi id */, 255 /* event id */, col2);
	return 0;
}

static void test_its_trigger(void)
{
	struct its_collection *col3;
	struct its_device *dev2, *dev7;
	cpumask_t mask;

	if (its_setup1())
		return;

	col3 = its_get_collection(3);
	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

	report_prefix_push("int");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(3, &mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8195),
	       "dev=2, eventid=20 -> lpi=8195, col=3");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(2, &mask);
	its_send_int(dev7, 255);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8196),
	       "dev=7, eventid=255 -> lpi=8196, col=2");

	report_prefix_pop();

	report_prefix_push("inv/invall");

	/*
	 * Disable LPI 8195 and check that dev2/eventid=20 does not trigger
	 * the corresponding LPI.
	 */
	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT & ~LPI_PROP_ENABLED);
	its_send_inv(dev2, 20);

	stats_reset();
	cpumask_clear(&mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, -1, -1),
	       "dev2/eventid=20 does not trigger any LPI");

	/*
	 * Re-enable the LPI. While "a change to the LPI configuration is not
	 * guaranteed to be visible until an appropriate invalidation
	 * operation has completed", hardware that doesn't implement caches
	 * may have delivered the event at any point after the enabling.
	 * Check that the LPI has hit by the time the invall is done.
	 */
	gicv3_lpi_set_config(8195, LPI_PROP_DEFAULT);
	stats_reset();
	cpumask_clear(&mask);
	its_send_int(dev2, 20);
	cpumask_set_cpu(3, &mask);
	its_send_invall(col3);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8195),
	       "dev2/eventid=20 pending LPI is received");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(3, &mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8195),
	       "dev2/eventid=20 now triggers an LPI");

	report_prefix_pop();

	report_prefix_push("mapd valid=false");
	/*
	 * Unmap device 2 and check that eventid 20, formerly attached to it,
	 * does not hit anymore.
	 */

	its_send_mapd(dev2, false);
	stats_reset();
	cpumask_clear(&mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, -1, -1), "no LPI after device unmap");

	check_spurious();
	report_prefix_pop();
}

static void test_its_migration(void)
{
	struct its_device *dev2, *dev7;
	cpumask_t mask;

	if (its_setup1()) {
		migrate_skip();
		return;
	}

	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

	migrate();

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(3, &mask);
	its_send_int(dev2, 20);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8195),
	       "dev2/eventid=20 triggers LPI 8195 on PE #3 after migration");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(2, &mask);
	its_send_int(dev7, 255);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8196),
	       "dev7/eventid=255 triggers LPI 8196 on PE #2 after migration");

	check_spurious();
}

#define ERRATA_UNMAPPED_COLLECTIONS "ERRATA_8c58be34494b"

static void test_migrate_unmapped_collection(void)
{
	struct its_collection *col = NULL;
	struct its_device *dev2 = NULL, *dev7 = NULL;
	cpumask_t mask;
	int pe0 = 0;
	u8 config;

	if (its_setup1()) {
		migrate_skip();
		return;
	}

	if (!errata(ERRATA_UNMAPPED_COLLECTIONS)) {
		report_skip("Skipping test, as this test hangs without the fix. "
			    "Set %s=y to enable.", ERRATA_UNMAPPED_COLLECTIONS);
		migrate_skip();
		return;
	}

	col = its_create_collection(pe0, pe0);
	dev2 = its_get_device(2);
	dev7 = its_get_device(7);

	/* MAPTI with the collection unmapped */
	its_send_mapti(dev2, 8192, 0, col);
	gicv3_lpi_set_config(8192, LPI_PROP_DEFAULT);

	migrate();

	/* on the destination, map the collection */
	its_send_mapc(col, true);
	its_send_invall(col);

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(2, &mask);
	its_send_int(dev7, 255);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8196),
	       "dev7/eventid=255 triggered LPI 8196 on PE #2");

	config = gicv3_lpi_get_config(8192);
	report(config == LPI_PROP_DEFAULT,
	       "Config of LPI 8192 was properly migrated");

	stats_reset();
	cpumask_clear(&mask);
	cpumask_set_cpu(pe0, &mask);
	its_send_int(dev2, 0);
	wait_for_interrupts(&mask);
	report(check_acked(&mask, 0, 8192),
	       "dev2/eventid=0 triggered LPI 8192 on PE0");

	check_spurious();
}

static void test_its_pending_migration(void)
{
	struct its_device *dev;
	struct its_collection *collection[2];
	int *expected = calloc(nr_cpus, sizeof(int));
	int pe0 = nr_cpus - 1, pe1 = nr_cpus - 2;
	u64 pendbaser;
	void *ptr;
	int i;

	if (its_prerequisites(4)) {
		migrate_skip();
		return;
	}

	dev = its_create_device(2 /* dev id */, 8 /* nb_ites */);
	its_send_mapd(dev, true);

	collection[0] = its_create_collection(pe0, pe0);
	collection[1] = its_create_collection(pe1, pe1);
	its_send_mapc(collection[0], true);
	its_send_mapc(collection[1], true);

	/* disable LPIs at the redistributor level */
	gicv3_lpi_rdist_disable(pe0);
	gicv3_lpi_rdist_disable(pe1);

	/* LPIs are interleaved between the 2 PEs */
	for (i = 0; i < 256; i++) {
		struct its_collection *col = i % 2 ? collection[0] :
						     collection[1];
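		/* the framework stores the PE number in target_address[31:16] */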
		int vcpu = col->target_address >> 16;

		its_send_mapti(dev, LPI(i), i, col);
		gicv3_lpi_set_config(LPI(i), LPI_PROP_DEFAULT);
		gicv3_lpi_set_clr_pending(vcpu, LPI(i), true);
	}
	its_send_invall(collection[0]);
	its_send_invall(collection[1]);

	/*
	 * Clear the PTZ (Pending Table Zero) bit on each PENDBASER so the
	 * redistributor reads back the pending table we just populated.
	 */

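	/* 256 LPIs were interleaved across the two PEs: 128 pending on each */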
	expected[pe0] = 128;
	expected[pe1] = 128;

	ptr = gicv3_data.redist_base[pe0] + GICR_PENDBASER;
	pendbaser = readq(ptr);
	writeq(pendbaser & ~GICR_PENDBASER_PTZ, ptr);

	ptr = gicv3_data.redist_base[pe1] + GICR_PENDBASER;
	pendbaser = readq(ptr);
	writeq(pendbaser & ~GICR_PENDBASER_PTZ, ptr);

	/*
	 * Reset and initialization values for acked are the same, so we don't
	 * need to explicitly call stats_reset().
	 */
	gicv3_lpi_rdist_enable(pe0);
	gicv3_lpi_rdist_enable(pe1);

	migrate();

	/* let's wait for the 256 LPIs to be handled */
	mdelay(1000);

	check_lpi_hits(expected, "128 LPIs on both PE0 and PE1 after migration");
}
#endif

int main(int argc, char **argv)
{
	if (!gic_init()) {
		printf("No supported gic present, skipping tests...\n");
		return report_summary();
	}

	report_prefix_pushf("gicv%d", gic_version());

	switch (gic_version()) {
	case 2:
		gic = &gicv2;
		break;
	case 3:
		gic = &gicv3;
		break;
	}

	if (argc < 2)
		report_abort("no test specified");

	if (strcmp(argv[1], "ipi") == 0) {
		report_prefix_push(argv[1]);
		nr_cpu_check(2);
		on_cpus(ipi_test, NULL);
	} else if (strcmp(argv[1], "active") == 0) {
		run_active_clear_test();
	} else if (strcmp(argv[1], "mmio") == 0) {
		report_prefix_push(argv[1]);
		gic_test_mmio();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-trigger")) {
		report_prefix_push(argv[1]);
		test_its_trigger();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-migration")) {
		report_prefix_push(argv[1]);
		test_its_migration();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-pending-migration")) {
		report_prefix_push(argv[1]);
		test_its_pending_migration();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "its-migrate-unmapped-collection")) {
		report_prefix_push(argv[1]);
		test_migrate_unmapped_collection();
		report_prefix_pop();
	} else if (strcmp(argv[1], "its-introspection") == 0) {
		report_prefix_push(argv[1]);
		test_its_introspection();
		report_prefix_pop();
	} else {
		report_abort("Unknown subtest '%s'", argv[1]);
	}

	return report_summary();
}