/*
 * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/gic.h>
#include <asm/io.h>
#include <alloc_page.h>

/* Find this cpu's redistributor by matching its affinity against GICR_TYPER. */
void gicv3_set_redist_base(size_t stride)
{
	u32 aff = mpidr_compress(get_mpidr());
	u64 typer;
	int i = 0;

	while (gicv3_data.redist_bases[i]) {
		void *ptr = gicv3_data.redist_bases[i];
		do {
			typer = gicv3_read_typer(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				gicv3_redist_base() = ptr;
				return;
			}
			ptr += stride; /* skip RD_base, SGI_base, etc. */
		} while (!(typer & GICR_TYPER_LAST));
		++i;
	}

	/* should never reach here */
	assert(0);
}

/*
 * Put the distributor, this cpu's redistributor and the cpu interface
 * into a sensible default state with Group1 interrupts enabled.
 */
void gicv3_enable_defaults(void)
{
	void *dist = gicv3_dist_base();
	void *sgi_base;
	unsigned int i;

	gicv3_data.irq_nr = GICD_TYPER_IRQS(readl(dist + GICD_TYPER));
	if (gicv3_data.irq_nr > 1020)
		gicv3_data.irq_nr = 1020;

	writel(0, dist + GICD_CTLR);
	gicv3_dist_wait_for_rwp();

	writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
	       dist + GICD_CTLR);
	gicv3_dist_wait_for_rwp();

	for (i = 0; i < gicv3_data.irq_nr; i += 4)
		writel(~0, dist + GICD_IGROUPR + i);

	if (!gicv3_redist_base())
		gicv3_set_redist_base(SZ_64K * 2);
	sgi_base = gicv3_sgi_base();

	writel(~0, sgi_base + GICR_IGROUPR0);

	for (i = 0; i < 16; i += 4)
		writel(GICD_INT_DEF_PRI_X4, sgi_base + GICR_IPRIORITYR0 + i);

	writel(GICD_INT_EN_SET_SGI, sgi_base + GICR_ISENABLER0);

	gicv3_write_pmr(GICC_INT_PRI_THRESHOLD);
	gicv3_write_grpen1(1);
}

/* Extract the INTID (bits [23:0]) from an ICC_IAR1_EL1 value. */
u32 gicv3_iar_irqnr(u32 iar)
{
	return iar & ((1 << 24) - 1);
}
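
/*
 * gicv3_ipi_send_mask - send SGI @irq to every cpu in @dest.
 *
 * Cpus are grouped by cluster (matching upper affinity levels), and one
 * ICC_SGI1R_EL1 write is issued per cluster with that cluster's members
 * encoded in the 16-bit target list. For example, with cpus whose MPIDRs
 * are 0x0, 0x1 and 0x100, one write would target the Aff1=0 cluster with
 * target list 0b11 and a second would target the Aff1=1 cluster with
 * target list 0b1 (the MPIDR values here are only illustrative).
 */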
void gicv3_ipi_send_mask(int irq, const cpumask_t *dest)
{
	u16 tlist;
	int cpu;

	assert(irq < 16);

	/*
	 * For each cpu in the mask, collect its peers that are also in
	 * the mask, in order to form target lists.
	 */
	for_each_cpu(cpu, dest) {
		u64 mpidr = cpus[cpu], sgi1r;
		u64 cluster_id;

		/*
		 * GICv3 can send IPIs to up to 16 peer cpus with a single
		 * write to ICC_SGI1R_EL1 (using the target list). Peers
		 * are cpus that have nearly identical MPIDRs, the only
		 * difference being Aff0. The matching upper affinity
		 * levels form the cluster ID.
		 */
		cluster_id = mpidr & ~0xffUL;
		tlist = 0;

		/*
		 * Partially open-code for_each_cpu in order to have a
		 * nested loop over the same mask.
		 */
		while (cpu < nr_cpus) {
			if ((mpidr & 0xff) >= 16) {
				printf("cpu%d MPIDR:aff0 is %d (>= 16)!\n",
					cpu, (int)(mpidr & 0xff));
				break;
			}

			tlist |= 1 << (mpidr & 0xf);

			cpu = cpumask_next(cpu, dest);
			if (cpu >= nr_cpus)
				break;

			mpidr = cpus[cpu];

			if (cluster_id != (mpidr & ~0xffUL)) {
				/*
				 * The next cpu isn't in our cluster. Roll
				 * back the cpu index, allowing the outer
				 * for_each_cpu to find it again with
				 * cpumask_next.
				 */
				--cpu;
				break;
			}
		}

		/* Send the IPIs for the target list of this cluster */
		sgi1r = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
			 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
			 irq << 24 |
			 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
			 tlist);

		gicv3_write_sgi1r(sgi1r);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

/* Convenience wrapper: send SGI @irq to the single cpu @cpu. */
void gicv3_ipi_send_single(int irq, int cpu)
{
	cpumask_t dest;

	cpumask_clear(&dest);
	cpumask_set_cpu(cpu, &dest);
	gicv3_ipi_send_mask(irq, &dest);
}
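
/*
 * The rest of this file deals with LPIs (Locality-specific Peripheral
 * Interrupts), which are configured through memory-resident tables (a
 * configuration table shared by all redistributors plus one pending
 * table per redistributor) rather than distributor registers. This LPI
 * code is only built for arm64.
 */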
#if defined(__aarch64__)

/*
 * gicv3_lpi_alloc_tables - Allocate the LPI configuration and pending
 * tables, then program GICR_PROPBASER (shared by all redistributors)
 * and each redistributor's GICR_PENDBASER.
 *
 * gicv3_set_redist_base() must have been called first.
 */
void gicv3_lpi_alloc_tables(void)
{
	unsigned long n = SZ_64K >> PAGE_SHIFT;
	unsigned long order = fls(n);
	u64 prop_val;
	int cpu;

	assert(gicv3_redist_base());

	gicv3_data.lpi_prop = alloc_pages(order);

	/* ID bits = 13, i.e. up to 14-bit LPI INTIDs */
	prop_val = (u64)(virt_to_phys(gicv3_data.lpi_prop)) | 13;

	for_each_present_cpu(cpu) {
		u64 pend_val;
		void *ptr;

		ptr = gicv3_data.redist_base[cpu];

		writeq(prop_val, ptr + GICR_PROPBASER);

		gicv3_data.lpi_pend[cpu] = alloc_pages(order);
		pend_val = (u64)(virt_to_phys(gicv3_data.lpi_pend[cpu]));
		writeq(pend_val, ptr + GICR_PENDBASER);
	}
}

/* Set or clear the pending-table bit for interrupt ID @n on redistributor @rdist. */
void gicv3_lpi_set_clr_pending(int rdist, int n, bool set)
{
	u8 *ptr = gicv3_data.lpi_pend[rdist];
	u8 mask = 1 << (n % 8), byte;

	ptr += (n / 8);
	byte = *ptr;
	if (set)
		byte |= mask;
	else
		byte &= ~mask;
	*ptr = byte;
}
#endif /* __aarch64__ */