/*
 * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/gic.h>
#include <asm/io.h>
#include <alloc_page.h>

/*
 * Find the redistributor frame belonging to the calling CPU and cache it
 * in gicv3_redist_base().
 *
 * Each redistributor region (gicv3_data.redist_bases[]) holds a series of
 * per-CPU frames, each 'stride' bytes apart.  A frame belongs to this CPU
 * when GICR_TYPER[63:32] matches the CPU's compressed MPIDR affinity
 * value; GICR_TYPER_LAST marks the final frame of a region.
 */
void gicv3_set_redist_base(size_t stride)
{
	u32 aff = mpidr_compress(get_mpidr());
	u64 typer;
	int i = 0;

	while (gicv3_data.redist_bases[i]) {
		void *ptr = gicv3_data.redist_bases[i];
		do {
			typer = gicv3_read_typer(ptr + GICR_TYPER);
			/* GICR_TYPER[63:32] is this frame's affinity value */
			if ((typer >> 32) == aff) {
				gicv3_redist_base() = ptr;
				return;
			}
			ptr += stride; /* skip RD_base, SGI_base, etc. */
		} while (!(typer & GICR_TYPER_LAST));
		++i;
	}

	/* should never reach here */
	assert(0);
}

/*
 * Bring the GICv3 up with reasonable defaults for the calling CPU:
 * probe the number of supported IRQs, enable the distributor with
 * affinity routing and Group 1 interrupts, group-configure the SPIs,
 * locate this CPU's redistributor if not already done, then configure
 * and enable the SGIs and the CPU interface.
 */
void gicv3_enable_defaults(void)
{
	void *dist = gicv3_dist_base();
	void *sgi_base;
	unsigned int i;

	/* GICD_TYPER.ITLinesNumber gives the number of implemented IRQs */
	gicv3_data.irq_nr = GICD_TYPER_IRQS(readl(dist + GICD_TYPER));
	if (gicv3_data.irq_nr > 1020)
		gicv3_data.irq_nr = 1020;

	/* Disable the distributor while (re)configuring it */
	writel(0, dist + GICD_CTLR);
	gicv3_dist_wait_for_rwp();

	/* Enable affinity routing and Group 1 (secure and non-secure) */
	writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
	       dist + GICD_CTLR);
	gicv3_dist_wait_for_rwp();

	/* Put all shared interrupts into Group 1 */
	for (i = 0; i < gicv3_data.irq_nr; i += 4)
		writel(~0, dist + GICD_IGROUPR + i);

	/* Default stride: RD_base + SGI_base = two 64K frames per CPU */
	if (!gicv3_redist_base())
		gicv3_set_redist_base(SZ_64K * 2);
	sgi_base = gicv3_sgi_base();

	/* Put the SGIs/PPIs into Group 1 */
	writel(~0, sgi_base + GICR_IGROUPR0);

	/* Default priority for the 16 SGIs, four per 32-bit register */
	for (i = 0; i < 16; i += 4)
		writel(GICD_INT_DEF_PRI_X4, sgi_base + GICR_IPRIORITYR0 + i);

	/* Enable the SGIs */
	writel(GICD_INT_EN_SET_SGI, sgi_base + GICR_ISENABLER0);

	/* CPU interface: open the priority mask and enable Group 1 */
	gicv3_write_pmr(GICC_INT_PRI_THRESHOLD);
	gicv3_write_grpen1(1);
}

/* Extract the INTID from an ICC_IAR1_EL1 value (low 24 bits) */
u32 gicv3_iar_irqnr(u32 iar)
{
	return iar & ((1 << 24) - 1);
}

/*
 * Send SGI 'irq' to every CPU in 'dest' via ICC_SGI1R_EL1.
 *
 * Only SGIs (irq < 16) can be sent this way.  CPUs are batched per
 * cluster, since one ICC_SGI1R_EL1 write can only target up to 16 CPUs
 * sharing the same upper affinity levels.
 */
void gicv3_ipi_send_mask(int irq, const cpumask_t *dest)
{
	u16 tlist;
	int cpu;

	assert(irq < 16);

	/*
	 * Ensure stores to Normal memory are visible to other CPUs before
	 * sending the IPI.
	 */
	wmb();

	/*
	 * For each cpu in the mask collect its peers, which are also in
	 * the mask, in order to form target lists.
	 */
	for_each_cpu(cpu, dest) {
		u64 mpidr = cpus[cpu], sgi1r;
		u64 cluster_id;

		/*
		 * GICv3 can send IPIs to up 16 peer cpus with a single
		 * write to ICC_SGI1R_EL1 (using the target list). Peers
		 * are cpus that have nearly identical MPIDRs, the only
		 * difference being Aff0. The matching upper affinity
		 * levels form the cluster ID.
		 */
		cluster_id = mpidr & ~0xffUL;
		tlist = 0;

		/*
		 * Sort of open code for_each_cpu in order to have a
		 * nested for_each_cpu loop.
		 */
		while (cpu < nr_cpus) {
			/* Target lists only address Aff0 values 0..15 */
			if ((mpidr & 0xff) >= 16) {
				printf("cpu%d MPIDR:aff0 is %d (>= 16)!\n",
					cpu, (int)(mpidr & 0xff));
				break;
			}

			tlist |= 1 << (mpidr & 0xf);

			cpu = cpumask_next(cpu, dest);
			if (cpu >= nr_cpus)
				break;

			mpidr = cpus[cpu];

			if (cluster_id != (mpidr & ~0xffUL)) {
				/*
				 * The next cpu isn't in our cluster. Roll
				 * back the cpu index allowing the outer
				 * for_each_cpu to find it again with
				 * cpumask_next
				 */
				--cpu;
				break;
			}
		}

		/* Send the IPIs for the target list of this cluster */
		sgi1r = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
			 MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
			 irq << 24				|
			 MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
			 tlist);

		gicv3_write_sgi1r(sgi1r);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

/* Send SGI 'irq' to a single CPU (convenience wrapper over send_mask) */
void gicv3_ipi_send_single(int irq, int cpu)
{
	cpumask_t dest;

	cpumask_clear(&dest);
	cpumask_set_cpu(cpu, &dest);
	gicv3_ipi_send_mask(irq, &dest);
}

#if defined(__aarch64__)

/*
 * alloc_lpi_tables - Allocate LPI config and pending tables
 * and set PROPBASER (shared by all rdistributors) and per
 * redistributor PENDBASER.
 *
 * gicv3_set_redist_base() must be called before
 */
void gicv3_lpi_alloc_tables(void)
{
	/*
	 * Number of pages backing a 64K table, converted to an
	 * alloc_pages() order.  NOTE(review): whether fls(n) yields
	 * exactly log2(n) here depends on this library's fls()
	 * convention — confirm it doesn't over-allocate by one order.
	 */
	unsigned long n = SZ_64K >> PAGE_SHIFT;
	unsigned long order = fls(n);
	u64 prop_val;
	int cpu;

	assert(gicv3_redist_base());

	/* One property (config) table shared by all redistributors */
	gicv3_data.lpi_prop = alloc_pages(order);

	/* ID bits = 13, ie. up to 14b LPI INTID */
	prop_val = (u64)(virt_to_phys(gicv3_data.lpi_prop)) | 13;

	for_each_present_cpu(cpu) {
		u64 pend_val;
		void *ptr;

		ptr = gicv3_data.redist_base[cpu];

		writeq(prop_val, ptr + GICR_PROPBASER);

		/* Each redistributor gets its own pending table */
		gicv3_data.lpi_pend[cpu] = alloc_pages(order);
		pend_val = (u64)(virt_to_phys(gicv3_data.lpi_pend[cpu]));
		writeq(pend_val, ptr + GICR_PENDBASER);
	}
}

/*
 * Set (set=true) or clear (set=false) bit 'n' in redistributor 'rdist's
 * LPI pending table.  The table holds one bit per INTID — presumably
 * indexed by raw INTID, so LPI INTIDs start at bit 8192; confirm against
 * callers.
 */
void gicv3_lpi_set_clr_pending(int rdist, int n, bool set)
{
	u8 *ptr = gicv3_data.lpi_pend[rdist];
	u8 mask = 1 << (n % 8), byte;

	ptr += (n / 8);
	byte = *ptr;
	if (set)
		byte |= mask;
	else
		byte &= ~mask;
	*ptr = byte;
}

/* Read-modify-write GICR_CTLR.EnableLPIs on the given redistributor */
static void gicv3_lpi_rdist_ctrl(u32 redist, bool set)
{
	void *ptr;
	u64 val;

	assert(redist < nr_cpus);

	ptr = gicv3_data.redist_base[redist];
	val = readl(ptr + GICR_CTLR);
	if (set)
		val |= GICR_CTLR_ENABLE_LPIS;
	else
		val &= ~GICR_CTLR_ENABLE_LPIS;
	writel(val, ptr + GICR_CTLR);
}

/* Enable LPI delivery on redistributor 'redist' */
void gicv3_lpi_rdist_enable(int redist)
{
	gicv3_lpi_rdist_ctrl(redist, true);
}

/* Disable LPI delivery on redistributor 'redist' */
void gicv3_lpi_rdist_disable(int redist)
{
	gicv3_lpi_rdist_ctrl(redist, false);
}
#endif /* __aarch64__ */