/*
 * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/gic.h>
#include <asm/io.h>
#include <alloc_page.h>

void gicv3_set_redist_base(size_t stride)
{
	u32 aff = mpidr_compress(get_mpidr());
	u64 typer;
	int i = 0;

	while (gicv3_data.redist_bases[i]) {
		void *ptr = gicv3_data.redist_bases[i];
		do {
			typer = gicv3_read_typer(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				gicv3_redist_base() = ptr;
				return;
			}
			ptr += stride; /* skip RD_base, SGI_base, etc. */
		} while (!(typer & GICR_TYPER_LAST));
		++i;
	}

	/* should never reach here */
	assert(0);
}
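
/*
 * Note on the search above: GICR_TYPER[63:32] holds the affinity value
 * of the cpu each redistributor serves, which is what the loop matches
 * against this cpu's compressed MPIDR. Without GICv4 each redistributor
 * occupies two contiguous 64KB frames (RD_base followed by SGI_base),
 * so callers typically pass a stride of SZ_64K * 2, as
 * gicv3_enable_defaults() does below; GICv4 adds two more frames.
 */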

void gicv3_enable_defaults(void)
{
	void *dist = gicv3_dist_base();
	void *sgi_base;
	unsigned int i;

	gicv3_data.irq_nr = GICD_TYPER_IRQS(readl(dist + GICD_TYPER));
	if (gicv3_data.irq_nr > 1020)
		gicv3_data.irq_nr = 1020;

	writel(0, dist + GICD_CTLR);
	gicv3_dist_wait_for_rwp();

	writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
	       dist + GICD_CTLR);
	gicv3_dist_wait_for_rwp();

	/* Configure all interrupts as non-secure Group 1 */
	for (i = 0; i < gicv3_data.irq_nr; i += 4)
		writel(~0, dist + GICD_IGROUPR + i);

	if (!gicv3_redist_base())
		gicv3_set_redist_base(SZ_64K * 2);
	sgi_base = gicv3_sgi_base();

	writel(~0, sgi_base + GICR_IGROUPR0);

	for (i = 0; i < 16; i += 4)
		writel(GICD_INT_DEF_PRI_X4, sgi_base + GICR_IPRIORITYR0 + i);

	writel(GICD_INT_EN_SET_SGI, sgi_base + GICR_ISENABLER0);

	/* Set the priority mask and enable Group 1 at the cpu interface */
	gicv3_write_pmr(GICC_INT_PRI_THRESHOLD);
	gicv3_write_grpen1(1);
}
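
/*
 * Usage sketch (not part of this file's API): in a typical
 * kvm-unit-tests flow, each cpu just runs
 *
 *	gicv3_enable_defaults();
 *	local_irq_enable();
 *
 * after gic_init() has discovered the distributor and redistributor
 * regions, and is then ready to take Group 1 interrupts.
 */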

/* The INTID lives in bits [23:0] of ICC_IAR1_EL1; mask off the rest. */
u32 gicv3_iar_irqnr(u32 iar)
{
	return iar & ((1 << 24) - 1);
}

void gicv3_ipi_send_mask(int irq, const cpumask_t *dest)
{
	u16 tlist;
	int cpu;

	assert(irq < 16);

	/*
	 * Ensure stores to Normal memory are visible to other CPUs before
	 * sending the IPI.
	 */
	wmb();

	/*
	 * For each cpu in the mask collect its peers, which are also in
	 * the mask, in order to form target lists.
	 */
	for_each_cpu(cpu, dest) {
		u64 mpidr = cpus[cpu], sgi1r;
		u64 cluster_id;
		/*
		 * GICv3 can send IPIs to up to 16 peer cpus with a single
		 * write to ICC_SGI1R_EL1 (using the target list). Peers
		 * are cpus that have nearly identical MPIDRs, the only
		 * difference being Aff0. The matching upper affinity
		 * levels form the cluster ID.
		 */
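		/*
		 * For example (hypothetical MPIDRs): cpus at 0x000, 0x001
		 * and 0x100 yield one write covering the first two
		 * (cluster_id 0x000, tlist 0b0011) and a second write for
		 * the third (cluster_id 0x100, tlist 0b0001).
		 */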
		cluster_id = mpidr & ~0xffUL;
		tlist = 0;

		/*
		 * Open-code for_each_cpu here in order to have a
		 * nested for_each_cpu loop.
		 */
		while (cpu < nr_cpus) {
			if ((mpidr & 0xff) >= 16) {
				printf("cpu%d MPIDR:aff0 is %d (>= 16)!\n",
					cpu, (int)(mpidr & 0xff));
				break;
			}

			tlist |= 1 << (mpidr & 0xf);

			cpu = cpumask_next(cpu, dest);
			if (cpu >= nr_cpus)
				break;

			mpidr = cpus[cpu];

			if (cluster_id != (mpidr & ~0xffUL)) {
				/*
				 * The next cpu isn't in our cluster. Roll
				 * back the cpu index allowing the outer
				 * for_each_cpu to find it again with
				 * cpumask_next.
				 */
				--cpu;
				break;
			}
		}

		/* Send the IPIs for the target list of this cluster */
		sgi1r = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
			 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
			 irq << 24 |
			 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
			 tlist);

		gicv3_write_sgi1r(sgi1r);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

void gicv3_ipi_send_single(int irq, int cpu)
{
	cpumask_t dest;

	cpumask_clear(&dest);
	cpumask_set_cpu(cpu, &dest);
	gicv3_ipi_send_mask(irq, &dest);
}
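
/*
 * Usage sketch: with an IRQ handler installed by the test, sending
 * SGI 0 to cpu 1 is simply
 *
 *	gicv3_ipi_send_single(0, 1);
 */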

#if defined(__aarch64__)

/*
 * gicv3_lpi_alloc_tables - Allocate the LPI configuration and pending
 * tables, then program PROPBASER (shared by all redistributors) and the
 * per-redistributor PENDBASER.
 *
 * gicv3_set_redist_base() must have been called on each cpu beforehand.
 */
void gicv3_lpi_alloc_tables(void)
{
	unsigned long n = SZ_64K >> PAGE_SHIFT;
	unsigned long order = fls(n);
	u64 prop_val;
	int cpu;

	gicv3_data.lpi_prop = alloc_pages(order);

	/* ID bits = 13, i.e. up to 14-bit LPI INTIDs */
	prop_val = (u64)(virt_to_phys(gicv3_data.lpi_prop)) | 13;

	for_each_online_cpu(cpu) {
		u64 pend_val;
		void *ptr;

		assert_msg(gicv3_data.redist_base[cpu], "Redistributor for cpu%d not initialized. "
			   "Did cpu%d enable the GIC?", cpu, cpu);
		ptr = gicv3_data.redist_base[cpu];

		writeq(prop_val, ptr + GICR_PROPBASER);

		gicv3_data.lpi_pend[cpu] = alloc_pages(order);
		pend_val = (u64)(virt_to_phys(gicv3_data.lpi_pend[cpu]));
		writeq(pend_val, ptr + GICR_PENDBASER);
	}
}
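
/*
 * Each byte of the property table configures one LPI: priority in
 * bits [7:2] and an enable bit in bit 0, with LPI INTIDs starting at
 * 8192. A minimal sketch of enabling one LPI in the table allocated
 * above (the 0x1 enable bit is per the GICv3 spec, not a define from
 * this library):
 *
 *	u8 *cfg = gicv3_data.lpi_prop;
 *	cfg[intid - 8192] = (prio & 0xfc) | 0x1;
 */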

void gicv3_lpi_set_clr_pending(int rdist, int n, bool set)
{
	u8 *ptr = gicv3_data.lpi_pend[rdist];
	u8 mask = 1 << (n % 8), byte;

	ptr += (n / 8);
	byte = *ptr;
	if (set)
		byte |= mask;
	else
		byte &= ~mask;
	*ptr = byte;
}

static void gicv3_lpi_rdist_ctrl(u32 redist, bool set)
{
	void *ptr;
	u64 val;

	assert(redist < nr_cpus);

	ptr = gicv3_data.redist_base[redist];
	val = readl(ptr + GICR_CTLR);
	if (set)
		val |= GICR_CTLR_ENABLE_LPIS;
	else
		val &= ~GICR_CTLR_ENABLE_LPIS;
	writel(val, ptr + GICR_CTLR);
}

void gicv3_lpi_rdist_enable(int redist)
{
	gicv3_lpi_rdist_ctrl(redist, true);
}

void gicv3_lpi_rdist_disable(int redist)
{
	gicv3_lpi_rdist_ctrl(redist, false);
}
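
/*
 * Putting the LPI pieces together, a test on cpu 0 might do (a sketch;
 * INTID 8192 is just the first valid LPI):
 *
 *	gicv3_lpi_alloc_tables();
 *	gicv3_lpi_rdist_enable(0);
 *	gicv3_lpi_set_clr_pending(0, 8192, true);
 */
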
#endif /* __aarch64__ */