1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
4 */
5
6 #define pr_fmt(fmt) "GICv5: " fmt
7
8 #include <linux/cpuhotplug.h>
9 #include <linux/idr.h>
10 #include <linux/irqdomain.h>
11 #include <linux/slab.h>
12 #include <linux/wordpart.h>
13
14 #include <linux/irqchip.h>
15 #include <linux/irqchip/arm-gic-v5.h>
16 #include <linux/irqchip/arm-vgic-info.h>
17
18 #include <asm/cpufeature.h>
19 #include <asm/exception.h>
20
/* Number of implemented interrupt priority bits; refined at probe time. */
static u8 pri_bits __ro_after_init = 5;

#define GICV5_IRQ_PRI_MASK	0x1f
/* Midpoint of the implemented priority range, used as the default priority. */
#define GICV5_IRQ_PRI_MI	(GICV5_IRQ_PRI_MASK & GENMASK(4, 5 - pri_bits))

/* Number of PPIs managed by the PPI irqdomain. */
#define PPI_NR			128

/* True if this CPU implements the GICv5 CPU interface (FEAT_GCIE). */
static bool gicv5_cpuif_has_gcie(void)
{
	return this_cpu_has_cap(ARM64_HAS_GICV5_CPUIF);
}

struct gicv5_chip_data gicv5_global_data __read_mostly;

/* Allocator for LPI INTIDs; num_lpis is the total reported at init time. */
static DEFINE_IDA(lpi_ida);
static u32 num_lpis __ro_after_init;
37
/* Record the number of LPIs available for allocation. */
void __init gicv5_init_lpis(u32 lpis)
{
	num_lpis = lpis;
}

/* Undo gicv5_init_lpis(): no further LPIs can be allocated. */
void __init gicv5_deinit_lpis(void)
{
	num_lpis = 0;
}
47
alloc_lpi(void)48 static int alloc_lpi(void)
49 {
50 if (!num_lpis)
51 return -ENOSPC;
52
53 return ida_alloc_max(&lpi_ida, num_lpis - 1, GFP_KERNEL);
54 }
55
/* Return an LPI INTID to the allocator. */
static void release_lpi(u32 lpi)
{
	ida_free(&lpi_ida, lpi);
}

/* Allocate an LPI INTID; returns a negative errno on failure. */
int gicv5_alloc_lpi(void)
{
	return alloc_lpi();
}

/* Free an LPI INTID previously returned by gicv5_alloc_lpi(). */
void gicv5_free_lpi(u32 lpi)
{
	release_lpi(lpi);
}
70
/* Program every PPI priority register bank with the default priority. */
static void gicv5_ppi_priority_init(void)
{
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR0_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR1_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR2_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR3_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR4_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR5_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR6_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR7_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR8_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR9_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR10_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR11_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR12_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR13_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR14_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR15_EL1);

	/*
	 * Context synchronization required to make sure system register writes
	 * effects are synchronised.
	 */
	isb();
}
96
/*
 * Set the priority of an interrupt and route it to the current CPU.
 *
 * Only SPIs and LPIs take a priority (CDPRI) and an affinity (CDAFF);
 * for any other type this is a no-op.
 */
static void gicv5_hwirq_init(irq_hw_number_t hwirq, u8 priority, u8 hwirq_type)
{
	u64 cdpri, cdaff;
	u16 iaffid;
	int ret;

	if (hwirq_type == GICV5_HWIRQ_TYPE_LPI || hwirq_type == GICV5_HWIRQ_TYPE_SPI) {
		cdpri = FIELD_PREP(GICV5_GIC_CDPRI_PRIORITY_MASK, priority) |
			FIELD_PREP(GICV5_GIC_CDPRI_TYPE_MASK, hwirq_type) |
			FIELD_PREP(GICV5_GIC_CDPRI_ID_MASK, hwirq);
		gic_insn(cdpri, CDPRI);

		/* Route the interrupt to the CPU executing this code. */
		ret = gicv5_irs_cpu_to_iaffid(smp_processor_id(), &iaffid);

		if (WARN_ON_ONCE(ret))
			return;

		cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid) |
			FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type) |
			FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, hwirq);
		gic_insn(cdaff, CDAFF);
	}
}
120
/* Mask a PPI by clearing its bit in the banked enable registers. */
static void gicv5_ppi_irq_mask(struct irq_data *d)
{
	u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);

	/* PPIs 0-63 live in ENABLER0, 64-127 in ENABLER1. */
	if (d->hwirq < 64)
		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, hwirq_id_bit, 0);
	else
		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, hwirq_id_bit, 0);

	/*
	 * We must ensure that the disable takes effect immediately to
	 * guarantee that the lazy-disabled IRQ mechanism works.
	 * A context synchronization event is required to guarantee it.
	 * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1.
	 */
	isb();
}

/* Mask an SPI/LPI with a GIC CDDIS instruction. */
static void gicv5_iri_irq_mask(struct irq_data *d, u8 hwirq_type)
{
	u64 cddis;

	cddis = FIELD_PREP(GICV5_GIC_CDDIS_ID_MASK, d->hwirq) |
		FIELD_PREP(GICV5_GIC_CDDIS_TYPE_MASK, hwirq_type);

	gic_insn(cddis, CDDIS);
	/*
	 * We must make sure that GIC CDDIS write effects are propagated
	 * immediately to make sure the disable takes effect to guarantee
	 * that the lazy-disabled IRQ mechanism works.
	 * Rule R_XCLJC states that the effects of a GIC system instruction
	 * complete in finite time.
	 * The GSB ensures completion of the GIC instruction and prevents
	 * loads, stores and GIC instructions from executing part of their
	 * functionality before the GSB SYS.
	 */
	gsb_sys();
}

/* irq_chip .irq_mask callback for SPIs. */
static void gicv5_spi_irq_mask(struct irq_data *d)
{
	gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_mask callback for LPIs. */
static void gicv5_lpi_irq_mask(struct irq_data *d)
{
	gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_LPI);
}
169
/* Unmask a PPI by setting its bit in the banked enable registers. */
static void gicv5_ppi_irq_unmask(struct irq_data *d)
{
	u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);

	/* PPIs 0-63 live in ENABLER0, 64-127 in ENABLER1. */
	if (d->hwirq < 64)
		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, 0, hwirq_id_bit);
	else
		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, 0, hwirq_id_bit);
	/*
	 * We must ensure that the enable takes effect in finite time - a
	 * context synchronization event is required to guarantee it, we
	 * can not take for granted that would happen (eg a core going straight
	 * into idle after enabling a PPI).
	 * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1.
	 */
	isb();
}

/* Unmask an SPI/LPI with a GIC CDEN instruction. */
static void gicv5_iri_irq_unmask(struct irq_data *d, u8 hwirq_type)
{
	u64 cden;

	cden = FIELD_PREP(GICV5_GIC_CDEN_ID_MASK, d->hwirq) |
	       FIELD_PREP(GICV5_GIC_CDEN_TYPE_MASK, hwirq_type);
	/*
	 * Rule R_XCLJC states that the effects of a GIC system instruction
	 * complete in finite time and that's the only requirement when
	 * unmasking an SPI/LPI IRQ.
	 */
	gic_insn(cden, CDEN);
}

/* irq_chip .irq_unmask callback for SPIs. */
static void gicv5_spi_irq_unmask(struct irq_data *d)
{
	gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_unmask callback for LPIs. */
static void gicv5_lpi_irq_unmask(struct irq_data *d)
{
	gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_LPI);
}
211
/*
 * End an interrupt: CDDI deactivates the given INTID, CDEOI performs
 * the priority drop for the interrupt being handled.
 */
static void gicv5_hwirq_eoi(u32 hwirq_id, u8 hwirq_type)
{
	u64 cddi;

	cddi = FIELD_PREP(GICV5_GIC_CDDI_ID_MASK, hwirq_id) |
	       FIELD_PREP(GICV5_GIC_CDDI_TYPE_MASK, hwirq_type);

	gic_insn(cddi, CDDI);

	gic_insn(0, CDEOI);
}

/* irq_chip .irq_eoi callback for PPIs. */
static void gicv5_ppi_irq_eoi(struct irq_data *d)
{
	/* Skip deactivate for forwarded PPI interrupts */
	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Priority drop only - deactivation is left to the vCPU side. */
		gic_insn(0, CDEOI);
		return;
	}

	gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_PPI);
}

/* irq_chip .irq_eoi callback for SPIs. */
static void gicv5_spi_irq_eoi(struct irq_data *d)
{
	gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_eoi callback for LPIs. */
static void gicv5_lpi_irq_eoi(struct irq_data *d)
{
	gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_LPI);
}
244
/*
 * Route an SPI/LPI to a CPU selected from @mask_val by issuing a GIC
 * CDAFF instruction with the target CPU's interface affinity ID.
 *
 * Returns IRQ_SET_MASK_OK_DONE on success (the effective affinity is
 * updated here), or a negative errno if no IAFFID exists for the CPU.
 */
static int gicv5_iri_irq_set_affinity(struct irq_data *d,
				      const struct cpumask *mask_val,
				      bool force, u8 hwirq_type)
{
	int ret, cpuid;
	u16 iaffid;
	u64 cdaff;

	if (force)
		cpuid = cpumask_first(mask_val);
	else
		cpuid = cpumask_any_and(mask_val, cpu_online_mask);

	ret = gicv5_irs_cpu_to_iaffid(cpuid, &iaffid);
	if (ret)
		return ret;

	cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid) |
		FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type) |
		FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, d->hwirq);
	gic_insn(cdaff, CDAFF);

	irq_data_update_effective_affinity(d, cpumask_of(cpuid));

	return IRQ_SET_MASK_OK_DONE;
}

/* irq_chip .irq_set_affinity callback for SPIs. */
static int gicv5_spi_irq_set_affinity(struct irq_data *d,
				      const struct cpumask *mask_val,
				      bool force)
{
	return gicv5_iri_irq_set_affinity(d, mask_val, force,
					  GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_set_affinity callback for LPIs. */
static int gicv5_lpi_irq_set_affinity(struct irq_data *d,
				      const struct cpumask *mask_val,
				      bool force)
{
	return gicv5_iri_irq_set_affinity(d, mask_val, force,
					  GICV5_HWIRQ_TYPE_LPI);
}
287
/* Selector for the banked PPI state register accessors below. */
enum ppi_reg {
	PPI_PENDING,
	PPI_ACTIVE,
	PPI_HM
};

/*
 * Read the PPI pending/active/handling-mode register bank covering @irq.
 *
 * @which must be a compile-time constant: the function is __always_inline
 * and the default arm is a BUILD_BUG_ON(), so each call site must reduce
 * to a single register read.
 */
static __always_inline u64 read_ppi_sysreg_s(unsigned int irq,
					     const enum ppi_reg which)
{
	switch (which) {
	case PPI_PENDING:
		return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_SPENDR0_EL1) :
				  read_sysreg_s(SYS_ICC_PPI_SPENDR1_EL1);
	case PPI_ACTIVE:
		return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_SACTIVER0_EL1) :
				  read_sysreg_s(SYS_ICC_PPI_SACTIVER1_EL1);
	case PPI_HM:
		return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_HMR0_EL1) :
				  read_sysreg_s(SYS_ICC_PPI_HMR1_EL1);
	default:
		BUILD_BUG_ON(1);
	}
}

/*
 * Set (@set == true) or clear the pending/active state of a PPI through
 * the SET/CLR register pairs. Same constraint as read_ppi_sysreg_s():
 * @which must be a compile-time constant.
 */
static __always_inline void write_ppi_sysreg_s(unsigned int irq, bool set,
					       const enum ppi_reg which)
{
	u64 bit = BIT_ULL(irq % 64);

	switch (which) {
	case PPI_PENDING:
		if (set) {
			if (irq < 64)
				write_sysreg_s(bit, SYS_ICC_PPI_SPENDR0_EL1);
			else
				write_sysreg_s(bit, SYS_ICC_PPI_SPENDR1_EL1);
		} else {
			if (irq < 64)
				write_sysreg_s(bit, SYS_ICC_PPI_CPENDR0_EL1);
			else
				write_sysreg_s(bit, SYS_ICC_PPI_CPENDR1_EL1);
		}
		return;
	case PPI_ACTIVE:
		if (set) {
			if (irq < 64)
				write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER0_EL1);
			else
				write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER1_EL1);
		} else {
			if (irq < 64)
				write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER0_EL1);
			else
				write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER1_EL1);
		}
		return;
	default:
		BUILD_BUG_ON(1);
	}
}
348
/* Report the pending/active state of a PPI from the banked sysregs. */
static int gicv5_ppi_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *state)
{
	u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*state = !!(read_ppi_sysreg_s(d->hwirq, PPI_PENDING) & hwirq_id_bit);
		return 0;
	case IRQCHIP_STATE_ACTIVE:
		*state = !!(read_ppi_sysreg_s(d->hwirq, PPI_ACTIVE) & hwirq_id_bit);
		return 0;
	default:
		pr_debug("Unexpected PPI irqchip state\n");
		return -EINVAL;
	}
}

/*
 * Report the pending/active state of an SPI/LPI: CDRCFG requests the
 * INTID's configuration, which is then read back from ICSR_EL1.
 */
static int gicv5_iri_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *state, u8 hwirq_type)
{
	u64 icsr, cdrcfg;

	cdrcfg = d->hwirq | FIELD_PREP(GICV5_GIC_CDRCFG_TYPE_MASK, hwirq_type);

	gic_insn(cdrcfg, CDRCFG);
	/* Synchronize so that ICSR_EL1 reflects the CDRCFG request. */
	isb();
	icsr = read_sysreg_s(SYS_ICC_ICSR_EL1);

	/* The F bit flags that the register content is not valid. */
	if (FIELD_GET(ICC_ICSR_EL1_F, icsr)) {
		pr_err("ICSR_EL1 is invalid\n");
		return -EINVAL;
	}

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*state = !!(FIELD_GET(ICC_ICSR_EL1_Pending, icsr));
		return 0;

	case IRQCHIP_STATE_ACTIVE:
		*state = !!(FIELD_GET(ICC_ICSR_EL1_Active, icsr));
		return 0;

	default:
		pr_debug("Unexpected irqchip_irq_state\n");
		return -EINVAL;
	}
}

/* irq_chip .irq_get_irqchip_state callback for SPIs. */
static int gicv5_spi_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *state)
{
	return gicv5_iri_irq_get_irqchip_state(d, which, state,
					       GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_get_irqchip_state callback for LPIs. */
static int gicv5_lpi_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *state)
{
	return gicv5_iri_irq_get_irqchip_state(d, which, state,
					       GICV5_HWIRQ_TYPE_LPI);
}
415
/* Write the pending/active state of a PPI via the banked set/clear sysregs. */
static int gicv5_ppi_irq_set_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool state)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		write_ppi_sysreg_s(d->hwirq, state, PPI_PENDING);
		return 0;
	case IRQCHIP_STATE_ACTIVE:
		write_ppi_sysreg_s(d->hwirq, state, PPI_ACTIVE);
		return 0;
	default:
		pr_debug("Unexpected PPI irqchip state\n");
		return -EINVAL;
	}
}

/* Set or clear the pending state of an SPI/LPI with a GIC CDPEND instruction. */
static void gicv5_iri_irq_write_pending_state(struct irq_data *d, bool state,
					      u8 hwirq_type)
{
	u64 cdpend;

	cdpend = FIELD_PREP(GICV5_GIC_CDPEND_TYPE_MASK, hwirq_type) |
		 FIELD_PREP(GICV5_GIC_CDPEND_ID_MASK, d->hwirq) |
		 FIELD_PREP(GICV5_GIC_CDPEND_PENDING_MASK, state);

	gic_insn(cdpend, CDPEND);
}

/* SPI pending-state writer. */
static void gicv5_spi_irq_write_pending_state(struct irq_data *d, bool state)
{
	gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_SPI);
}

/* LPI pending-state writer. */
static void gicv5_lpi_irq_write_pending_state(struct irq_data *d, bool state)
{
	gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_LPI);
}
454
/*
 * irq_chip .irq_set_irqchip_state callback for SPIs; only the pending
 * state can be written (via CDPEND).
 */
static int gicv5_spi_irq_set_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool state)
{
	if (which != IRQCHIP_STATE_PENDING) {
		pr_debug("Unexpected irqchip_irq_state\n");
		return -EINVAL;
	}

	gicv5_spi_irq_write_pending_state(d, state);

	return 0;
}
470
/*
 * irq_chip .irq_set_irqchip_state callback for LPIs; only the pending
 * state can be written (via CDPEND).
 */
static int gicv5_lpi_irq_set_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool state)
{
	if (which != IRQCHIP_STATE_PENDING) {
		pr_debug("Unexpected irqchip_irq_state\n");
		return -EINVAL;
	}

	gicv5_lpi_irq_write_pending_state(d, state);

	return 0;
}
487
/* Retrigger an SPI by marking it pending; returns non-zero on success. */
static int gicv5_spi_irq_retrigger(struct irq_data *data)
{
	return !gicv5_spi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING,
						true);
}

/* Retrigger an LPI by marking it pending; returns non-zero on success. */
static int gicv5_lpi_irq_retrigger(struct irq_data *data)
{
	return !gicv5_lpi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING,
						true);
}

/* IPIs are backed by LPIs: sending one retriggers the parent LPI. */
static void gicv5_ipi_send_single(struct irq_data *d, unsigned int cpu)
{
	/* Mark the LPI pending */
	irq_chip_retrigger_hierarchy(d);
}
505
gicv5_ppi_irq_is_level(irq_hw_number_t hwirq)506 static bool gicv5_ppi_irq_is_level(irq_hw_number_t hwirq)
507 {
508 u64 bit = BIT_ULL(hwirq % 64);
509
510 return !!(read_ppi_sysreg_s(hwirq, PPI_HM) & bit);
511 }
512
/* Track whether a PPI is forwarded to a vCPU or handled by the host. */
static int gicv5_ppi_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (!vcpu)
		irqd_clr_forwarded_to_vcpu(d);
	else
		irqd_set_forwarded_to_vcpu(d);

	return 0;
}
522
/* irq_chip for per-CPU PPIs - mask/unmask/state live in banked sysregs. */
static const struct irq_chip gicv5_ppi_irq_chip = {
	.name			= "GICv5-PPI",
	.irq_mask		= gicv5_ppi_irq_mask,
	.irq_unmask		= gicv5_ppi_irq_unmask,
	.irq_eoi		= gicv5_ppi_irq_eoi,
	.irq_get_irqchip_state	= gicv5_ppi_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gicv5_ppi_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gicv5_ppi_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

/* irq_chip for shared peripheral interrupts, driven with GIC instructions. */
static const struct irq_chip gicv5_spi_irq_chip = {
	.name			= "GICv5-SPI",
	.irq_mask		= gicv5_spi_irq_mask,
	.irq_unmask		= gicv5_spi_irq_unmask,
	.irq_eoi		= gicv5_spi_irq_eoi,
	.irq_set_type		= gicv5_spi_irq_set_type,
	.irq_set_affinity	= gicv5_spi_irq_set_affinity,
	.irq_retrigger		= gicv5_spi_irq_retrigger,
	.irq_get_irqchip_state	= gicv5_spi_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gicv5_spi_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

/* irq_chip for LPIs, driven with GIC instructions. */
static const struct irq_chip gicv5_lpi_irq_chip = {
	.name			= "GICv5-LPI",
	.irq_mask		= gicv5_lpi_irq_mask,
	.irq_unmask		= gicv5_lpi_irq_unmask,
	.irq_eoi		= gicv5_lpi_irq_eoi,
	.irq_set_affinity	= gicv5_lpi_irq_set_affinity,
	.irq_retrigger		= gicv5_lpi_irq_retrigger,
	.irq_get_irqchip_state	= gicv5_lpi_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gicv5_lpi_irq_set_irqchip_state,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

/* irq_chip for IPIs - mostly delegates to the parent (LPI) chip. */
static const struct irq_chip gicv5_ipi_irq_chip = {
	.name			= "GICv5-IPI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.ipi_send_single	= gicv5_ipi_send_single,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
575
/*
 * Common DT translation for wired interrupts.
 *
 * fwspec layout: param[0] = hwirq type, param[1] = INTID, param[2] =
 * trigger type (consumed for SPIs; PPIs derive their type from the HW
 * handling-mode register instead).
 *
 * @hwirq_type must be a compile-time constant - the default switch arm
 * is a BUILD_BUG_ON() and the function is __always_inline.
 */
static __always_inline int gicv5_irq_domain_translate(struct irq_domain *d,
						      struct irq_fwspec *fwspec,
						      irq_hw_number_t *hwirq,
						      unsigned int *type,
						      const u8 hwirq_type)
{
	if (!is_of_node(fwspec->fwnode))
		return -EINVAL;

	if (fwspec->param_count < 3)
		return -EINVAL;

	if (fwspec->param[0] != hwirq_type)
		return -EINVAL;

	*hwirq = fwspec->param[1];

	switch (hwirq_type) {
	case GICV5_HWIRQ_TYPE_PPI:
		/*
		 * Handling mode is hardcoded for PPIs, set the type using
		 * HW reported value.
		 */
		*type = gicv5_ppi_irq_is_level(*hwirq) ? IRQ_TYPE_LEVEL_LOW :
							 IRQ_TYPE_EDGE_RISING;
		break;
	case GICV5_HWIRQ_TYPE_SPI:
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		break;
	default:
		BUILD_BUG_ON(1);
	}

	return 0;
}
611
/* .translate callback for the PPI domain. */
static int gicv5_irq_ppi_domain_translate(struct irq_domain *d,
					  struct irq_fwspec *fwspec,
					  irq_hw_number_t *hwirq,
					  unsigned int *type)
{
	return gicv5_irq_domain_translate(d, fwspec, hwirq, type,
					  GICV5_HWIRQ_TYPE_PPI);
}

/* Allocate a single per-CPU PPI virq and bind it to the PPI irq_chip. */
static int gicv5_irq_ppi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int ret;

	/* PPIs are allocated one at a time. */
	if (WARN_ON_ONCE(nr_irqs != 1))
		return -EINVAL;

	ret = gicv5_irq_ppi_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_status_flags(virq, IRQ_LEVEL);

	irq_set_percpu_devid(virq);
	irq_domain_set_info(domain, virq, hwirq, &gicv5_ppi_irq_chip, NULL,
			    handle_percpu_devid_irq, NULL, NULL);

	return 0;
}

/* Shared .free callback for the PPI/SPI/LPI domains (single-IRQ teardown). */
static void gicv5_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d;

	if (WARN_ON_ONCE(nr_irqs != 1))
		return;

	d = irq_domain_get_irq_data(domain, virq);

	irq_set_handler(virq, NULL);
	irq_domain_reset_irq_data(d);
}
659
gicv5_irq_ppi_domain_select(struct irq_domain * d,struct irq_fwspec * fwspec,enum irq_domain_bus_token bus_token)660 static int gicv5_irq_ppi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
661 enum irq_domain_bus_token bus_token)
662 {
663 if (fwspec->fwnode != d->fwnode)
664 return 0;
665
666 if (fwspec->param[0] != GICV5_HWIRQ_TYPE_PPI)
667 return 0;
668
669 return (d == gicv5_global_data.ppi_domain);
670 }
671
/* irq_domain_ops for the per-CPU PPI domain. */
static const struct irq_domain_ops gicv5_irq_ppi_domain_ops = {
	.translate	= gicv5_irq_ppi_domain_translate,
	.alloc		= gicv5_irq_ppi_domain_alloc,
	.free		= gicv5_irq_domain_free,
	.select		= gicv5_irq_ppi_domain_select
};
678
/* .translate callback for the SPI domain. */
static int gicv5_irq_spi_domain_translate(struct irq_domain *d,
					  struct irq_fwspec *fwspec,
					  irq_hw_number_t *hwirq,
					  unsigned int *type)
{
	return gicv5_irq_domain_translate(d, fwspec, hwirq, type,
					  GICV5_HWIRQ_TYPE_SPI);
}

/*
 * Allocate a single SPI virq: bind it to the owning IRS chip data and
 * program its default priority and affinity in HW.
 */
static int gicv5_irq_spi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	struct gicv5_irs_chip_data *chip_data;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	struct irq_data *irqd;
	irq_hw_number_t hwirq;
	int ret;

	/* SPIs are allocated one at a time. */
	if (WARN_ON_ONCE(nr_irqs != 1))
		return -EINVAL;

	ret = gicv5_irq_spi_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	irqd = irq_desc_get_irq_data(irq_to_desc(virq));
	/* Find the IRS that owns this SPI ID. */
	chip_data = gicv5_irs_lookup_by_spi_id(hwirq);

	irq_domain_set_info(domain, virq, hwirq, &gicv5_spi_irq_chip, chip_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_probe(virq);
	irqd_set_single_target(irqd);

	/* Program default priority and route the SPI to this CPU. */
	gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_SPI);

	return 0;
}
717
gicv5_irq_spi_domain_select(struct irq_domain * d,struct irq_fwspec * fwspec,enum irq_domain_bus_token bus_token)718 static int gicv5_irq_spi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
719 enum irq_domain_bus_token bus_token)
720 {
721 if (fwspec->fwnode != d->fwnode)
722 return 0;
723
724 if (fwspec->param[0] != GICV5_HWIRQ_TYPE_SPI)
725 return 0;
726
727 return (d == gicv5_global_data.spi_domain);
728 }
729
/* irq_domain_ops for the shared peripheral interrupt domain. */
static const struct irq_domain_ops gicv5_irq_spi_domain_ops = {
	.translate	= gicv5_irq_spi_domain_translate,
	.alloc		= gicv5_irq_spi_domain_alloc,
	.free		= gicv5_irq_domain_free,
	.select		= gicv5_irq_spi_domain_select
};
736
/* Put a newly allocated LPI into a known (edge, not pending) HW state. */
static void gicv5_lpi_config_reset(struct irq_data *d)
{
	u64 cdhm;

	/*
	 * Reset LPIs handling mode to edge by default and clear pending
	 * state to make sure we start the LPI with a clean state from
	 * previous incarnations.
	 */
	cdhm = FIELD_PREP(GICV5_GIC_CDHM_HM_MASK, 0) |
	       FIELD_PREP(GICV5_GIC_CDHM_TYPE_MASK, GICV5_HWIRQ_TYPE_LPI) |
	       FIELD_PREP(GICV5_GIC_CDHM_ID_MASK, d->hwirq);
	gic_insn(cdhm, CDHM);

	gicv5_lpi_irq_write_pending_state(d, false);
}

/*
 * Allocate a single LPI virq for the LPI number passed in *(u32 *)arg:
 * install the LPI chip, allocate the IRS interrupt state table entry
 * and initialize the HW configuration.
 */
static int gicv5_irq_lpi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq;
	struct irq_data *irqd;
	u32 *lpi = arg;
	int ret;

	/* LPIs are allocated one at a time. */
	if (WARN_ON_ONCE(nr_irqs != 1))
		return -EINVAL;

	hwirq = *lpi;

	irqd = irq_domain_get_irq_data(domain, virq);

	irq_domain_set_info(domain, virq, hwirq, &gicv5_lpi_irq_chip, NULL,
			    handle_fasteoi_irq, NULL, NULL);
	irqd_set_single_target(irqd);

	ret = gicv5_irs_iste_alloc(hwirq);
	if (ret < 0)
		return ret;

	gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_LPI);
	gicv5_lpi_config_reset(irqd);

	return 0;
}
782
/* irq_domain_ops for the LPI domain (no DT translation: LPIs are dynamic). */
static const struct irq_domain_ops gicv5_irq_lpi_domain_ops = {
	.alloc	= gicv5_irq_lpi_domain_alloc,
	.free	= gicv5_irq_domain_free,
};

/* Create the radix-tree based LPI domain (no fwnode: LPIs have no DT nodes). */
void __init gicv5_init_lpi_domain(void)
{
	struct irq_domain *d;

	d = irq_domain_create_tree(NULL, &gicv5_irq_lpi_domain_ops, NULL);
	gicv5_global_data.lpi_domain = d;
}

/* Tear down the LPI domain (probe error path). */
void __init gicv5_free_lpi_domain(void)
{
	irq_domain_remove(gicv5_global_data.lpi_domain);
	gicv5_global_data.lpi_domain = NULL;
}
801
/*
 * Allocate @nr_irqs IPIs. Each IPI is backed by a dynamically allocated
 * LPI in the parent (LPI) domain; the IPI's local hwirq is its index i.
 */
static int gicv5_irq_ipi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	struct irq_data *irqd;
	int ret, i;
	u32 lpi;

	for (i = 0; i < nr_irqs; i++) {
		ret = gicv5_alloc_lpi();
		if (ret < 0)
			return ret;

		lpi = ret;

		ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
		if (ret) {
			/* The parent alloc failed, hand the LPI back. */
			gicv5_free_lpi(lpi);
			return ret;
		}

		irqd = irq_domain_get_irq_data(domain, virq + i);

		/* Local hwirq is the IPI index; the parent holds the LPI. */
		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      &gicv5_ipi_irq_chip, NULL);

		irqd_set_single_target(irqd);

		irq_set_handler(virq + i, handle_percpu_irq);
	}

	return 0;
}

/* Free IPIs: release the backing LPI, then the parent domain resources. */
static void gicv5_irq_ipi_domain_free(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs)
{
	struct irq_data *d;
	unsigned int i;

	for (i = 0; i < nr_irqs; i++) {
		d = irq_domain_get_irq_data(domain, virq + i);

		if (!d)
			return;

		/* The backing LPI number lives in the parent irq_data. */
		gicv5_free_lpi(d->parent_data->hwirq);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
		irq_domain_free_irqs_parent(domain, virq + i, 1);
	}
}

/* irq_domain_ops for the IPI domain, stacked on top of the LPI domain. */
static const struct irq_domain_ops gicv5_irq_ipi_domain_ops = {
	.alloc	= gicv5_irq_ipi_domain_alloc,
	.free	= gicv5_irq_ipi_domain_free,
};
859
/*
 * Dispatch an acknowledged INTID to the irqdomain matching its type
 * (PPI/SPI/LPI). If no handler claims the interrupt, deactivate it so
 * it does not remain active forever.
 */
static void handle_irq_per_domain(u32 hwirq)
{
	u8 hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, hwirq);
	u32 hwirq_id = FIELD_GET(GICV5_HWIRQ_ID, hwirq);
	struct irq_domain *domain;

	switch (hwirq_type) {
	case GICV5_HWIRQ_TYPE_PPI:
		domain = gicv5_global_data.ppi_domain;
		break;
	case GICV5_HWIRQ_TYPE_SPI:
		domain = gicv5_global_data.spi_domain;
		break;
	case GICV5_HWIRQ_TYPE_LPI:
		domain = gicv5_global_data.lpi_domain;
		break;
	default:
		pr_err_once("Unknown IRQ type, bail out\n");
		return;
	}

	if (generic_handle_domain_irq(domain, hwirq_id)) {
		/* Newline added: printk lines must be terminated. */
		pr_err_once("Could not handle, hwirq = 0x%x\n", hwirq_id);
		gicv5_hwirq_eoi(hwirq_id, hwirq_type);
	}
}
886
/* Root IRQ handler: acknowledge the interrupt via CDIA and dispatch it. */
static void __exception_irq_entry gicv5_handle_irq(struct pt_regs *regs)
{
	bool valid;
	u32 hwirq;
	u64 ia;

	ia = gicr_insn(CDIA);
	valid = GICV5_GICR_CDIA_VALID(ia);

	/* CDIA returned no valid INTID - nothing to handle. */
	if (!valid)
		return;

	/*
	 * Ensure that the CDIA instruction effects (ie IRQ activation) are
	 * completed before handling the interrupt.
	 */
	gsb_ack();

	/*
	 * Ensure instruction ordering between an acknowledgment and subsequent
	 * instructions in the IRQ handler using an ISB.
	 */
	isb();

	hwirq = FIELD_GET(GICV5_HWIRQ_INTID, ia);

	handle_irq_per_domain(hwirq);
}
915
/* Disable the CPU interface by clearing ICC_CR0_EL1.EN. */
static void gicv5_cpu_disable_interrupts(void)
{
	u64 cr0;

	cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 0);
	write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
}

/*
 * Bring up the CPU interface on the local CPU: mask all PPIs, program
 * default PPI priorities and the priority mask, then enable the
 * interface by setting ICC_CR0_EL1.EN.
 */
static void gicv5_cpu_enable_interrupts(void)
{
	u64 cr0, pcr;

	/* Start from a clean slate: all PPIs disabled. */
	write_sysreg_s(0, SYS_ICC_PPI_ENABLER0_EL1);
	write_sysreg_s(0, SYS_ICC_PPI_ENABLER1_EL1);

	gicv5_ppi_priority_init();

	pcr = FIELD_PREP(ICC_PCR_EL1_PRIORITY, GICV5_IRQ_PRI_MI);
	write_sysreg_s(pcr, SYS_ICC_PCR_EL1);

	cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 1);
	write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
}
939
/* First virq of the IPI range allocated in gicv5_smp_init(). */
static int base_ipi_virq;

/* CPU hotplug callback: enable the CPU interface and register with the IRS. */
static int gicv5_starting_cpu(unsigned int cpu)
{
	if (WARN(!gicv5_cpuif_has_gcie(),
		 "GICv5 system components present but CPU does not have FEAT_GCIE"))
		return -ENODEV;

	gicv5_cpu_enable_interrupts();

	return gicv5_irs_register_cpu(cpu);
}

/* Register the hotplug state and allocate the per-CPU IPI virq range. */
static void __init gicv5_smp_init(void)
{
	unsigned int num_ipis = GICV5_IPIS_PER_CPU * nr_cpu_ids;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv5:starting",
				  gicv5_starting_cpu, NULL);

	base_ipi_virq = irq_domain_alloc_irqs(gicv5_global_data.ipi_domain,
					      num_ipis, NUMA_NO_NODE, NULL);
	if (WARN(base_ipi_virq <= 0, "IPI IRQ allocation was not successful"))
		return;

	set_smp_ipi_range_percpu(base_ipi_virq, GICV5_IPIS_PER_CPU, nr_cpu_ids);
}
968
/* Remove whichever of the PPI/SPI/IPI domains were created; idempotent. */
static void __init gicv5_free_domains(void)
{
	if (gicv5_global_data.ppi_domain)
		irq_domain_remove(gicv5_global_data.ppi_domain);
	if (gicv5_global_data.spi_domain)
		irq_domain_remove(gicv5_global_data.spi_domain);
	if (gicv5_global_data.ipi_domain)
		irq_domain_remove(gicv5_global_data.ipi_domain);

	gicv5_global_data.ppi_domain = NULL;
	gicv5_global_data.spi_domain = NULL;
	gicv5_global_data.ipi_domain = NULL;
}

/*
 * Create the PPI, SPI (if the system has SPIs) and IPI domains.
 *
 * The IPI domain is a hierarchy child of the LPI domain, which must
 * have been created beforehand (gicv5_init_lpi_domain()).
 * Returns 0 on success or -ENOMEM, tearing down any domain already
 * created here on failure.
 */
static int __init gicv5_init_domains(struct fwnode_handle *handle)
{
	u32 spi_count = gicv5_global_data.global_spi_count;
	struct irq_domain *d;

	d = irq_domain_create_linear(handle, PPI_NR, &gicv5_irq_ppi_domain_ops, NULL);
	if (!d)
		return -ENOMEM;

	irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED);
	gicv5_global_data.ppi_domain = d;

	if (spi_count) {
		d = irq_domain_create_linear(handle, spi_count,
					     &gicv5_irq_spi_domain_ops, NULL);

		if (!d) {
			gicv5_free_domains();
			return -ENOMEM;
		}

		gicv5_global_data.spi_domain = d;
		irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED);
	}

	if (!WARN(!gicv5_global_data.lpi_domain,
		  "LPI domain uninitialized, can't set up IPIs")) {
		d = irq_domain_create_hierarchy(gicv5_global_data.lpi_domain,
						0, GICV5_IPIS_PER_CPU * nr_cpu_ids,
						NULL, &gicv5_irq_ipi_domain_ops,
						NULL);

		if (!d) {
			gicv5_free_domains();
			return -ENOMEM;
		}
		gicv5_global_data.ipi_domain = d;
	}
	gicv5_global_data.fwnode = handle;

	return 0;
}
1025
gicv5_set_cpuif_pribits(void)1026 static void gicv5_set_cpuif_pribits(void)
1027 {
1028 u64 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
1029
1030 switch (FIELD_GET(ICC_IDR0_EL1_PRI_BITS, icc_idr0)) {
1031 case ICC_IDR0_EL1_PRI_BITS_4BITS:
1032 gicv5_global_data.cpuif_pri_bits = 4;
1033 break;
1034 case ICC_IDR0_EL1_PRI_BITS_5BITS:
1035 gicv5_global_data.cpuif_pri_bits = 5;
1036 break;
1037 default:
1038 pr_err("Unexpected ICC_IDR0_EL1_PRI_BITS value, default to 4");
1039 gicv5_global_data.cpuif_pri_bits = 4;
1040 break;
1041 }
1042 }
1043
gicv5_set_cpuif_idbits(void)1044 static void gicv5_set_cpuif_idbits(void)
1045 {
1046 u32 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
1047
1048 switch (FIELD_GET(ICC_IDR0_EL1_ID_BITS, icc_idr0)) {
1049 case ICC_IDR0_EL1_ID_BITS_16BITS:
1050 gicv5_global_data.cpuif_id_bits = 16;
1051 break;
1052 case ICC_IDR0_EL1_ID_BITS_24BITS:
1053 gicv5_global_data.cpuif_id_bits = 24;
1054 break;
1055 default:
1056 pr_err("Unexpected ICC_IDR0_EL1_ID_BITS value, default to 16");
1057 gicv5_global_data.cpuif_id_bits = 16;
1058 break;
1059 }
1060 }
1061
1062 #ifdef CONFIG_KVM
1063 static struct gic_kvm_info gic_v5_kvm_info __initdata;
1064
gicv5_cpuif_has_gcie_legacy(void)1065 static bool __init gicv5_cpuif_has_gcie_legacy(void)
1066 {
1067 u64 idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
1068 return !!FIELD_GET(ICC_IDR0_EL1_GCIE_LEGACY, idr0);
1069 }
1070
gic_of_setup_kvm_info(struct device_node * node)1071 static void __init gic_of_setup_kvm_info(struct device_node *node)
1072 {
1073 gic_v5_kvm_info.type = GIC_V5;
1074 gic_v5_kvm_info.has_gcie_v3_compat = gicv5_cpuif_has_gcie_legacy();
1075
1076 /* GIC Virtual CPU interface maintenance interrupt */
1077 gic_v5_kvm_info.no_maint_irq_mask = false;
1078 gic_v5_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
1079 if (!gic_v5_kvm_info.maint_irq) {
1080 pr_warn("cannot find GICv5 virtual CPU interface maintenance interrupt\n");
1081 return;
1082 }
1083
1084 vgic_set_kvm_info(&gic_v5_kvm_info);
1085 }
1086 #else
gic_of_setup_kvm_info(struct device_node * node)1087 static inline void __init gic_of_setup_kvm_info(struct device_node *node)
1088 {
1089 }
1090 #endif // CONFIG_KVM
1091
/*
 * GICv5 DT probe entry point: probe the IRSes, create the irqdomains,
 * bring up the boot CPU interface, install the root IRQ handler, enable
 * the IRSes, then set up SMP, ITS and KVM support.
 */
static int __init gicv5_of_init(struct device_node *node, struct device_node *parent)
{
	int ret = gicv5_irs_of_probe(node);
	if (ret)
		return ret;

	ret = gicv5_init_domains(of_fwnode_handle(node));
	if (ret)
		goto out_irs;

	gicv5_set_cpuif_pribits();
	gicv5_set_cpuif_idbits();

	/* Usable priority bits: minimum of CPU interface and IRS capability. */
	pri_bits = min_not_zero(gicv5_global_data.cpuif_pri_bits,
				gicv5_global_data.irs_pri_bits);

	ret = gicv5_starting_cpu(smp_processor_id());
	if (ret)
		goto out_dom;

	ret = set_handle_irq(gicv5_handle_irq);
	if (ret)
		goto out_int;

	ret = gicv5_irs_enable();
	if (ret)
		goto out_int;

	gicv5_smp_init();

	gicv5_irs_its_probe();

	gic_of_setup_kvm_info(node);

	return 0;

out_int:
	gicv5_cpu_disable_interrupts();
out_dom:
	gicv5_free_domains();
out_irs:
	gicv5_irs_remove();

	return ret;
}
IRQCHIP_DECLARE(gic_v5, "arm,gic-v5", gicv5_of_init);
1138