xref: /linux/drivers/irqchip/irq-gic-v5.c (revision cb5573868ea85ddbc74dd9a917acd1e434d21390)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
4  */
5 
6 #define pr_fmt(fmt)	"GICv5: " fmt
7 
8 #include <linux/acpi_iort.h>
9 #include <linux/cpuhotplug.h>
10 #include <linux/idr.h>
11 #include <linux/irqdomain.h>
12 #include <linux/slab.h>
13 #include <linux/wordpart.h>
14 
15 #include <linux/irqchip.h>
16 #include <linux/irqchip/arm-gic-v5.h>
17 #include <linux/irqchip/arm-vgic-info.h>
18 
19 #include <asm/cpufeature.h>
20 #include <asm/exception.h>
21 
22 static u8 pri_bits __ro_after_init = 5;
23 
24 #define GICV5_IRQ_PRI_MASK	0x1f
25 #define GICV5_IRQ_PRI_MI	(GICV5_IRQ_PRI_MASK & GENMASK(4, 5 - pri_bits))
26 
27 #define PPI_NR	128
28 
/* True if this CPU's interface implements FEAT_GCIE (GICv5 CPU interface). */
static bool gicv5_cpuif_has_gcie(void)
{
	return this_cpu_has_cap(ARM64_HAS_GICV5_CPUIF);
}
33 
/* Driver-wide GICv5 state (irq domains, SPI count, ...) shared across files. */
struct gicv5_chip_data gicv5_global_data __read_mostly;

/* LPI INTID allocator: IDA over [0, num_lpis), sized at probe time. */
static DEFINE_IDA(lpi_ida);
static u32 num_lpis __ro_after_init;
38 
/* Record the number of LPIs discovered at probe; enables the LPI allocator. */
void __init gicv5_init_lpis(u32 lpis)
{
	num_lpis = lpis;
}

/* Undo gicv5_init_lpis() on probe failure: alloc_lpi() will return -ENOSPC. */
void __init gicv5_deinit_lpis(void)
{
	num_lpis = 0;
}
48 
alloc_lpi(void)49 static int alloc_lpi(void)
50 {
51 	if (!num_lpis)
52 		return -ENOSPC;
53 
54 	return ida_alloc_max(&lpi_ida, num_lpis - 1, GFP_KERNEL);
55 }
56 
/* Return an LPI INTID to the allocator. */
static void release_lpi(u32 lpi)
{
	ida_free(&lpi_ida, lpi);
}
61 
/* Public entry point: allocate an LPI INTID (negative errno on failure). */
int gicv5_alloc_lpi(void)
{
	return alloc_lpi();
}

/* Public entry point: free a previously allocated LPI INTID. */
void gicv5_free_lpi(u32 lpi)
{
	release_lpi(lpi);
}
71 
/*
 * Initialize all PPI priority registers, replicating the minimum
 * supported priority (GICV5_IRQ_PRI_MI) into every priority field.
 */
static void gicv5_ppi_priority_init(void)
{
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR0_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR1_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR2_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR3_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR4_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR5_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR6_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR7_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR8_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR9_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR10_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR11_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR12_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR13_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR14_EL1);
	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR15_EL1);

	/*
	 * Context synchronization required to make sure system register writes
	 * effects are synchronised.
	 */
	isb();
}
97 
/*
 * One-time configuration of a freshly allocated hwirq: program its
 * priority (CDPRI) and route it to the current CPU's interface affinity
 * ID (CDAFF). Only SPIs and LPIs take this path; other types (e.g. banked
 * per-CPU PPIs) need no routing and the function is a no-op for them.
 */
static void gicv5_hwirq_init(irq_hw_number_t hwirq, u8 priority, u8 hwirq_type)
{
	u64 cdpri, cdaff;
	u16 iaffid;
	int ret;

	if (hwirq_type == GICV5_HWIRQ_TYPE_LPI || hwirq_type == GICV5_HWIRQ_TYPE_SPI) {
		cdpri = FIELD_PREP(GICV5_GIC_CDPRI_PRIORITY_MASK, priority)	|
			FIELD_PREP(GICV5_GIC_CDPRI_TYPE_MASK, hwirq_type)	|
			FIELD_PREP(GICV5_GIC_CDPRI_ID_MASK, hwirq);
		gic_insn(cdpri, CDPRI);

		/* Route the IRQ to the CPU executing this code. */
		ret = gicv5_irs_cpu_to_iaffid(smp_processor_id(), &iaffid);

		if (WARN_ON_ONCE(ret))
			return;

		cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid)		|
			FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type)	|
			FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, hwirq);
		gic_insn(cdaff, CDAFF);
	}
}
121 
/*
 * Mask a PPI by clearing its bit in the banked ICC_PPI_ENABLER{0,1}_EL1
 * register (register 0 covers hwirq 0-63, register 1 covers 64-127).
 */
static void gicv5_ppi_irq_mask(struct irq_data *d)
{
	u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);

	if (d->hwirq < 64)
		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, hwirq_id_bit, 0);
	else
		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, hwirq_id_bit, 0);

	/*
	 * We must ensure that the disable takes effect immediately to
	 * guarantee that the lazy-disabled IRQ mechanism works.
	 * A context synchronization event is required to guarantee it.
	 * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1.
	 */
	isb();
}
139 
/* Mask an SPI/LPI via the CDDIS GIC instruction; hwirq_type selects which. */
static void gicv5_iri_irq_mask(struct irq_data *d, u8 hwirq_type)
{
	u64 cddis;

	cddis = FIELD_PREP(GICV5_GIC_CDDIS_ID_MASK, d->hwirq)	|
		FIELD_PREP(GICV5_GIC_CDDIS_TYPE_MASK, hwirq_type);

	gic_insn(cddis, CDDIS);
	/*
	 * We must make sure that GIC CDDIS write effects are propagated
	 * immediately to make sure the disable takes effect to guarantee
	 * that the lazy-disabled IRQ mechanism works.
	 * Rule R_XCLJC states that the effects of a GIC system instruction
	 * complete in finite time.
	 * The GSB ensures completion of the GIC instruction and prevents
	 * loads, stores and GIC instructions from executing part of their
	 * functionality before the GSB SYS.
	 */
	gsb_sys();
}
160 
/* irq_chip .irq_mask for SPIs: thin wrapper selecting the SPI type. */
static void gicv5_spi_irq_mask(struct irq_data *d)
{
	gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_mask for LPIs: thin wrapper selecting the LPI type. */
static void gicv5_lpi_irq_mask(struct irq_data *d)
{
	gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_LPI);
}
170 
/*
 * Unmask a PPI by setting its bit in the banked ICC_PPI_ENABLER{0,1}_EL1
 * register (mirror of gicv5_ppi_irq_mask()).
 */
static void gicv5_ppi_irq_unmask(struct irq_data *d)
{
	u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);

	if (d->hwirq < 64)
		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, 0, hwirq_id_bit);
	else
		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, 0, hwirq_id_bit);
	/*
	 * We must ensure that the enable takes effect in finite time - a
	 * context synchronization event is required to guarantee it, we
	 * can not take for granted that would happen (eg a core going straight
	 * into idle after enabling a PPI).
	 * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1.
	 */
	isb();
}
188 
/* Unmask an SPI/LPI via the CDEN GIC instruction; hwirq_type selects which. */
static void gicv5_iri_irq_unmask(struct irq_data *d, u8 hwirq_type)
{
	u64 cden;

	cden = FIELD_PREP(GICV5_GIC_CDEN_ID_MASK, d->hwirq)	|
	       FIELD_PREP(GICV5_GIC_CDEN_TYPE_MASK, hwirq_type);
	/*
	 * Rule R_XCLJC states that the effects of a GIC system instruction
	 * complete in finite time and that's the only requirement when
	 * unmasking an SPI/LPI IRQ.
	 */
	gic_insn(cden, CDEN);
}
202 
/* irq_chip .irq_unmask for SPIs. */
static void gicv5_spi_irq_unmask(struct irq_data *d)
{
	gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_unmask for LPIs. */
static void gicv5_lpi_irq_unmask(struct irq_data *d)
{
	gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_LPI);
}
212 
/*
 * End-of-interrupt for a hwirq: deactivate the interrupt (CDDI) and then
 * signal EOI/priority drop to the CPU interface (CDEOI).
 */
static void gicv5_hwirq_eoi(u32 hwirq_id, u8 hwirq_type)
{
	u64 cddi;

	cddi = FIELD_PREP(GICV5_GIC_CDDI_ID_MASK, hwirq_id)	|
	       FIELD_PREP(GICV5_GIC_CDDI_TYPE_MASK, hwirq_type);

	gic_insn(cddi, CDDI);

	gic_insn(0, CDEOI);
}
224 
/*
 * EOI a PPI. If the PPI is forwarded to a vCPU, only perform the
 * priority drop (CDEOI) and leave deactivation to the guest.
 */
static void gicv5_ppi_irq_eoi(struct irq_data *d)
{
	/* Skip deactivate for forwarded PPI interrupts */
	if (irqd_is_forwarded_to_vcpu(d)) {
		gic_insn(0, CDEOI);
		return;
	}

	gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_PPI);
}
235 
/* irq_chip .irq_eoi for SPIs. */
static void gicv5_spi_irq_eoi(struct irq_data *d)
{
	gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_eoi for LPIs. */
static void gicv5_lpi_irq_eoi(struct irq_data *d)
{
	gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_LPI);
}
245 
/*
 * Retarget an SPI/LPI to a CPU from @mask_val by issuing a CDAFF GIC
 * instruction with the CPU's interface affinity ID.
 *
 * Returns IRQ_SET_MASK_OK_DONE on success (effective affinity already
 * updated here) or a negative errno if no affinity ID exists for the
 * selected CPU.
 */
static int gicv5_iri_irq_set_affinity(struct irq_data *d,
				      const struct cpumask *mask_val,
				      bool force, u8 hwirq_type)
{
	u64 cdaff;
	u16 iaffid;
	int err, cpu;

	/* A forced affinity may target an offline CPU. */
	cpu = force ? cpumask_first(mask_val)
		    : cpumask_any_and(mask_val, cpu_online_mask);

	err = gicv5_irs_cpu_to_iaffid(cpu, &iaffid);
	if (err)
		return err;

	cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid)		|
		FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type)	|
		FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, d->hwirq);
	gic_insn(cdaff, CDAFF);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
272 
/* irq_chip .irq_set_affinity for SPIs. */
static int gicv5_spi_irq_set_affinity(struct irq_data *d,
				      const struct cpumask *mask_val,
				      bool force)
{
	return gicv5_iri_irq_set_affinity(d, mask_val, force,
					  GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_set_affinity for LPIs. */
static int gicv5_lpi_irq_set_affinity(struct irq_data *d,
				      const struct cpumask *mask_val,
				      bool force)
{
	return gicv5_iri_irq_set_affinity(d, mask_val, force,
					  GICV5_HWIRQ_TYPE_LPI);
}
288 
/* Selects which banked PPI state registers the helpers below access. */
enum ppi_reg {
	PPI_PENDING,	/* SPENDR/CPENDR: pending state */
	PPI_ACTIVE,	/* SACTIVER/CACTIVER: active state */
	PPI_HM		/* HMR: handling mode (read-only here) */
};
294 
/*
 * Read the 64-bit PPI state register selected by @which, picking the
 * low (irq < 64) or high (irq >= 64) bank. Must be called with a
 * compile-time-constant @which: being __always_inline lets the
 * BUILD_BUG_ON() in the default case fire only for invalid selectors.
 */
static __always_inline u64 read_ppi_sysreg_s(unsigned int irq,
					     const enum ppi_reg which)
{
	switch (which) {
	case PPI_PENDING:
		return irq < 64	? read_sysreg_s(SYS_ICC_PPI_SPENDR0_EL1) :
				  read_sysreg_s(SYS_ICC_PPI_SPENDR1_EL1);
	case PPI_ACTIVE:
		return irq < 64	? read_sysreg_s(SYS_ICC_PPI_SACTIVER0_EL1) :
				  read_sysreg_s(SYS_ICC_PPI_SACTIVER1_EL1);
	case PPI_HM:
		return irq < 64	? read_sysreg_s(SYS_ICC_PPI_HMR0_EL1) :
				  read_sysreg_s(SYS_ICC_PPI_HMR1_EL1);
	default:
		BUILD_BUG_ON(1);
	}
}
312 
/*
 * Set (@set == true) or clear the pending/active state of a PPI through
 * the corresponding set/clear register pair, choosing the low/high bank
 * by irq number. @which must be compile-time constant (see the
 * BUILD_BUG_ON() note on read_ppi_sysreg_s()). PPI_HM is read-only and
 * intentionally not handled here.
 */
static __always_inline void write_ppi_sysreg_s(unsigned int irq, bool set,
					       const enum ppi_reg which)
{
	u64 bit = BIT_ULL(irq % 64);

	switch (which) {
	case PPI_PENDING:
		if (set) {
			if (irq < 64)
				write_sysreg_s(bit, SYS_ICC_PPI_SPENDR0_EL1);
			else
				write_sysreg_s(bit, SYS_ICC_PPI_SPENDR1_EL1);
		} else {
			if (irq < 64)
				write_sysreg_s(bit, SYS_ICC_PPI_CPENDR0_EL1);
			else
				write_sysreg_s(bit, SYS_ICC_PPI_CPENDR1_EL1);
		}
		return;
	case PPI_ACTIVE:
		if (set) {
			if (irq < 64)
				write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER0_EL1);
			else
				write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER1_EL1);
		} else {
			if (irq < 64)
				write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER0_EL1);
			else
				write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER1_EL1);
		}
		return;
	default:
		BUILD_BUG_ON(1);
	}
}
349 
/*
 * irq_chip .irq_get_irqchip_state for PPIs: report pending/active state
 * by reading the banked PPI state registers. Other state queries are
 * rejected with -EINVAL.
 */
static int gicv5_ppi_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *state)
{
	u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*state = !!(read_ppi_sysreg_s(d->hwirq, PPI_PENDING) & hwirq_id_bit);
		return 0;
	case IRQCHIP_STATE_ACTIVE:
		*state = !!(read_ppi_sysreg_s(d->hwirq, PPI_ACTIVE) & hwirq_id_bit);
		return 0;
	default:
		pr_debug("Unexpected PPI irqchip state\n");
		return -EINVAL;
	}
}
368 
/*
 * Report pending/active state for an SPI/LPI: request a configuration
 * read with the CDRCFG GIC instruction, synchronize with an ISB, then
 * sample the result from ICC_ICSR_EL1. The F bit flags an invalid
 * snapshot, in which case -EINVAL is returned.
 */
static int gicv5_iri_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *state, u8 hwirq_type)
{
	u64 icsr, cdrcfg;

	cdrcfg = d->hwirq | FIELD_PREP(GICV5_GIC_CDRCFG_TYPE_MASK, hwirq_type);

	gic_insn(cdrcfg, CDRCFG);
	/* Ensure the CDRCFG effects are visible before reading ICSR_EL1. */
	isb();
	icsr = read_sysreg_s(SYS_ICC_ICSR_EL1);

	if (FIELD_GET(ICC_ICSR_EL1_F, icsr)) {
		pr_err("ICSR_EL1 is invalid\n");
		return -EINVAL;
	}

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*state = !!(FIELD_GET(ICC_ICSR_EL1_Pending, icsr));
		return 0;

	case IRQCHIP_STATE_ACTIVE:
		*state = !!(FIELD_GET(ICC_ICSR_EL1_Active, icsr));
		return 0;

	default:
		pr_debug("Unexpected irqchip_irq_state\n");
		return -EINVAL;
	}
}
400 
/* irq_chip .irq_get_irqchip_state for SPIs. */
static int gicv5_spi_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *state)
{
	return gicv5_iri_irq_get_irqchip_state(d, which, state,
					       GICV5_HWIRQ_TYPE_SPI);
}

/* irq_chip .irq_get_irqchip_state for LPIs. */
static int gicv5_lpi_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *state)
{
	return gicv5_iri_irq_get_irqchip_state(d, which, state,
					       GICV5_HWIRQ_TYPE_LPI);
}
416 
/*
 * irq_chip .irq_set_irqchip_state for PPIs: write pending/active state
 * through the banked PPI set/clear registers; any other state is
 * rejected with -EINVAL.
 */
static int gicv5_ppi_irq_set_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool state)
{
	if (which == IRQCHIP_STATE_PENDING) {
		write_ppi_sysreg_s(d->hwirq, state, PPI_PENDING);
		return 0;
	}

	if (which == IRQCHIP_STATE_ACTIVE) {
		write_ppi_sysreg_s(d->hwirq, state, PPI_ACTIVE);
		return 0;
	}

	pr_debug("Unexpected PPI irqchip state\n");
	return -EINVAL;
}
433 
/* Set or clear the pending state of an SPI/LPI via the CDPEND instruction. */
static void gicv5_iri_irq_write_pending_state(struct irq_data *d, bool state,
					      u8 hwirq_type)
{
	u64 cdpend;

	cdpend = FIELD_PREP(GICV5_GIC_CDPEND_TYPE_MASK, hwirq_type)	|
		 FIELD_PREP(GICV5_GIC_CDPEND_ID_MASK, d->hwirq)		|
		 FIELD_PREP(GICV5_GIC_CDPEND_PENDING_MASK, state);

	gic_insn(cdpend, CDPEND);
}

/* Pending-state write, SPI flavour. */
static void gicv5_spi_irq_write_pending_state(struct irq_data *d, bool state)
{
	gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_SPI);
}

/* Pending-state write, LPI flavour. */
static void gicv5_lpi_irq_write_pending_state(struct irq_data *d, bool state)
{
	gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_LPI);
}
455 
/*
 * irq_chip .irq_set_irqchip_state for SPIs: only the pending state can
 * be written; everything else is rejected with -EINVAL.
 */
static int gicv5_spi_irq_set_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool state)
{
	if (which != IRQCHIP_STATE_PENDING) {
		pr_debug("Unexpected irqchip_irq_state\n");
		return -EINVAL;
	}

	gicv5_spi_irq_write_pending_state(d, state);

	return 0;
}
471 
/*
 * irq_chip .irq_set_irqchip_state for LPIs: only the pending state can
 * be written; everything else is rejected with -EINVAL.
 */
static int gicv5_lpi_irq_set_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool state)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		gicv5_lpi_irq_write_pending_state(d, state);
		break;

	default:
		pr_debug("Unexpected irqchip_irq_state\n");
		return -EINVAL;
	}

	return 0;
}
488 
/* Retrigger an SPI by re-marking it pending; returns 1 on success, 0 on failure. */
static int gicv5_spi_irq_retrigger(struct irq_data *data)
{
	return !gicv5_spi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING,
						true);
}

/* Retrigger an LPI by re-marking it pending; returns 1 on success, 0 on failure. */
static int gicv5_lpi_irq_retrigger(struct irq_data *data)
{
	return !gicv5_lpi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING,
						true);
}
500 
/*
 * Send an IPI to a single CPU: IPIs are backed by per-CPU LPIs, so
 * retriggering the parent (marking the LPI pending) delivers the IPI.
 */
static void gicv5_ipi_send_single(struct irq_data *d, unsigned int cpu)
{
	/* Mark the LPI pending */
	irq_chip_retrigger_hierarchy(d);
}
506 
/* True if the PPI's HW handling mode (ICC_PPI_HMR bit set) is level. */
static bool gicv5_ppi_irq_is_level(irq_hw_number_t hwirq)
{
	u64 bit = BIT_ULL(hwirq % 64);

	return !!(read_ppi_sysreg_s(hwirq, PPI_HM) & bit);
}
513 
/*
 * irq_chip .irq_set_vcpu_affinity for PPIs: record whether this PPI is
 * forwarded to a vCPU (non-NULL @vcpu) so gicv5_ppi_irq_eoi() can skip
 * deactivation for forwarded interrupts.
 */
static int gicv5_ppi_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (!vcpu)
		irqd_clr_forwarded_to_vcpu(d);
	else
		irqd_set_forwarded_to_vcpu(d);

	return 0;
}
523 
/*
 * irq_chip for per-CPU PPIs: mask/unmask/EOI and state accesses go through
 * the banked CPU interface system registers; no affinity callbacks since
 * PPIs are private to each CPU.
 */
static const struct irq_chip gicv5_ppi_irq_chip = {
	.name			= "GICv5-PPI",
	.irq_mask		= gicv5_ppi_irq_mask,
	.irq_unmask		= gicv5_ppi_irq_unmask,
	.irq_eoi		= gicv5_ppi_irq_eoi,
	.irq_get_irqchip_state	= gicv5_ppi_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gicv5_ppi_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gicv5_ppi_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE	  |
				  IRQCHIP_MASK_ON_SUSPEND,
};

/* irq_chip for shared peripheral interrupts (SPIs), driven via GIC instructions. */
static const struct irq_chip gicv5_spi_irq_chip = {
	.name			= "GICv5-SPI",
	.irq_mask		= gicv5_spi_irq_mask,
	.irq_unmask		= gicv5_spi_irq_unmask,
	.irq_eoi		= gicv5_spi_irq_eoi,
	.irq_set_type		= gicv5_spi_irq_set_type,
	.irq_set_affinity	= gicv5_spi_irq_set_affinity,
	.irq_retrigger		= gicv5_spi_irq_retrigger,
	.irq_get_irqchip_state	= gicv5_spi_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gicv5_spi_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE	  |
				  IRQCHIP_MASK_ON_SUSPEND,
};

/* irq_chip for LPIs; no .irq_set_type since LPI handling mode is reset to edge. */
static const struct irq_chip gicv5_lpi_irq_chip = {
	.name			= "GICv5-LPI",
	.irq_mask		= gicv5_lpi_irq_mask,
	.irq_unmask		= gicv5_lpi_irq_unmask,
	.irq_eoi		= gicv5_lpi_irq_eoi,
	.irq_set_affinity	= gicv5_lpi_irq_set_affinity,
	.irq_retrigger		= gicv5_lpi_irq_retrigger,
	.irq_get_irqchip_state	= gicv5_lpi_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gicv5_lpi_irq_set_irqchip_state,
	.flags			= IRQCHIP_SKIP_SET_WAKE	  |
				  IRQCHIP_MASK_ON_SUSPEND,
};

/*
 * irq_chip for IPIs: stacked on the LPI domain, so most operations are
 * forwarded to the parent; sending an IPI retriggers the backing LPI.
 */
static const struct irq_chip gicv5_ipi_irq_chip = {
	.name			= "GICv5-IPI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.ipi_send_single	= gicv5_ipi_send_single,
	.flags			= IRQCHIP_SKIP_SET_WAKE	  |
				  IRQCHIP_MASK_ON_SUSPEND,
};
576 
gicv5_irq_domain_translate(struct irq_domain * d,struct irq_fwspec * fwspec,irq_hw_number_t * hwirq,unsigned int * type,const u8 hwirq_type)577 static __always_inline int gicv5_irq_domain_translate(struct irq_domain *d,
578 						      struct irq_fwspec *fwspec,
579 						      irq_hw_number_t *hwirq,
580 						      unsigned int *type,
581 						      const u8 hwirq_type)
582 {
583 	unsigned int hwirq_trigger;
584 	u8 fwspec_irq_type;
585 
586 	if (is_of_node(fwspec->fwnode)) {
587 
588 		if (fwspec->param_count < 3)
589 			return -EINVAL;
590 
591 		fwspec_irq_type = fwspec->param[0];
592 
593 		if (fwspec->param[0] != hwirq_type)
594 			return -EINVAL;
595 
596 		*hwirq = fwspec->param[1];
597 		hwirq_trigger = fwspec->param[2];
598 	}
599 
600 	if (is_fwnode_irqchip(fwspec->fwnode)) {
601 
602 		if (fwspec->param_count != 2)
603 			return -EINVAL;
604 
605 		fwspec_irq_type = FIELD_GET(GICV5_HWIRQ_TYPE, fwspec->param[0]);
606 
607 		if (fwspec_irq_type != hwirq_type)
608 			return -EINVAL;
609 
610 		*hwirq = FIELD_GET(GICV5_HWIRQ_ID, fwspec->param[0]);
611 		hwirq_trigger = fwspec->param[1];
612 	}
613 
614 	switch (hwirq_type) {
615 	case GICV5_HWIRQ_TYPE_PPI:
616 		/*
617 		 * Handling mode is hardcoded for PPIs, set the type using
618 		 * HW reported value.
619 		 */
620 		*type = gicv5_ppi_irq_is_level(*hwirq) ? IRQ_TYPE_LEVEL_LOW :
621 							 IRQ_TYPE_EDGE_RISING;
622 		break;
623 	case GICV5_HWIRQ_TYPE_SPI:
624 		*type = hwirq_trigger & IRQ_TYPE_SENSE_MASK;
625 		break;
626 	default:
627 		BUILD_BUG_ON(1);
628 	}
629 
630 	return 0;
631 }
632 
/* irq_domain .translate for the PPI domain. */
static int gicv5_irq_ppi_domain_translate(struct irq_domain *d,
					  struct irq_fwspec *fwspec,
					  irq_hw_number_t *hwirq,
					  unsigned int *type)
{
	return gicv5_irq_domain_translate(d, fwspec, hwirq, type,
					  GICV5_HWIRQ_TYPE_PPI);
}
641 
/*
 * irq_domain .alloc for the PPI domain: translate the fwspec, mark
 * level-triggered PPIs, and set up the virq as a per-CPU devid IRQ with
 * the PPI chip. Only single allocations are supported.
 */
static int gicv5_irq_ppi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int ret;

	if (WARN_ON_ONCE(nr_irqs != 1))
		return -EINVAL;

	ret = gicv5_irq_ppi_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_status_flags(virq, IRQ_LEVEL);

	irq_set_percpu_devid(virq);
	irq_domain_set_info(domain, virq, hwirq, &gicv5_ppi_irq_chip, NULL,
			    handle_percpu_devid_irq, NULL, NULL);

	return 0;
}
666 
/*
 * Common irq_domain .free (PPI/SPI/LPI domains): detach the handler and
 * reset the irq_data. Only single frees are supported, mirroring .alloc.
 */
static void gicv5_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d;

	if (WARN_ON_ONCE(nr_irqs != 1))
		return;

	d = irq_domain_get_irq_data(domain, virq);

	irq_set_handler(virq, NULL);
	irq_domain_reset_irq_data(d);
}
680 
gicv5_irq_ppi_domain_select(struct irq_domain * d,struct irq_fwspec * fwspec,enum irq_domain_bus_token bus_token)681 static int gicv5_irq_ppi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
682 				       enum irq_domain_bus_token bus_token)
683 {
684 	u32 hwirq_type;
685 
686 	if (fwspec->fwnode != d->fwnode)
687 		return 0;
688 
689 	if (is_of_node(fwspec->fwnode))
690 		hwirq_type = fwspec->param[0];
691 
692 	if (is_fwnode_irqchip(fwspec->fwnode))
693 		hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, fwspec->param[0]);
694 
695 	if (hwirq_type != GICV5_HWIRQ_TYPE_PPI)
696 		return 0;
697 
698 	return (d == gicv5_global_data.ppi_domain);
699 }
700 
/* PPI domain callbacks. */
static const struct irq_domain_ops gicv5_irq_ppi_domain_ops = {
	.translate	= gicv5_irq_ppi_domain_translate,
	.alloc		= gicv5_irq_ppi_domain_alloc,
	.free		= gicv5_irq_domain_free,
	.select		= gicv5_irq_ppi_domain_select
};
707 
/* irq_domain .translate for the SPI domain. */
static int gicv5_irq_spi_domain_translate(struct irq_domain *d,
					  struct irq_fwspec *fwspec,
					  irq_hw_number_t *hwirq,
					  unsigned int *type)
{
	return gicv5_irq_domain_translate(d, fwspec, hwirq, type,
					  GICV5_HWIRQ_TYPE_SPI);
}
716 
/*
 * irq_domain .alloc for the SPI domain: translate the fwspec, attach the
 * SPI chip with the owning IRS as chip data, and program the hwirq's
 * priority/affinity in HW. Only single allocations are supported.
 */
static int gicv5_irq_spi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	struct gicv5_irs_chip_data *chip_data;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	struct irq_data *irqd;
	irq_hw_number_t hwirq;
	int ret;

	if (WARN_ON_ONCE(nr_irqs != 1))
		return -EINVAL;

	ret = gicv5_irq_spi_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	irqd = irq_desc_get_irq_data(irq_to_desc(virq));
	/* Find the IRS that owns this SPI id; stored as per-irq chip data. */
	chip_data = gicv5_irs_lookup_by_spi_id(hwirq);

	irq_domain_set_info(domain, virq, hwirq, &gicv5_spi_irq_chip, chip_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_probe(virq);
	irqd_set_single_target(irqd);

	/* Set default priority and route the SPI to this CPU. */
	gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_SPI);

	return 0;
}
746 
gicv5_irq_spi_domain_select(struct irq_domain * d,struct irq_fwspec * fwspec,enum irq_domain_bus_token bus_token)747 static int gicv5_irq_spi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
748 				       enum irq_domain_bus_token bus_token)
749 {
750 	u32 hwirq_type;
751 
752 	if (fwspec->fwnode != d->fwnode)
753 		return 0;
754 
755 	if (is_of_node(fwspec->fwnode))
756 		hwirq_type = fwspec->param[0];
757 
758 	if (is_fwnode_irqchip(fwspec->fwnode))
759 		hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, fwspec->param[0]);
760 
761 	if (hwirq_type != GICV5_HWIRQ_TYPE_SPI)
762 		return 0;
763 
764 	return (d == gicv5_global_data.spi_domain);
765 }
766 
/* SPI domain callbacks. */
static const struct irq_domain_ops gicv5_irq_spi_domain_ops = {
	.translate	= gicv5_irq_spi_domain_translate,
	.alloc		= gicv5_irq_spi_domain_alloc,
	.free		= gicv5_irq_domain_free,
	.select		= gicv5_irq_spi_domain_select
};
773 
/* Reset a newly allocated LPI to a known state: edge handling, not pending. */
static void gicv5_lpi_config_reset(struct irq_data *d)
{
	u64 cdhm;

	/*
	 * Reset LPIs handling mode to edge by default and clear pending
	 * state to make sure we start the LPI with a clean state from
	 * previous incarnations.
	 */
	cdhm = FIELD_PREP(GICV5_GIC_CDHM_HM_MASK, 0)				|
	       FIELD_PREP(GICV5_GIC_CDHM_TYPE_MASK, GICV5_HWIRQ_TYPE_LPI)	|
	       FIELD_PREP(GICV5_GIC_CDHM_ID_MASK, d->hwirq);
	gic_insn(cdhm, CDHM);

	gicv5_lpi_irq_write_pending_state(d, false);
}
790 
/*
 * irq_domain .alloc for the LPI domain. @arg is a pointer to the LPI
 * INTID (allocated by the caller, e.g. the IPI domain). Attaches the
 * LPI chip, allocates the interrupt state table entry in the IRS, then
 * programs default priority/affinity and resets the LPI config.
 *
 * NOTE(review): if gicv5_irs_iste_alloc() fails, the irq_domain info set
 * just above is left in place and undone by the caller's free path —
 * confirm this is the intended unwind.
 */
static int gicv5_irq_lpi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq;
	struct irq_data *irqd;
	u32 *lpi = arg;
	int ret;

	if (WARN_ON_ONCE(nr_irqs != 1))
		return -EINVAL;

	hwirq = *lpi;

	irqd = irq_domain_get_irq_data(domain, virq);

	irq_domain_set_info(domain, virq, hwirq, &gicv5_lpi_irq_chip, NULL,
			    handle_fasteoi_irq, NULL, NULL);
	irqd_set_single_target(irqd);

	ret = gicv5_irs_iste_alloc(hwirq);
	if (ret < 0)
		return ret;

	gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_LPI);
	gicv5_lpi_config_reset(irqd);

	return 0;
}
819 
/* LPI domain callbacks; no .translate — LPIs are allocated by id, not fwspec. */
static const struct irq_domain_ops gicv5_irq_lpi_domain_ops = {
	.alloc	= gicv5_irq_lpi_domain_alloc,
	.free	= gicv5_irq_domain_free,
};
824 
/*
 * Create the (tree-based, sparse hwirq space) LPI domain.
 * NOTE(review): the irq_domain_create_tree() result is not NULL-checked
 * here; callers appear responsible for handling a NULL lpi_domain —
 * confirm against the probe path.
 */
void __init gicv5_init_lpi_domain(void)
{
	struct irq_domain *d;

	d = irq_domain_create_tree(NULL, &gicv5_irq_lpi_domain_ops, NULL);
	gicv5_global_data.lpi_domain = d;
}

/* Tear down the LPI domain (probe-failure path). */
void __init gicv5_free_lpi_domain(void)
{
	irq_domain_remove(gicv5_global_data.lpi_domain);
	gicv5_global_data.lpi_domain = NULL;
}
838 
/*
 * irq_domain .alloc for the IPI domain: back each IPI with a freshly
 * allocated LPI in the parent domain, then stack the IPI chip on top.
 * The IPI hwirq number is the loop index @i, not the LPI id.
 *
 * NOTE(review): on failure partway through, LPIs/virqs allocated in
 * earlier iterations are not unwound here — presumably the core frees
 * them via .free; confirm.
 */
static int gicv5_irq_ipi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	struct irq_data *irqd;
	int ret, i;
	u32 lpi;

	for (i = 0; i < nr_irqs; i++) {
		ret = gicv5_alloc_lpi();
		if (ret < 0)
			return ret;

		lpi = ret;

		ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
		if (ret) {
			gicv5_free_lpi(lpi);
			return ret;
		}

		irqd = irq_domain_get_irq_data(domain, virq + i);

		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
				&gicv5_ipi_irq_chip, NULL);

		irqd_set_single_target(irqd);

		irq_set_handler(virq + i, handle_percpu_irq);
	}

	return 0;
}
871 
/*
 * irq_domain .free for the IPI domain: release the backing LPI (found
 * via the parent irq_data's hwirq), reset the stacked irq_data and free
 * the parent allocation for each virq.
 */
static void gicv5_irq_ipi_domain_free(struct irq_domain *domain, unsigned int virq,
				      unsigned int nr_irqs)
{
	struct irq_data *d;
	unsigned int i;

	for (i = 0; i < nr_irqs; i++) {
		d = irq_domain_get_irq_data(domain, virq + i);

		if (!d)
			return;

		gicv5_free_lpi(d->parent_data->hwirq);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
		irq_domain_free_irqs_parent(domain, virq + i, 1);
	}
}
891 
/* IPI domain callbacks (stacked on the LPI domain). */
static const struct irq_domain_ops gicv5_irq_ipi_domain_ops = {
	.alloc	= gicv5_irq_ipi_domain_alloc,
	.free	= gicv5_irq_ipi_domain_free,
};
896 
/*
 * Dispatch an acknowledged hwirq (type + id packed in @hwirq) to its
 * owning irq domain. If dispatch fails, EOI the interrupt anyway so it
 * does not remain active forever.
 *
 * Fix: the pr_err_once() format string was missing its trailing newline.
 */
static void handle_irq_per_domain(u32 hwirq)
{
	u8 hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, hwirq);
	u32 hwirq_id = FIELD_GET(GICV5_HWIRQ_ID, hwirq);
	struct irq_domain *domain;

	switch (hwirq_type) {
	case GICV5_HWIRQ_TYPE_PPI:
		domain = gicv5_global_data.ppi_domain;
		break;
	case GICV5_HWIRQ_TYPE_SPI:
		domain = gicv5_global_data.spi_domain;
		break;
	case GICV5_HWIRQ_TYPE_LPI:
		domain = gicv5_global_data.lpi_domain;
		break;
	default:
		pr_err_once("Unknown IRQ type, bail out\n");
		return;
	}

	if (generic_handle_domain_irq(domain, hwirq_id)) {
		pr_err_once("Could not handle, hwirq = 0x%x\n", hwirq_id);
		gicv5_hwirq_eoi(hwirq_id, hwirq_type);
	}
}
923 
/*
 * Top-level IRQ entry point: acknowledge the interrupt via the CDIA GIC
 * instruction and dispatch the returned INTID to its domain. A CDIA
 * result without the valid bit set is a spurious wakeup and is ignored.
 */
static void __exception_irq_entry gicv5_handle_irq(struct pt_regs *regs)
{
	bool valid;
	u32 hwirq;
	u64 ia;

	ia = gicr_insn(CDIA);
	valid = GICV5_GICR_CDIA_VALID(ia);

	if (!valid)
		return;

	/*
	 * Ensure that the CDIA instruction effects (ie IRQ activation) are
	 * completed before handling the interrupt.
	 */
	gsb_ack();

	/*
	 * Ensure instruction ordering between an acknowledgment and subsequent
	 * instructions in the IRQ handler using an ISB.
	 */
	isb();

	hwirq = FIELD_GET(GICV5_HWIRQ_INTID, ia);

	handle_irq_per_domain(hwirq);
}
952 
/* Disable the CPU interface by clearing the ICC_CR0_EL1 enable bit. */
static void gicv5_cpu_disable_interrupts(void)
{
	u64 cr0;

	cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 0);
	write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
}

/*
 * Bring up this CPU's interface: mask all PPIs, initialize PPI
 * priorities, set the priority mask to the minimum supported priority,
 * then enable the interface via ICC_CR0_EL1.
 */
static void gicv5_cpu_enable_interrupts(void)
{
	u64 cr0, pcr;

	write_sysreg_s(0, SYS_ICC_PPI_ENABLER0_EL1);
	write_sysreg_s(0, SYS_ICC_PPI_ENABLER1_EL1);

	gicv5_ppi_priority_init();

	pcr = FIELD_PREP(ICC_PCR_EL1_PRIORITY, GICV5_IRQ_PRI_MI);
	write_sysreg_s(pcr, SYS_ICC_PCR_EL1);

	cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 1);
	write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
}
976 
/* First virq of the per-CPU IPI range allocated in gicv5_smp_init(). */
static int base_ipi_virq;

/*
 * CPU hotplug "starting" callback: verify the CPU implements FEAT_GCIE,
 * enable its interface and register it with the IRS.
 */
static int gicv5_starting_cpu(unsigned int cpu)
{
	if (WARN(!gicv5_cpuif_has_gcie(),
		 "GICv5 system components present but CPU does not have FEAT_GCIE"))
		return -ENODEV;

	gicv5_cpu_enable_interrupts();

	return gicv5_irs_register_cpu(cpu);
}
989 
gicv5_smp_init(void)990 static void __init gicv5_smp_init(void)
991 {
992 	unsigned int num_ipis = GICV5_IPIS_PER_CPU * nr_cpu_ids;
993 
994 	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
995 				  "irqchip/arm/gicv5:starting",
996 				  gicv5_starting_cpu, NULL);
997 
998 	base_ipi_virq = irq_domain_alloc_irqs(gicv5_global_data.ipi_domain,
999 					      num_ipis, NUMA_NO_NODE, NULL);
1000 	if (WARN(base_ipi_virq <= 0, "IPI IRQ allocation was not successful"))
1001 		return;
1002 
1003 	set_smp_ipi_range_percpu(base_ipi_virq, GICV5_IPIS_PER_CPU, nr_cpu_ids);
1004 }
1005 
gicv5_free_domains(void)1006 static void __init gicv5_free_domains(void)
1007 {
1008 	if (gicv5_global_data.ppi_domain)
1009 		irq_domain_remove(gicv5_global_data.ppi_domain);
1010 	if (gicv5_global_data.spi_domain)
1011 		irq_domain_remove(gicv5_global_data.spi_domain);
1012 	if (gicv5_global_data.ipi_domain)
1013 		irq_domain_remove(gicv5_global_data.ipi_domain);
1014 
1015 	gicv5_global_data.ppi_domain = NULL;
1016 	gicv5_global_data.spi_domain = NULL;
1017 	gicv5_global_data.ipi_domain = NULL;
1018 }
1019 
gicv5_init_domains(struct fwnode_handle * handle)1020 static int __init gicv5_init_domains(struct fwnode_handle *handle)
1021 {
1022 	u32 spi_count = gicv5_global_data.global_spi_count;
1023 	struct irq_domain *d;
1024 
1025 	d = irq_domain_create_linear(handle, PPI_NR, &gicv5_irq_ppi_domain_ops, NULL);
1026 	if (!d)
1027 		return -ENOMEM;
1028 
1029 	irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED);
1030 	gicv5_global_data.ppi_domain = d;
1031 
1032 	if (spi_count) {
1033 		d = irq_domain_create_linear(handle, spi_count,
1034 					     &gicv5_irq_spi_domain_ops, NULL);
1035 
1036 		if (!d) {
1037 			gicv5_free_domains();
1038 			return -ENOMEM;
1039 		}
1040 
1041 		gicv5_global_data.spi_domain = d;
1042 		irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED);
1043 	}
1044 
1045 	if (!WARN(!gicv5_global_data.lpi_domain,
1046 		  "LPI domain uninitialized, can't set up IPIs")) {
1047 		d = irq_domain_create_hierarchy(gicv5_global_data.lpi_domain,
1048 						0, GICV5_IPIS_PER_CPU * nr_cpu_ids,
1049 						NULL, &gicv5_irq_ipi_domain_ops,
1050 						NULL);
1051 
1052 		if (!d) {
1053 			gicv5_free_domains();
1054 			return -ENOMEM;
1055 		}
1056 		gicv5_global_data.ipi_domain = d;
1057 	}
1058 	gicv5_global_data.fwnode = handle;
1059 
1060 	return 0;
1061 }
1062 
gicv5_set_cpuif_pribits(void)1063 static void gicv5_set_cpuif_pribits(void)
1064 {
1065 	u64 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
1066 
1067 	switch (FIELD_GET(ICC_IDR0_EL1_PRI_BITS, icc_idr0)) {
1068 	case ICC_IDR0_EL1_PRI_BITS_4BITS:
1069 		gicv5_global_data.cpuif_pri_bits = 4;
1070 		break;
1071 	case ICC_IDR0_EL1_PRI_BITS_5BITS:
1072 		gicv5_global_data.cpuif_pri_bits = 5;
1073 		break;
1074 	default:
1075 		pr_err("Unexpected ICC_IDR0_EL1_PRI_BITS value, default to 4");
1076 		gicv5_global_data.cpuif_pri_bits = 4;
1077 		break;
1078 	}
1079 }
1080 
gicv5_set_cpuif_idbits(void)1081 static void gicv5_set_cpuif_idbits(void)
1082 {
1083 	u32 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
1084 
1085 	switch (FIELD_GET(ICC_IDR0_EL1_ID_BITS, icc_idr0)) {
1086 	case ICC_IDR0_EL1_ID_BITS_16BITS:
1087 		gicv5_global_data.cpuif_id_bits = 16;
1088 		break;
1089 	case ICC_IDR0_EL1_ID_BITS_24BITS:
1090 		gicv5_global_data.cpuif_id_bits = 24;
1091 		break;
1092 	default:
1093 		pr_err("Unexpected ICC_IDR0_EL1_ID_BITS value, default to 16");
1094 		gicv5_global_data.cpuif_id_bits = 16;
1095 		break;
1096 	}
1097 }
1098 
1099 #ifdef CONFIG_KVM
1100 static struct gic_kvm_info gic_v5_kvm_info __initdata;
1101 
gic_of_setup_kvm_info(struct device_node * node)1102 static void __init gic_of_setup_kvm_info(struct device_node *node)
1103 {
1104 	/*
1105 	 * If we don't have native GICv5 virtualisation support, then
1106 	 * we also don't have FEAT_GCIE_LEGACY - the architecture
1107 	 * forbids this combination.
1108 	 */
1109 	if (!gicv5_global_data.virt_capable) {
1110 		pr_info("GIC implementation is not virtualization capable\n");
1111 		return;
1112 	}
1113 
1114 	gic_v5_kvm_info.type = GIC_V5;
1115 
1116 	/* GIC Virtual CPU interface maintenance interrupt */
1117 	gic_v5_kvm_info.no_maint_irq_mask = false;
1118 	gic_v5_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
1119 	if (!gic_v5_kvm_info.maint_irq) {
1120 		pr_warn("cannot find GICv5 virtual CPU interface maintenance interrupt\n");
1121 		return;
1122 	}
1123 
1124 	vgic_set_kvm_info(&gic_v5_kvm_info);
1125 }
1126 #else
/* CONFIG_KVM disabled: nothing to advertise to KVM. */
static inline void __init gic_of_setup_kvm_info(struct device_node *node)
{
}
1130 #endif // CONFIG_KVM
1131 
/*
 * Common (DT and ACPI) GICv5 bring-up: create the IRQ domains, size the
 * effective priority field, enable the boot CPU's interface, install the
 * top-level IRQ handler, enable the IRS, then set up IPIs and probe ITSes.
 *
 * Returns 0 on success or a negative errno. On failure the domains are
 * removed; failures after the CPU interface was enabled also disable it.
 */
static int __init gicv5_init_common(struct fwnode_handle *parent_domain)
{
	int ret = gicv5_init_domains(parent_domain);
	if (ret)
		return ret;

	gicv5_set_cpuif_pribits();
	gicv5_set_cpuif_idbits();

	/*
	 * The usable number of priority bits is the smaller non-zero of
	 * what the CPU interface and the IRS implement.
	 */
	pri_bits = min_not_zero(gicv5_global_data.cpuif_pri_bits,
				gicv5_global_data.irs_pri_bits);

	/* Bring up the boot CPU directly; secondaries go via cpuhp. */
	ret = gicv5_starting_cpu(smp_processor_id());
	if (ret)
		goto out_dom;

	ret = set_handle_irq(gicv5_handle_irq);
	if (ret)
		goto out_int;

	ret = gicv5_irs_enable();
	if (ret)
		goto out_int;

	gicv5_smp_init();

	gicv5_irs_its_probe();
	return 0;

out_int:
	gicv5_cpu_disable_interrupts();
out_dom:
	gicv5_free_domains();
	return ret;
}
1167 
gicv5_of_init(struct device_node * node,struct device_node * parent)1168 static int __init gicv5_of_init(struct device_node *node, struct device_node *parent)
1169 {
1170 	int ret = gicv5_irs_of_probe(node);
1171 	if (ret)
1172 		return ret;
1173 
1174 	ret = gicv5_init_common(of_fwnode_handle(node));
1175 	if (ret)
1176 		goto out_irs;
1177 
1178 	gic_of_setup_kvm_info(node);
1179 
1180 	return 0;
1181 out_irs:
1182 	gicv5_irs_remove();
1183 
1184 	return ret;
1185 }
1186 IRQCHIP_DECLARE(gic_v5, "arm,gic-v5", gicv5_of_init);
1187 
1188 #ifdef CONFIG_ACPI
acpi_validate_gic_table(struct acpi_subtable_header * header,struct acpi_probe_entry * ape)1189 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
1190 					   struct acpi_probe_entry *ape)
1191 {
1192 	struct acpi_madt_gicv5_irs *irs = (struct acpi_madt_gicv5_irs *)header;
1193 
1194 	return (irs->version == ape->driver_data);
1195 }
1196 
1197 static struct fwnode_handle *gsi_domain_handle;
1198 
/*
 * Resolve a GSI to the fwnode of the domain that owns it: IWB-typed GSIs
 * are looked up through IORT by frame ID, everything else belongs to the
 * IRS GSI domain.
 */
static struct fwnode_handle *gic_v5_get_gsi_domain_id(u32 gsi)
{
	u32 frame_id;

	if (FIELD_GET(GICV5_GSI_IC_TYPE, gsi) != GICV5_GSI_IWB_TYPE)
		return gsi_domain_handle;

	frame_id = FIELD_GET(GICV5_GSI_IWB_FRAME_ID, gsi);
	return iort_iwb_handle(frame_id);
}
1206 
gic_acpi_init(union acpi_subtable_headers * header,const unsigned long end)1207 static int __init gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
1208 {
1209 	struct acpi_madt_gicv5_irs *irs = (struct acpi_madt_gicv5_irs *)header;
1210 	int ret;
1211 
1212 	if (gsi_domain_handle)
1213 		return 0;
1214 
1215 	gsi_domain_handle = irq_domain_alloc_fwnode(&irs->config_base_address);
1216 	if (!gsi_domain_handle)
1217 		return -ENOMEM;
1218 
1219 	ret = gicv5_irs_acpi_probe();
1220 	if (ret)
1221 		goto out_fwnode;
1222 
1223 	ret = gicv5_init_common(gsi_domain_handle);
1224 	if (ret)
1225 		goto out_irs;
1226 
1227 	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC_V5, gic_v5_get_gsi_domain_id);
1228 
1229 	return 0;
1230 
1231 out_irs:
1232 	gicv5_irs_remove();
1233 out_fwnode:
1234 	irq_domain_free_fwnode(gsi_domain_handle);
1235 	return ret;
1236 }
1237 IRQCHIP_ACPI_DECLARE(gic_v5, ACPI_MADT_TYPE_GICV5_IRS,
1238 		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V5,
1239 		     gic_acpi_init);
1240 #endif
1241