// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2010 John Crispin <john@phrozen.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>
#include <asm/time.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_ISR		0x0000
#define LTQ_ICU_IER		0x0008
#define LTQ_ICU_IOSR		0x0010
#define LTQ_ICU_IRSR		0x0018
#define LTQ_ICU_IMR		0x0020

#define LTQ_ICU_IM_SIZE		0x28

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INC	0x0008
#define LTQ_EIU_EXIN_INEN	0x000C

/* number of external interrupts */
#define MAX_EIU			6

/* the performance counter */
#define LTQ_PERF_IRQ		(INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22

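/*
 * icu register accessors - each vpe has its own icu register block and
 * m selects one of the MAX_IM interrupt modules, each spanning
 * LTQ_ICU_IM_SIZE bytes
 */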
#define ltq_icu_w32(vpe, m, x, y)	\
	ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))

#define ltq_icu_r32(vpe, m, x)		\
	ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE		8

static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[NR_CPUS];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static DEFINE_SPINLOCK(ltq_eiu_lock);
static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
static int ltq_perfcount_irq;

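/* return the hwirq wired to external irq line exin, or -1 if out of range */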
int ltq_eiu_get_irq(int exin)
{
	if (exin < exin_avail)
		return ltq_eiu_irq[exin];
	return -1;
}

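/*
 * the hwirq is split into a module number and a bit offset inside that
 * module; disabling clears the enable bit on every present vpe
 */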
void ltq_disable_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im,
			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
			    LTQ_ICU_IER);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

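/* mask the irq in the IER and clear its pending bit in the ISR in one go */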
void ltq_mask_and_ack_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im,
			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
			    LTQ_ICU_IER);
		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

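/* writing the bit back to the ISR clears the pending interrupt */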
static void ltq_ack_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

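/*
 * unlike disable, enable only sets the IER bit of the first vpe in the
 * effective affinity mask, so the irq fires on exactly one vpe
 */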
void ltq_enable_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));

	/* This shouldn't even be possible, maybe during CPU hotplug spam */
	if (unlikely(vpe >= nr_cpu_ids))
		vpe = smp_processor_id();

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);

	ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
		    LTQ_ICU_IER);

	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

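/*
 * each external irq line owns a 4 bit wide slot in EXIN_C; the low three
 * bits of the slot select the trigger mode encoded in the switch below
 */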
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
	int i;
	unsigned long flags;

	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			int val = 0;
			int edge = 0;

			switch (type) {
			case IRQF_TRIGGER_NONE:
				break;
			case IRQF_TRIGGER_RISING:
				val = 1;
				edge = 1;
				break;
			case IRQF_TRIGGER_FALLING:
				val = 2;
				edge = 1;
				break;
			case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
				val = 3;
				edge = 1;
				break;
			case IRQF_TRIGGER_HIGH:
				val = 5;
				break;
			case IRQF_TRIGGER_LOW:
				val = 6;
				break;
			default:
				pr_err("invalid type %d for irq %ld\n",
					type, d->hwirq);
				return -EINVAL;
			}

			if (edge)
				irq_set_handler(d->hwirq, handle_edge_irq);

			spin_lock_irqsave(&ltq_eiu_lock, flags);
			ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
				    (~(7 << (i * 4)))) | (val << (i * 4)),
				    LTQ_EIU_EXIN_C);
			spin_unlock_irqrestore(&ltq_eiu_lock, flags);
		}
	}

	return 0;
}

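/*
 * startup of an EIU irq: unmask it in the ICU, default to low level
 * trigger, clear stale pending bits and enable the line in the EIU
 */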
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_enable_irq(d);
	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			/* by default we are low level triggered */
			ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
				LTQ_EIU_EXIN_INC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}

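/* shutdown mirrors startup: mask the line in both the ICU and the EIU */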
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_disable_irq(d);
	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}

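/*
 * setting the affinity only records the new effective mask; the IER of
 * the chosen vpe is written the next time the irq is enabled
 */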
#if defined(CONFIG_SMP)
static int ltq_icu_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	struct cpumask tmask;

	if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
		return -EINVAL;

	irq_data_update_effective_affinity(d, &tmask);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip ltq_irq_type = {
	.name = "icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
#if defined(CONFIG_SMP)
	.irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

static struct irq_chip ltq_eiu_type = {
	.name = "eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
	.irq_set_type = ltq_eiu_settype,
#if defined(CONFIG_SMP)
	.irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

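/*
 * chained handler for one interrupt module; the parent MIPS CPU irq
 * number minus the two software irqs gives the module index
 */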
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
	unsigned int module = irq_desc_get_irq(desc) - 2;
	u32 irq;
	irq_hw_number_t hwirq;
	int vpe = smp_processor_id();

	irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
	if (irq == 0)
		return;

	/*
	 * silicon bug causes only the msb set to 1 to be valid. all
	 * other bits might be bogus
	 */
	irq = __fls(irq);
	hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
	generic_handle_domain_irq(ltq_domain, hwirq);

	/* if this is an EBU irq, we need to ack it or get a deadlock */
	if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}

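/*
 * hwirqs below the cascade base belong to the MIPS CPU irq controller;
 * EIU lines get the eiu chip, all other lines the plain icu chip
 */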
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	struct irq_chip *chip = &ltq_irq_type;
	struct irq_data *data;
	int i;

	if (hw < MIPS_CPU_IRQ_CASCADE)
		return 0;

	for (i = 0; i < exin_avail; i++)
		if (hw == ltq_eiu_irq[i])
			chip = &ltq_eiu_type;

	data = irq_get_irq_data(irq);

	irq_data_update_effective_affinity(data, cpumask_of(0));

	irq_set_chip_and_handler(irq, chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
	.xlate = irq_domain_xlate_onetwocell,
	.map = icu_map,
};

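/*
 * probe the ICU from the device tree: map one register block per vpe,
 * mask and clear all irqs, chain onto the MIPS CPU irqs and set up the
 * optional xway external interrupt unit
 */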
static int __init
icu_of_init(struct device_node *node, struct device_node *parent)
{
	struct device_node *eiu_node;
	struct resource res;
	int i, ret, vpe;

	/* load register regions of available ICUs */
	for_each_possible_cpu(vpe) {
		if (of_address_to_resource(node, vpe, &res))
			panic("Failed to get icu%i memory range", vpe);

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request icu%i memory\n", vpe);

		ltq_icu_membase[vpe] = ioremap(res.start,
					resource_size(&res));

		if (!ltq_icu_membase[vpe])
			panic("Failed to remap icu%i memory", vpe);
	}

	/* turn off all irqs by default */
	for_each_possible_cpu(vpe) {
		for (i = 0; i < MAX_IM; i++) {
			/* make sure all irqs are turned off by default */
			ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);

			/* clear all possibly pending interrupts */
			ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
			ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);

			/* clear resend */
			ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
		}
	}

	mips_cpu_irq_init();

	for (i = 0; i < MAX_IM; i++)
		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

	ltq_domain = irq_domain_create_linear(of_fwnode_handle(node),
		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
		&irq_domain_ops, 0);

	/* tell oprofile which irq to use */
	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

	/* the external interrupts are optional and xway only */
	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
	if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
		/* find out how many external irq sources we have */
		exin_avail = of_property_count_u32_elems(eiu_node,
							 "lantiq,eiu-irqs");

		if (exin_avail > MAX_EIU)
			exin_avail = MAX_EIU;

		ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
						 ltq_eiu_irq, exin_avail);
		if (ret)
			panic("failed to load external irq resources");

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request eiu memory\n");

		ltq_eiu_membase = ioremap(res.start,
					resource_size(&res));
		if (!ltq_eiu_membase)
			panic("Failed to remap eiu memory");
	}
	of_node_put(eiu_node);

	return 0;
}

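/* report the mapped performance counter irq to the MIPS perf code */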
int get_c0_perfcount_int(void)
{
	return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{
	return CP0_LEGACY_COMPARE_IRQ;
}

IRQCHIP_DECLARE(lantiq_icu, "lantiq,icu", icu_of_init);

void __init arch_init_irq(void)
{
	irqchip_init();
}