1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Loongson 3A5000 ext interrupt controller emulation
4 *
5 * Copyright (C) 2021 Loongson Technology Corporation Limited
6 */
7
8 #include "qemu/osdep.h"
9 #include "qemu/module.h"
10 #include "qemu/log.h"
11 #include "qapi/error.h"
12 #include "hw/irq.h"
13 #include "hw/loongarch/virt.h"
14 #include "system/address-spaces.h"
15 #include "system/kvm.h"
16 #include "hw/intc/loongarch_extioi.h"
17 #include "trace.h"
18
/*
 * Translate a cpu arch_id into the logical index used by the per-cpu
 * state arrays (s->cpu[]).
 *
 * Returns the index of the matching, present cpu, or -1 when no such
 * cpu exists.
 */
static int extioi_get_index_from_archid(LoongArchExtIOICommonState *s,
                                        uint64_t arch_id)
{
    int idx;

    for (idx = 0; idx < s->num_cpu; idx++) {
        if (s->cpu[idx].arch_id == arch_id) {
            /* only report a hit when the matching cpu is actually present */
            return s->cpu[idx].cpu ? idx : -1;
        }
    }

    return -1;
}
36
/*
 * Re-evaluate one irq line and, when needed, forward the change to the
 * parent (per-cpu, per-ip) irq.
 *
 * @irq:   extioi irq number (0 .. EXTIOI_IRQS - 1)
 * @level: 1 to raise, 0 to lower
 *
 * Several extioi irqs can share one parent pin; the parent is only
 * toggled when this irq is the first to assert (raise) or the last to
 * deassert (lower) on that pin, tracked via sw_isr.
 */
static void extioi_update_irq(LoongArchExtIOICommonState *s, int irq, int level)
{
    int ipnum, cpu, found, irq_index;
    uint32_t irq_mask;

    ipnum = s->sw_ipmap[irq / 32];     /* parent pin for this irq group */
    cpu = s->sw_coremap[irq];          /* routed cpu for this irq */
    irq_index = irq / 32;
    /* use an unsigned shift: "1 << 31" is undefined behaviour for int */
    irq_mask = 1U << (irq & 0x1f);

    if (level) {
        /* if not enable return false */
        if (((s->enable[irq_index]) & irq_mask) == 0) {
            return;
        }
        s->cpu[cpu].coreisr[irq_index] |= irq_mask;
        found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS);
        set_bit(irq, s->cpu[cpu].sw_isr[ipnum]);
        if (found < EXTIOI_IRQS) {
            /* other irq is handling, need not update parent irq level */
            return;
        }
    } else {
        s->cpu[cpu].coreisr[irq_index] &= ~irq_mask;
        clear_bit(irq, s->cpu[cpu].sw_isr[ipnum]);
        found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS);
        if (found < EXTIOI_IRQS) {
            /* other irq is handling, need not update parent irq level */
            return;
        }
    }
    qemu_set_irq(s->cpu[cpu].parent_irq[ipnum], level);
}
69
extioi_setirq(void * opaque,int irq,int level)70 static void extioi_setirq(void *opaque, int irq, int level)
71 {
72 LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
73 trace_loongarch_extioi_setirq(irq, level);
74 if (level) {
75 set_bit32(irq, s->isr);
76 } else {
77 clear_bit32(irq, s->isr);
78 }
79 extioi_update_irq(s, irq, level);
80 }
81
extioi_readw(void * opaque,hwaddr addr,uint64_t * data,unsigned size,MemTxAttrs attrs)82 static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data,
83 unsigned size, MemTxAttrs attrs)
84 {
85 LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
86 unsigned long offset = addr & 0xffff;
87 uint32_t index, cpu;
88
89 switch (offset) {
90 case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END - 1:
91 index = (offset - EXTIOI_NODETYPE_START) >> 2;
92 *data = s->nodetype[index];
93 break;
94 case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END - 1:
95 index = (offset - EXTIOI_IPMAP_START) >> 2;
96 *data = s->ipmap[index];
97 break;
98 case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1:
99 index = (offset - EXTIOI_ENABLE_START) >> 2;
100 *data = s->enable[index];
101 break;
102 case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END - 1:
103 index = (offset - EXTIOI_BOUNCE_START) >> 2;
104 *data = s->bounce[index];
105 break;
106 case EXTIOI_COREISR_START ... EXTIOI_COREISR_END - 1:
107 index = (offset - EXTIOI_COREISR_START) >> 2;
108 /* using attrs to get current cpu index */
109 cpu = attrs.requester_id;
110 *data = s->cpu[cpu].coreisr[index];
111 break;
112 case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1:
113 index = (offset - EXTIOI_COREMAP_START) >> 2;
114 *data = s->coremap[index];
115 break;
116 default:
117 break;
118 }
119
120 trace_loongarch_extioi_readw(addr, *data);
121 return MEMTX_OK;
122 }
123
/*
 * Walk the pending (ISR) bits selected by @mask in enable-register word
 * @index and re-evaluate each corresponding irq at @level.
 *
 * Called when enable bits flip so that already-pending sources are
 * delivered (level = 1) or withdrawn (level = 0).
 */
static inline void extioi_enable_irq(LoongArchExtIOICommonState *s, int index,
                                     uint32_t mask, int level)
{
    uint32_t val;
    int irq;

    val = mask & s->isr[index];
    irq = ctz32(val);
    while (irq != 32) {
        /*
         * enable bit change from 0 to 1,
         * need to update irq by pending bits
         */
        extioi_update_irq(s, irq + index * 32, level);
        /* 1U: "1 << 31" would be undefined behaviour for int */
        val &= ~(1U << irq);
        irq = ctz32(val);
    }
}
142
/*
 * Refresh the software irq->cpu routing table from a 4-byte coremap
 * register value covering irqs [irq, irq + 3].
 *
 * When @notify is true and a remapped irq is currently pending, it is
 * lowered on the old cpu before being raised on the new one so the
 * routing change takes effect immediately.
 */
static inline void extioi_update_sw_coremap(LoongArchExtIOICommonState *s,
                                            int irq, uint64_t val, bool notify)
{
    int i, cpu, cpuid;

    /*
     * LoongArch only supports little endian, so parse the register
     * value with little-endian byte order.
     */
    val = cpu_to_le64(val);

    for (i = 0; i < 4; i++) {
        cpuid = val & 0xff;
        val = val >> 8;

        if (!(s->status & BIT(EXTIOI_ENABLE_CPU_ENCODE))) {
            /*
             * Without the cpu-encode feature the byte is decoded as a
             * one-hot cpu mask; invalid selections (no bit set, or a
             * bit >= 4) fall back to cpu 0 (ctz32(0) returns 32).
             */
            cpuid = ctz32(cpuid);
            cpuid = (cpuid >= 4) ? 0 : cpuid;
        }

        cpu = extioi_get_index_from_archid(s, cpuid);
        if (cpu < 0) {
            /* no present cpu with this arch_id: keep the old routing */
            continue;
        }

        if (s->sw_coremap[irq + i] == cpu) {
            continue;
        }

        if (notify && test_bit32(irq + i, s->isr)) {
            /*
             * lower irq at old cpu and raise irq at new cpu
             */
            extioi_update_irq(s, irq + i, 0);
            s->sw_coremap[irq + i] = cpu;
            extioi_update_irq(s, irq + i, 1);
        } else {
            s->sw_coremap[irq + i] = cpu;
        }
    }
}
184
/*
 * Rebuild four entries of the software irq-group -> parent-pin map
 * from one 4-byte ipmap register value.
 */
static inline void extioi_update_sw_ipmap(LoongArchExtIOICommonState *s,
                                          int index, uint64_t val)
{
    uint8_t pin;
    int i;

    /*
     * LoongArch only supports little endian, so parse the register
     * value with little-endian byte order.
     */
    val = cpu_to_le64(val);
    for (i = 0; i < 4; i++) {
        /* one-hot decode; anything invalid maps to pin 0 (ctz32(0)=32) */
        pin = ctz32(val & 0xff);
        s->sw_ipmap[index * 4 + i] = (pin >= 4) ? 0 : pin;
        val >>= 8;
    }
}
204
extioi_writew(void * opaque,hwaddr addr,uint64_t val,unsigned size,MemTxAttrs attrs)205 static MemTxResult extioi_writew(void *opaque, hwaddr addr,
206 uint64_t val, unsigned size,
207 MemTxAttrs attrs)
208 {
209 LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
210 int cpu, index, old_data, irq;
211 uint32_t offset;
212
213 trace_loongarch_extioi_writew(addr, val);
214 offset = addr & 0xffff;
215
216 switch (offset) {
217 case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END - 1:
218 index = (offset - EXTIOI_NODETYPE_START) >> 2;
219 s->nodetype[index] = val;
220 break;
221 case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END - 1:
222 /*
223 * ipmap cannot be set at runtime, can be set only at the beginning
224 * of intr driver, need not update upper irq level
225 */
226 index = (offset - EXTIOI_IPMAP_START) >> 2;
227 s->ipmap[index] = val;
228 extioi_update_sw_ipmap(s, index, val);
229 break;
230 case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1:
231 index = (offset - EXTIOI_ENABLE_START) >> 2;
232 old_data = s->enable[index];
233 s->enable[index] = val;
234
235 /* unmask irq */
236 val = s->enable[index] & ~old_data;
237 extioi_enable_irq(s, index, val, 1);
238
239 /* mask irq */
240 val = ~s->enable[index] & old_data;
241 extioi_enable_irq(s, index, val, 0);
242 break;
243 case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END - 1:
244 /* do not emulate hw bounced irq routing */
245 index = (offset - EXTIOI_BOUNCE_START) >> 2;
246 s->bounce[index] = val;
247 break;
248 case EXTIOI_COREISR_START ... EXTIOI_COREISR_END - 1:
249 index = (offset - EXTIOI_COREISR_START) >> 2;
250 /* using attrs to get current cpu index */
251 cpu = attrs.requester_id;
252 old_data = s->cpu[cpu].coreisr[index];
253 s->cpu[cpu].coreisr[index] = old_data & ~val;
254 /* write 1 to clear interrupt */
255 old_data &= val;
256 irq = ctz32(old_data);
257 while (irq != 32) {
258 extioi_update_irq(s, irq + index * 32, 0);
259 old_data &= ~(1 << irq);
260 irq = ctz32(old_data);
261 }
262 break;
263 case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1:
264 irq = offset - EXTIOI_COREMAP_START;
265 index = irq / 4;
266 s->coremap[index] = val;
267
268 extioi_update_sw_coremap(s, irq, val, true);
269 break;
270 default:
271 break;
272 }
273 return MEMTX_OK;
274 }
275
/*
 * MMIO ops for the main extioi register block: guest accesses of 4 or 8
 * bytes are accepted and split into 4-byte implementation accesses.
 */
static const MemoryRegionOps extioi_ops = {
    .read_with_attrs = extioi_readw,
    .write_with_attrs = extioi_writew,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
285
extioi_virt_readw(void * opaque,hwaddr addr,uint64_t * data,unsigned size,MemTxAttrs attrs)286 static MemTxResult extioi_virt_readw(void *opaque, hwaddr addr, uint64_t *data,
287 unsigned size, MemTxAttrs attrs)
288 {
289 LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
290
291 switch (addr) {
292 case EXTIOI_VIRT_FEATURES:
293 *data = s->features;
294 break;
295 case EXTIOI_VIRT_CONFIG:
296 *data = s->status;
297 break;
298 default:
299 g_assert_not_reached();
300 }
301
302 return MEMTX_OK;
303 }
304
extioi_virt_writew(void * opaque,hwaddr addr,uint64_t val,unsigned size,MemTxAttrs attrs)305 static MemTxResult extioi_virt_writew(void *opaque, hwaddr addr,
306 uint64_t val, unsigned size,
307 MemTxAttrs attrs)
308 {
309 LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
310
311 switch (addr) {
312 case EXTIOI_VIRT_FEATURES:
313 return MEMTX_ACCESS_ERROR;
314
315 case EXTIOI_VIRT_CONFIG:
316 /*
317 * extioi features can only be set at disabled status
318 */
319 if ((s->status & BIT(EXTIOI_ENABLE)) && val) {
320 return MEMTX_ACCESS_ERROR;
321 }
322
323 s->status = val & s->features;
324 break;
325 default:
326 g_assert_not_reached();
327 }
328 return MEMTX_OK;
329 }
330
/*
 * MMIO ops for the virt-extension register window; same access-size
 * policy as the main register block.
 */
static const MemoryRegionOps extioi_virt_ops = {
    .read_with_attrs = extioi_virt_readw,
    .write_with_attrs = extioi_virt_writew,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
340
/*
 * Device realize: run the common parent realize, then either hand the
 * irqchip to KVM or wire up the TCG emulation (GPIO inputs, output irqs
 * and the MMIO regions).
 */
static void loongarch_extioi_realize(DeviceState *dev, Error **errp)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev);
    LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    Error *local_err = NULL;
    int i;

    lec->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) {
        /* advertise the full virt feature set */
        s->features |= EXTIOI_VIRT_HAS_FEATURES;
    } else {
        /* without the virt extension the controller is always enabled */
        s->status |= BIT(EXTIOI_ENABLE);
    }

    if (kvm_irqchip_in_kernel()) {
        /* in-kernel irqchip: no MMIO/GPIO emulation needed here */
        kvm_extioi_realize(dev, errp);
    } else {
        for (i = 0; i < EXTIOI_IRQS; i++) {
            sysbus_init_irq(sbd, &s->irq[i]);
        }

        qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS);
        memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops,
                              s, "extioi_system_mem", 0x900);
        sysbus_init_mmio(sbd, &s->extioi_system_mem);
        if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) {
            memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops,
                                  s, "extioi_virt", EXTIOI_VIRT_SIZE);
            sysbus_init_mmio(sbd, &s->virt_extend);
        }
    }
}
379
/* Device unrealize: release the per-cpu state array. */
static void loongarch_extioi_unrealize(DeviceState *dev)
{
    g_free(LOONGARCH_EXTIOI_COMMON(dev)->cpu);
}
386
/*
 * Resettable "hold" phase: run the parent phase first, then push the
 * (reset) state into the in-kernel irqchip when KVM is in use.
 */
static void loongarch_extioi_reset_hold(Object *obj, ResetType type)
{
    LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(obj);

    if (lec->parent_phases.hold) {
        lec->parent_phases.hold(obj, type);
    }

    if (kvm_irqchip_in_kernel()) {
        kvm_extioi_put(obj, 0);
    }
}
399
/*
 * vmstate pre_save hook: with an in-kernel irqchip the live state must
 * first be fetched from KVM; otherwise there is nothing to do.
 */
static int vmstate_extioi_pre_save(void *opaque)
{
    return kvm_irqchip_in_kernel() ? kvm_extioi_get(opaque) : 0;
}
408
vmstate_extioi_post_load(void * opaque,int version_id)409 static int vmstate_extioi_post_load(void *opaque, int version_id)
410 {
411 LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
412 int i, start_irq;
413
414 if (kvm_irqchip_in_kernel()) {
415 return kvm_extioi_put(opaque, version_id);
416 }
417
418 for (i = 0; i < (EXTIOI_IRQS / 4); i++) {
419 start_irq = i * 4;
420 extioi_update_sw_coremap(s, start_irq, s->coremap[i], false);
421 }
422
423 for (i = 0; i < (EXTIOI_IRQS_IPMAP_SIZE / 4); i++) {
424 extioi_update_sw_ipmap(s, i, s->ipmap[i]);
425 }
426
427 return 0;
428 }
429
/*
 * Class init: chain realize/unrealize/reset to the common parent class
 * and install the vmstate save/load hooks.
 */
static void loongarch_extioi_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_CLASS(klass);
    LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    device_class_set_parent_realize(dc, loongarch_extioi_realize,
                                    &lec->parent_realize);
    device_class_set_parent_unrealize(dc, loongarch_extioi_unrealize,
                                      &lec->parent_unrealize);
    resettable_class_set_parent_phases(rc, NULL, loongarch_extioi_reset_hold,
                                       NULL, &lec->parent_phases);
    lecc->pre_save = vmstate_extioi_pre_save;
    lecc->post_load = vmstate_extioi_post_load;
}
446
/* QOM type registration for the extioi device. */
static const TypeInfo loongarch_extioi_types[] = {
    {
        .name = TYPE_LOONGARCH_EXTIOI,
        .parent = TYPE_LOONGARCH_EXTIOI_COMMON,
        .instance_size = sizeof(LoongArchExtIOIState),
        .class_size = sizeof(LoongArchExtIOIClass),
        .class_init = loongarch_extioi_class_init,
    }
};

DEFINE_TYPES(loongarch_extioi_types)
458