/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Loongson 3A5000 ext interrupt controller emulation
 *
 * Copyright (C) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/loongarch/virt.h"
#include "exec/address-spaces.h"
#include "hw/intc/loongarch_extioi.h"
#include "trace.h"

/*
 * Translate a guest arch_id into an index into s->cpu[].
 *
 * Returns the matching index only when the slot also has a CPU object
 * attached (s->cpu[i].cpu != NULL), otherwise -1.
 */
static int extioi_get_index_from_archid(LoongArchExtIOICommonState *s,
                                        uint64_t arch_id)
{
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        if (s->cpu[i].arch_id == arch_id) {
            break;
        }
    }

    if ((i < s->num_cpu) && s->cpu[i].cpu) {
        return i;
    }

    return -1;
}

/*
 * Propagate a level change of one extioi irq to its per-cpu parent pin.
 *
 * Routing comes from the software shadow maps: sw_ipmap selects the
 * parent pin (ip number) per 32-irq group, sw_coremap selects the target
 * cpu per irq.  sw_isr[ipnum] tracks which irqs currently drive a given
 * parent pin, so qemu_set_irq() is only called on the first raise and the
 * last clear for that pin; intermediate changes just update bookkeeping.
 */
static void extioi_update_irq(LoongArchExtIOICommonState *s, int irq, int level)
{
    int ipnum, cpu, found, irq_index, irq_mask;

    ipnum = s->sw_ipmap[irq / 32];
    cpu = s->sw_coremap[irq];
    irq_index = irq / 32;
    irq_mask = 1 << (irq & 0x1f);

    if (level) {
        /* masked irqs are not forwarded; nothing to do */
        if (((s->enable[irq_index]) & irq_mask) == 0) {
            return;
        }
        s->cpu[cpu].coreisr[irq_index] |= irq_mask;
        /* sample sw_isr *before* adding this irq to it */
        found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS);
        set_bit(irq, s->cpu[cpu].sw_isr[ipnum]);
        if (found < EXTIOI_IRQS) {
            /* another irq already holds the pin high; no edge needed */
            return;
        }
    } else {
        s->cpu[cpu].coreisr[irq_index] &= ~irq_mask;
        /* sample sw_isr *after* removing this irq from it */
        clear_bit(irq, s->cpu[cpu].sw_isr[ipnum]);
        found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS);
        if (found < EXTIOI_IRQS) {
            /* other irqs still pending on this pin; keep it high */
            return;
        }
    }
    qemu_set_irq(s->cpu[cpu].parent_irq[ipnum], level);
}

/*
 * GPIO input handler: latch the raw device-side level in s->isr, then
 * re-evaluate routing for that irq.
 */
static void extioi_setirq(void *opaque, int irq, int level)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
    trace_loongarch_extioi_setirq(irq, level);
    if (level) {
        set_bit32(irq, s->isr);
    } else {
        clear_bit32(irq, s->isr);
    }
    extioi_update_irq(s, irq, level);
}

/*
 * MMIO read handler for the main register window.
 *
 * COREISR reads are per-cpu; the requesting cpu is recovered from
 * attrs.requester_id.
 *
 * NOTE(review): the default case leaves *data untouched — presumably the
 * caller zero-initializes it; verify against the memory core contract.
 */
static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned size, MemTxAttrs attrs)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
    unsigned long offset = addr & 0xffff;
    uint32_t index, cpu;

    switch (offset) {
    case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END - 1:
        index = (offset - EXTIOI_NODETYPE_START) >> 2;
        *data = s->nodetype[index];
        break;
    case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END - 1:
        index = (offset - EXTIOI_IPMAP_START) >> 2;
        *data = s->ipmap[index];
        break;
    case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1:
        index = (offset - EXTIOI_ENABLE_START) >> 2;
        *data = s->enable[index];
        break;
    case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END - 1:
        index = (offset - EXTIOI_BOUNCE_START) >> 2;
        *data = s->bounce[index];
        break;
    case EXTIOI_COREISR_START ... EXTIOI_COREISR_END - 1:
        index = (offset - EXTIOI_COREISR_START) >> 2;
        /* using attrs to get current cpu index */
        cpu = attrs.requester_id;
        *data = s->cpu[cpu].coreisr[index];
        break;
    case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1:
        index = (offset - EXTIOI_COREMAP_START) >> 2;
        *data = s->coremap[index];
        break;
    default:
        break;
    }

    trace_loongarch_extioi_readw(addr, *data);
    return MEMTX_OK;
}

/*
 * Re-drive every pending irq covered by @mask in 32-irq group @index at
 * @level.  Called when the guest flips enable bits: newly unmasked
 * pending irqs must be raised (level = 1), newly masked ones lowered
 * (level = 0).
 */
static inline void extioi_enable_irq(LoongArchExtIOICommonState *s, int index,
                                     uint32_t mask, int level)
{
    uint32_t val;
    int irq;

    val = mask & s->isr[index];
    /* ctz32 returns 32 when no bits remain, terminating the loop */
    irq = ctz32(val);
    while (irq != 32) {
        /*
         * the enable bit for this pending irq changed state, so the
         * routed level must be re-evaluated
         */
        extioi_update_irq(s, irq + index * 32, level);
        val &= ~(1 << irq);
        irq = ctz32(val);
    }
}

/*
 * Rebuild sw_coremap for the 4 consecutive irqs starting at @irq from one
 * 32-bit coremap register value (each byte routes one irq).
 *
 * Without EXTIOI_ENABLE_CPU_ENCODE the byte is a one-hot core bitmap and
 * is converted to a core number with ctz32; an empty or out-of-range
 * bitmap (ctz32 result >= 4, including ctz32(0) == 32) falls back to
 * core 0.  With CPU_ENCODE set the byte is taken as an arch_id directly.
 *
 * When @notify is set and the irq is currently asserted, the irq is
 * lowered at the old cpu before being raised at the new one.
 */
static inline void extioi_update_sw_coremap(LoongArchExtIOICommonState *s,
                                            int irq, uint64_t val, bool notify)
{
    int i, cpu, cpuid;

    /*
     * LoongArch only supports little endian, so parse the value as
     * little endian (byte-swap on big-endian hosts so that (val & 0xff)
     * yields the first guest byte).
     */
    val = cpu_to_le64(val);

    for (i = 0; i < 4; i++) {
        cpuid = val & 0xff;
        val = val >> 8;

        if (!(s->status & BIT(EXTIOI_ENABLE_CPU_ENCODE))) {
            cpuid = ctz32(cpuid);
            cpuid = (cpuid >= 4) ? 0 : cpuid;
        }

        cpu = extioi_get_index_from_archid(s, cpuid);
        if (cpu < 0) {
            /* no such cpu present; keep the previous routing */
            continue;
        }

        if (s->sw_coremap[irq + i] == cpu) {
            continue;
        }

        if (notify && test_bit32(irq + i, s->isr)) {
            /*
             * lower irq at old cpu and raise irq at new cpu
             */
            extioi_update_irq(s, irq + i, 0);
            s->sw_coremap[irq + i] = cpu;
            extioi_update_irq(s, irq + i, 1);
        } else {
            s->sw_coremap[irq + i] = cpu;
        }
    }
}

/*
 * Rebuild sw_ipmap entries 4*@index .. 4*@index+3 from one 32-bit ipmap
 * register value.  Each byte is a one-hot parent-pin bitmap, converted
 * to a pin number with ctz32; values >= 4 (including an empty bitmap)
 * fall back to pin 0.
 */
static inline void extioi_update_sw_ipmap(LoongArchExtIOICommonState *s,
                                          int index, uint64_t val)
{
    int i;
    uint8_t ipnum;

    /*
     * LoongArch only supports little endian, so parse the value as
     * little endian.
     */
    val = cpu_to_le64(val);
    for (i = 0; i < 4; i++) {
        ipnum = val & 0xff;
        ipnum = ctz32(ipnum);
        ipnum = (ipnum >= 4) ? 0 : ipnum;
        s->sw_ipmap[index * 4 + i] = ipnum;
        val = val >> 8;
    }
}

/*
 * MMIO write handler for the main register window.
 *
 * Besides updating the raw register arrays, this keeps the software
 * shadow maps (sw_ipmap / sw_coremap) coherent and re-drives parent irq
 * lines where a write changes effective routing or masking.
 */
static MemTxResult extioi_writew(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size,
                                 MemTxAttrs attrs)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
    int cpu, index, old_data, irq;
    uint32_t offset;

    trace_loongarch_extioi_writew(addr, val);
    offset = addr & 0xffff;

    switch (offset) {
    case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END - 1:
        index = (offset - EXTIOI_NODETYPE_START) >> 2;
        s->nodetype[index] = val;
        break;
    case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END - 1:
        /*
         * ipmap cannot be set at runtime, can be set only at the beginning
         * of intr driver, need not update upper irq level
         */
        index = (offset - EXTIOI_IPMAP_START) >> 2;
        s->ipmap[index] = val;
        extioi_update_sw_ipmap(s, index, val);
        break;
    case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1:
        index = (offset - EXTIOI_ENABLE_START) >> 2;
        old_data = s->enable[index];
        s->enable[index] = val;

        /* unmask irq: bits that went 0 -> 1, raise if pending */
        val = s->enable[index] & ~old_data;
        extioi_enable_irq(s, index, val, 1);

        /* mask irq: bits that went 1 -> 0, lower if pending */
        val = ~s->enable[index] & old_data;
        extioi_enable_irq(s, index, val, 0);
        break;
    case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END - 1:
        /* do not emulate hw bounced irq routing */
        index = (offset - EXTIOI_BOUNCE_START) >> 2;
        s->bounce[index] = val;
        break;
    case EXTIOI_COREISR_START ... EXTIOI_COREISR_END - 1:
        index = (offset - EXTIOI_COREISR_START) >> 2;
        /* using attrs to get current cpu index */
        cpu = attrs.requester_id;
        old_data = s->cpu[cpu].coreisr[index];
        s->cpu[cpu].coreisr[index] = old_data & ~val;
        /* write 1 to clear interrupt: lower each cleared-and-pending irq */
        old_data &= val;
        irq = ctz32(old_data);
        while (irq != 32) {
            extioi_update_irq(s, irq + index * 32, 0);
            old_data &= ~(1 << irq);
            irq = ctz32(old_data);
        }
        break;
    case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1:
        irq = offset - EXTIOI_COREMAP_START;
        index = irq / 4;
        s->coremap[index] = val;

        /* notify=true: migrate any asserted irq to its new cpu */
        extioi_update_sw_coremap(s, irq, val, true);
        break;
    default:
        break;
    }
    return MEMTX_OK;
}

static const MemoryRegionOps extioi_ops = {
    .read_with_attrs = extioi_readw,
    .write_with_attrs = extioi_writew,
    /* registers are 32-bit; the core splits wider guest accesses */
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Read handler for the virt-extension window: exposes the feature bitmap
 * and the current status register.  Any other offset is a programming
 * error in the region setup.
 */
static MemTxResult extioi_virt_readw(void *opaque, hwaddr addr, uint64_t *data,
                                     unsigned size, MemTxAttrs attrs)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);

    switch (addr) {
    case EXTIOI_VIRT_FEATURES:
        *data = s->features;
        break;
    case EXTIOI_VIRT_CONFIG:
        *data = s->status;
        break;
    default:
        g_assert_not_reached();
    }

    return MEMTX_OK;
}

/*
 * Write handler for the virt-extension window.  FEATURES is read-only;
 * CONFIG may only be written while the controller is still disabled,
 * and only feature bits actually advertised can be set.
 */
static MemTxResult extioi_virt_writew(void *opaque, hwaddr addr,
                                      uint64_t val, unsigned size,
                                      MemTxAttrs attrs)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);

    switch (addr) {
    case EXTIOI_VIRT_FEATURES:
        return MEMTX_ACCESS_ERROR;

    case EXTIOI_VIRT_CONFIG:
        /*
         * extioi features can only be set at disabled status
         */
        if ((s->status & BIT(EXTIOI_ENABLE)) && val) {
            return MEMTX_ACCESS_ERROR;
        }

        /* only advertised feature bits can be enabled */
        s->status = val & s->features;
        break;
    default:
        g_assert_not_reached();
    }
    return MEMTX_OK;
}

static const MemoryRegionOps extioi_virt_ops = {
    .read_with_attrs = extioi_virt_readw,
    .write_with_attrs = extioi_virt_writew,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Realize: run the parent (common) realize first, then wire up the
 * output irq lines, the gpio inputs and the MMIO windows.  The virt
 * extension window is only created when the feature is configured;
 * without it the controller starts out enabled.
 */
static void loongarch_extioi_realize(DeviceState *dev, Error **errp)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev);
    LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    Error *local_err = NULL;
    int i;

    lec->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    for (i = 0; i < EXTIOI_IRQS; i++) {
        sysbus_init_irq(sbd, &s->irq[i]);
    }

    qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS);
    memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops,
                          s, "extioi_system_mem", 0x900);
    sysbus_init_mmio(sbd, &s->extioi_system_mem);

    if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) {
        memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops,
                              s, "extioi_virt", EXTIOI_VIRT_SIZE);
        sysbus_init_mmio(sbd, &s->virt_extend);
        s->features |= EXTIOI_VIRT_HAS_FEATURES;
    } else {
        s->status |= BIT(EXTIOI_ENABLE);
    }
}

/* Unrealize: release the per-cpu state array allocated by the parent. */
static void loongarch_extioi_unrealize(DeviceState *dev)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev);

    g_free(s->cpu);
}

/* Legacy reset: clear the virt-extension status register. */
static void loongarch_extioi_reset(DeviceState *d)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(d);

    s->status = 0;
}

/*
 * Post-load hook: the sw_coremap / sw_ipmap shadow maps are derived
 * state, so rebuild them from the migrated coremap / ipmap registers
 * (notify=false: no irq lines are toggled during restore).
 */
static int vmstate_extioi_post_load(void *opaque, int version_id)
{
    LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque);
    int i, start_irq;

    for (i = 0; i < (EXTIOI_IRQS / 4); i++) {
        start_irq = i * 4;
        extioi_update_sw_coremap(s, start_irq, s->coremap[i], false);
    }

    for (i = 0; i < (EXTIOI_IRQS_IPMAP_SIZE / 4); i++) {
        extioi_update_sw_ipmap(s, i, s->ipmap[i]);
    }

    return 0;
}

static void loongarch_extioi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_CLASS(klass);
    LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_CLASS(klass);

    device_class_set_parent_realize(dc, loongarch_extioi_realize,
                                    &lec->parent_realize);
    device_class_set_parent_unrealize(dc, loongarch_extioi_unrealize,
                                      &lec->parent_unrealize);
    device_class_set_legacy_reset(dc, loongarch_extioi_reset);
    lecc->post_load = vmstate_extioi_post_load;
}

static const TypeInfo loongarch_extioi_types[] = {
    {
        .name = TYPE_LOONGARCH_EXTIOI,
        .parent = TYPE_LOONGARCH_EXTIOI_COMMON,
        .instance_size = sizeof(LoongArchExtIOIState),
        .class_size = sizeof(LoongArchExtIOIClass),
        .class_init = loongarch_extioi_class_init,
    }
};

DEFINE_TYPES(loongarch_extioi_types)