/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "system/qtest.h"
#include "system/tcg.h"
#include "system/kvm.h"
#include "kvm/kvm_loongarch.h"
#include "hw/qdev-properties.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "csr.h"
#ifndef CONFIG_USER_ONLY
#include "system/reset.h"
#endif
#include "vec.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
#endif

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

struct TypeExcp {
    int32_t exccode;
    const char * const name;
};

static const struct TypeExcp excp_names[] = {
    {EXCCODE_INT, "Interrupt"},
    {EXCCODE_PIL, "Page invalid exception for load"},
    {EXCCODE_PIS, "Page invalid exception for store"},
    {EXCCODE_PIF, "Page invalid exception for fetch"},
    {EXCCODE_PME, "Page modified exception"},
    {EXCCODE_PNR, "Page Not Readable exception"},
    {EXCCODE_PNX, "Page Not Executable exception"},
    {EXCCODE_PPI, "Page Privilege error"},
    {EXCCODE_ADEF, "Address error for instruction fetch"},
    {EXCCODE_ADEM, "Address error for Memory access"},
    {EXCCODE_SYS, "Syscall"},
    {EXCCODE_BRK, "Break"},
    {EXCCODE_INE, "Instruction Non-Existent"},
    {EXCCODE_IPE, "Instruction privilege error"},
    {EXCCODE_FPD, "Floating Point Disabled"},
    {EXCCODE_FPE, "Floating Point Exception"},
    {EXCCODE_DBP, "Debug breakpoint"},
    {EXCCODE_BCE, "Bound Check Exception"},
    {EXCCODE_SXD, "128 bit vector instructions Disable exception"},
    {EXCCODE_ASXD, "256 bit vector instructions Disable exception"},
    {EXCP_HLT, "EXCP_HLT"},
};

const char *loongarch_exception_name(int32_t exception)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(excp_names); i++) {
        if (excp_names[i].exccode == exception) {
            return excp_names[i].name;
        }
    }
    return "Unknown";
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    set_pc(cpu_env(cs), value);
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    return cpu_env(cs)->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

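    /*
     * Forward the IRQ line change: KVM injects the interrupt in the
     * kernel, while under TCG the level is mirrored into CSR.ESTAT.IS
     * and the hard interrupt is raised or cleared accordingly.
     */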
    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

    if (kvm_enabled()) {
        kvm_loongarch_set_interrupt(cpu, irq, level);
    } else if (tcg_enabled()) {
        env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);
        if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
}

static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
          !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check if there is a pending and not masked out interrupt */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}
#endif

#ifdef CONFIG_TCG
#ifndef CONFIG_USER_ONLY
static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    bool update_badinstr = 1;
    int cause = -1;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n",
                      __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA,
                      cs->exception_index,
                      loongarch_exception_name(cs->exception_index));
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        set_pc(env, env->CSR_EENTRY + 0x480);
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_SXD:
    case EXCCODE_ASXD:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_BCE:
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) is not supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);

    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        set_pc(env, env->CSR_EENTRY +
               (EXCCODE_EXTERNAL_INT + vector) * vec_size);
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx " ExS "
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            set_pc(env, env->CSR_TLBRENTRY);
        } else {
            set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s,\n ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      " BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

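/*
 * A failed bus transaction is reported to the guest as an address error:
 * ADEF for instruction fetches, ADEM for data accesses.
 */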
static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        CPULoongArchState *env = cpu_env(cs);

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));
    set_pc(cpu_env(cs), tb->pc);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    set_pc(cpu_env(cs), data[0]);
}
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
static bool loongarch_cpu_has_work(CPUState *cs)
{
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) {
        has_work = true;
    }

    return has_work;
}
#endif /* !CONFIG_USER_ONLY */

static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPULoongArchState *env = cpu_env(cs);

    if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
        return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
    }
    return MMU_DA_IDX;
}

static void loongarch_la464_init_csr(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    static bool initialized;
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i, num;

    if (!initialized) {
        initialized = true;
        num = FIELD_EX64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM);
        for (i = num; i < 16; i++) {
            set_csr_flag(LOONGARCH_CSR_SAVE(i), CSRFL_UNUSED);
        }
        set_csr_flag(LOONGARCH_CSR_IMPCTL1, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_IMPCTL2, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRCTL, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRINFO1, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRINFO2, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRENTRY, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRERA, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRSAVE, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_CTAG, CSRFL_UNUSED);
    }
#endif
}

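/*
 * la464 is the core used in the Loongson-3A5000: fill CPUCFG and the
 * privileged-resource configuration CSRs with that core's values.
 */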
static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    uint32_t data = 0, field;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010;  /* PRID */

    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    if (kvm_enabled()) {
        /* GPA address width of VM is 47, field value is 47 - 1 */
        field = 0x2e;
    } else {
        field = 0x2f; /* 48 bit - 1 */
    }
    data = FIELD_DP32(data, CPUCFG1, PALEN, field);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSX, 1);
    data = FIELD_DP32(data, CPUCFG2, LASX, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSPW, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);

    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM, 8);
    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, TIMER_BITS, 0x2f);
    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, VSMAX, 7);

    env->CSR_PRCFG2 = 0x3ffff000;

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    loongarch_la464_init_csr(obj);
    loongarch_cpu_post_init(obj);
}

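/*
 * la132 is a 32-bit (LA32) core.  Only CPUCFG is initialised here; the
 * core has no LSX/LASX and uses 32-bit physical and virtual addresses.
 */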
static void loongarch_la132_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    uint32_t data = 0;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-1C103";
    env->cpucfg[0] = 0x148042;  /* PRID */

    data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 0);
    data = FIELD_DP32(data, CPUCFG1, EP, 0);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;
}

static void loongarch_max_initfn(Object *obj)
{
    /* '-cpu max' for TCG: we use cpu la464. */
    loongarch_la464_initfn(obj);
}

static void loongarch_cpu_reset_hold(Object *obj, ResetType type)
{
    uint8_t tlb_ps;
    CPUState *cs = CPU(obj);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(obj);
    CPULoongArchState *env = cpu_env(cs);

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj, type);
    }

#ifdef CONFIG_TCG
    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
#endif
    env->fcsr0 = 0x0;

    int n;
    /* Set CSR register values after reset, see section 6.4 of the manual. */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 0);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_CPUID = cs->cpu_index;
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);
    env->CSR_TID = cs->cpu_index;
    /*
     * Workaround for edk2-stable202408: the CSR PGD register is set only
     * when its value is zero on the boot CPU, which causes a reboot issue.
     *
     * Clear the TLB-related CSR registers here.
     */
    env->CSR_PGDH = 0;
    env->CSR_PGDL = 0;
    env->CSR_PWCH = 0;
    env->CSR_EENTRY = 0;
    env->CSR_TLBRENTRY = 0;
    env->CSR_MERRENTRY = 0;
    /* set CSR_PWCL.PTBASE and CSR_STLBPS.PS bits from CSR_PRCFG2 */
    if (env->CSR_PRCFG2 == 0) {
        env->CSR_PRCFG2 = 0x3fffff000;
    }
    tlb_ps = ctz32(env->CSR_PRCFG2);
    env->CSR_STLBPS = FIELD_DP64(env->CSR_STLBPS, CSR_STLBPS, PS, tlb_ps);
    env->CSR_PWCL = FIELD_DP64(env->CSR_PWCL, CSR_PWCL, PTBASE, tlb_ps);
    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
#ifdef CONFIG_TCG
    memset(env->tlb, 0, sizeof(env->tlb));
#endif
    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cs);
    }
#endif

#ifdef CONFIG_TCG
    restore_fp_status(env);
#endif
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->endian = BFD_ENDIAN_LITTLE;
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    lacc->parent_realize(dev, errp);
}

static void loongarch_cpu_unrealizefn(DeviceState *dev)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
#endif

    lacc->parent_unrealize(dev);
}

static bool loongarch_get_lsx(Object *obj, Error **errp)
{
    return LOONGARCH_CPU(obj)->lsx != ON_OFF_AUTO_OFF;
}

static void loongarch_set_lsx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    uint32_t val;

    cpu->lsx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    if (cpu->lsx == ON_OFF_AUTO_OFF) {
        cpu->lasx = ON_OFF_AUTO_OFF;
        if (cpu->lasx == ON_OFF_AUTO_ON) {
            error_setg(errp, "Failed to disable LSX since LASX is enabled");
            return;
        }
    }

    if (kvm_enabled()) {
        /* kvm feature detection in function kvm_arch_init_vcpu */
        return;
    }

    /* LSX feature detection in TCG mode */
    val = cpu->env.cpucfg[2];
    if (cpu->lsx == ON_OFF_AUTO_ON) {
        if (FIELD_EX32(val, CPUCFG2, LSX) == 0) {
            error_setg(errp, "Failed to enable LSX in TCG mode");
            return;
        }
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, 0);
        val = cpu->env.cpucfg[2];
    }

    cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LSX, value);
}

static bool loongarch_get_lasx(Object *obj, Error **errp)
{
    return LOONGARCH_CPU(obj)->lasx != ON_OFF_AUTO_OFF;
}

static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    uint32_t val;

    cpu->lasx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
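    /* LASX depends on LSX: refuse to enable it while LSX is disabled. */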
    if ((cpu->lsx == ON_OFF_AUTO_OFF) && (cpu->lasx == ON_OFF_AUTO_ON)) {
        error_setg(errp, "Failed to enable LASX since LSX is disabled");
        return;
    }

    if (kvm_enabled()) {
        /* kvm feature detection in function kvm_arch_init_vcpu */
        return;
    }

    /* LASX feature detection in TCG mode */
    val = cpu->env.cpucfg[2];
    if (cpu->lasx == ON_OFF_AUTO_ON) {
        if (FIELD_EX32(val, CPUCFG2, LASX) == 0) {
            error_setg(errp, "Failed to enable LASX in TCG mode");
            return;
        }
    }

    cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, value);
}

void loongarch_cpu_post_init(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    cpu->lbt = ON_OFF_AUTO_OFF;
    cpu->pmu = ON_OFF_AUTO_OFF;
    cpu->lsx = ON_OFF_AUTO_AUTO;
    cpu->lasx = ON_OFF_AUTO_AUTO;
    object_property_add_bool(obj, "lsx", loongarch_get_lsx,
                             loongarch_set_lsx);
    object_property_add_bool(obj, "lasx", loongarch_get_lasx,
                             loongarch_set_lasx);
    /* lbt is enabled only in kvm mode, not supported in tcg mode */
    if (kvm_enabled()) {
        kvm_loongarch_cpu_post_init(cpu);
    }
}

static void loongarch_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
#ifdef CONFIG_TCG
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
#endif
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
    }

    return oc;
}

static void loongarch_cpu_dump_csr(CPUState *cs, FILE *f)
{
#ifndef CONFIG_USER_ONLY
    CPULoongArchState *env = cpu_env(cs);
    CSRInfo *csr_info;
    int64_t *addr;
    int i, j, len, col = 0;

    qemu_fprintf(f, "\n");

    /* Dump all generic CSR registers */
    for (i = 0; i < LOONGARCH_CSR_DBG; i++) {
        csr_info = get_csr(i);
        if (!csr_info || (csr_info->flags & CSRFL_UNUSED)) {
            if (i == (col + 3)) {
                qemu_fprintf(f, "\n");
            }

            continue;
        }

        if ((i > (col + 3)) || (i == col)) {
            col = i & ~3;
            qemu_fprintf(f, " CSR%03d:", col);
        }

        addr = (void *)env + csr_info->offset;
        qemu_fprintf(f, " %s ", csr_info->name);
        len = strlen(csr_info->name);
        for (; len < 6; len++) {
            qemu_fprintf(f, " ");
        }

        qemu_fprintf(f, "%" PRIx64, *addr);
        j = find_last_bit((void *)addr, BITS_PER_LONG) & (BITS_PER_LONG - 1);
        len += j / 4 + 1;
        for (; len < 22; len++) {
            qemu_fprintf(f, " ");
        }

        if (i == (col + 3)) {
            qemu_fprintf(f, "\n");
        }
    }
    qemu_fprintf(f, "\n");
#endif
}

static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPULoongArchState *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x\n", env->fcsr0);

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    /* csr */
    loongarch_cpu_dump_csr(cs, f);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
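        /* Only the low 64 bits (D(0)) of each vector register are shown. */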
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i],
                         env->fpr[i].vreg.D(0));
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}

#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ops.h"

static const TCGCPUOps loongarch_tcg_ops = {
    .initialize = loongarch_translate_init,
    .translate_code = loongarch_translate_code,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
    .restore_state_to_opc = loongarch_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .cpu_exec_halt = loongarch_cpu_has_work,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .has_work = loongarch_cpu_has_work,
    .write_elf64_note = loongarch_cpu_write_elf64_note,
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};

static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    return cpu->phy_id;
}
#endif

static const Property loongarch_cpu_properties[] = {
    DEFINE_PROP_INT32("socket-id", LoongArchCPU, socket_id, 0),
    DEFINE_PROP_INT32("core-id", LoongArchCPU, core_id, 0),
    DEFINE_PROP_INT32("thread-id", LoongArchCPU, thread_id, 0),
    DEFINE_PROP_INT32("node-id", LoongArchCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
};

static void loongarch_cpu_class_init(ObjectClass *c, void *data)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_props(dc, loongarch_cpu_properties);
    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &lacc->parent_realize);
    device_class_set_parent_unrealize(dc, loongarch_cpu_unrealizefn,
                                      &lacc->parent_unrealize);
    resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL,
                                       &lacc->parent_phases);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->mmu_index = loongarch_cpu_mmu_index;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
    cc->get_pc = loongarch_cpu_get_pc;
#ifndef CONFIG_USER_ONLY
    cc->get_arch_id = loongarch_cpu_get_arch_id;
    dc->vmsd = &vmstate_loongarch_cpu;
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;

#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif
    dc->user_creatable = true;
}

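/*
 * The 32-bit and 64-bit CPU subclasses differ only in the gdbstub
 * architecture name and register description they advertise.
 */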
static const gchar *loongarch32_gdb_arch_name(CPUState *cs)
{
    return "loongarch32";
}

static void loongarch32_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base32.xml";
    cc->gdb_arch_name = loongarch32_gdb_arch_name;
}

static const gchar *loongarch64_gdb_arch_name(CPUState *cs)
{
    return "loongarch64";
}

static void loongarch64_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_arch_name = loongarch64_gdb_arch_name;
}

#define DEFINE_LOONGARCH_CPU_TYPE(size, model, initfn) \
    { \
        .parent = TYPE_LOONGARCH##size##_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_align = __alignof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH32_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch32_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH64_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch64_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE(64, "la464", loongarch_la464_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(32, "la132", loongarch_la132_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(64, "max", loongarch_max_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)