/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "system/qtest.h"
#include "system/tcg.h"
#include "system/kvm.h"
#include "kvm/kvm_loongarch.h"
#include "hw/qdev-properties.h"
#include "exec/translation-block.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "csr.h"
#ifndef CONFIG_USER_ONLY
#include "system/reset.h"
#endif
#include "vec.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#endif
#include "tcg/tcg_loongarch.h"

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

struct TypeExcp {
    int32_t exccode;
    const char * const name;
};

static const struct TypeExcp excp_names[] = {
    {EXCCODE_INT, "Interrupt"},
    {EXCCODE_PIL, "Page invalid exception for load"},
    {EXCCODE_PIS, "Page invalid exception for store"},
    {EXCCODE_PIF, "Page invalid exception for fetch"},
    {EXCCODE_PME, "Page modified exception"},
    {EXCCODE_PNR, "Page Not Readable exception"},
    {EXCCODE_PNX, "Page Not Executable exception"},
    {EXCCODE_PPI, "Page Privilege error"},
    {EXCCODE_ADEF, "Address error for instruction fetch"},
    {EXCCODE_ADEM, "Address error for Memory access"},
    {EXCCODE_SYS, "Syscall"},
    {EXCCODE_BRK, "Break"},
    {EXCCODE_INE, "Instruction Non-Existent"},
    {EXCCODE_IPE, "Instruction privilege error"},
    {EXCCODE_FPD, "Floating Point Disabled"},
    {EXCCODE_FPE, "Floating Point Exception"},
    {EXCCODE_DBP, "Debug breakpoint"},
    {EXCCODE_BCE, "Bound Check Exception"},
    {EXCCODE_SXD, "128 bit vector instructions Disable exception"},
    {EXCCODE_ASXD, "256 bit vector instructions Disable exception"},
    {EXCP_HLT, "EXCP_HLT"},
};

const char *loongarch_exception_name(int32_t exception)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(excp_names); i++) {
        if (excp_names[i].exccode == exception) {
            return excp_names[i].name;
        }
    }
    return "Unknown";
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    set_pc(cpu_env(cs), value);
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    return cpu_env(cs)->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

    if (kvm_enabled()) {
        kvm_loongarch_set_interrupt(cpu, irq, level);
    } else if (tcg_enabled()) {
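        /*
         * Mirror the line level into CSR.ESTAT.IS[irq]; CPU_INTERRUPT_HARD is
         * asserted while any IS bit remains set.
         */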
        env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);
        if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
}

static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
           !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check whether there is a pending interrupt that is not masked out */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}
#endif

#ifdef CONFIG_TCG
#ifndef CONFIG_USER_ONLY
static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    bool update_badinstr = 1;
    int cause = -1;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n",
                      __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA,
                      cs->exception_index,
                      loongarch_exception_name(cs->exception_index));
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        set_pc(env, env->CSR_EENTRY + 0x480);
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_SXD:
    case EXCCODE_ASXD:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_BCE:
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) is not supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);

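    /*
     * CSR.ECFG.VS selects the spacing between exception entries: 0 means all
     * exceptions share a single entry, otherwise entries are spaced by
     * 2^VS instructions (4 bytes each).
     */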
    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        set_pc(env, env->CSR_EENTRY + \
               (EXCCODE_EXTERNAL_INT + vector) * vec_size);
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS"
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            set_pc(env, env->CSR_TLBRENTRY);
        } else {
            set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s\n, ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        CPULoongArchState *env = cpu_env(cs);

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx,
                                    vaddr result, vaddr base)
{
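    /*
     * In VA32 mode the effective virtual address space is 32 bits wide, so
     * wrap the computed address accordingly.
     */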
    return is_va32(cpu_env(cs)) ? (uint32_t)result : result;
}
#endif

static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    uint32_t flags;

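    /*
     * TB flags carry CRMD.PLV and CRMD.PG together with the EUEN
     * FP/LSX/LASX enables and the VA32 flag, so translated code is
     * specialized on privilege level and enabled units.
     */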
    flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK);
    flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE;
    flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE;
    flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE;
    flags |= is_va32(env) * HW_FLAGS_VA32;

    return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
}

static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));
    set_pc(cpu_env(cs), tb->pc);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    set_pc(cpu_env(cs), data[0]);
}
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
static bool loongarch_cpu_has_work(CPUState *cs)
{
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) {
        has_work = true;
    }

    return has_work;
}
#endif /* !CONFIG_USER_ONLY */

static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPULoongArchState *env = cpu_env(cs);

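    /*
     * With paging enabled (CRMD.PG) the MMU index is the current privilege
     * level; otherwise the CPU runs in direct address translation mode.
     */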
    if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
        return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
    }
    return MMU_DA_IDX;
}

static void loongarch_la464_init_csr(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    static bool initialized;
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i, num;

    if (!initialized) {
        initialized = true;
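        /*
         * SAVE registers beyond PRCFG1.SAVE_NUM, plus the machine error and
         * implementation-specific CSRs, are not implemented on la464.
         */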
        num = FIELD_EX64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM);
        for (i = num; i < 16; i++) {
            set_csr_flag(LOONGARCH_CSR_SAVE(i), CSRFL_UNUSED);
        }
        set_csr_flag(LOONGARCH_CSR_IMPCTL1, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_IMPCTL2, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRCTL, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRINFO1, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRINFO2, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRENTRY, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRERA, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_MERRSAVE, CSRFL_UNUSED);
        set_csr_flag(LOONGARCH_CSR_CTAG, CSRFL_UNUSED);
    }
#endif
}

static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    uint32_t data = 0, field;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010; /* PRID */

    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    if (kvm_enabled()) {
        /* GPA address width of VM is 47, field value is 47 - 1 */
        field = 0x2e;
    } else {
        field = 0x2f; /* 48 bit - 1 */
    }
    data = FIELD_DP32(data, CPUCFG1, PALEN, field);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, CRC, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSX, 1);
    data = FIELD_DP32(data, CPUCFG2, LASX, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSPW, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);

    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM, 8);
    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, TIMER_BITS, 0x2f);
    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, VSMAX, 7);

    env->CSR_PRCFG2 = 0x3ffff000;

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    loongarch_la464_init_csr(obj);
    loongarch_cpu_post_init(obj);
}

static void loongarch_la132_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    uint32_t data = 0;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-1C103";
    env->cpucfg[0] = 0x148042; /* PRID */

    data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 0);
    data = FIELD_DP32(data, CPUCFG1, EP, 0);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, CRC, 1);
    env->cpucfg[1] = data;
}

static void loongarch_max_initfn(Object *obj)
{
    /* '-cpu max' for TCG: we use cpu la464. */
    loongarch_la464_initfn(obj);
}

static void loongarch_cpu_reset_hold(Object *obj, ResetType type)
{
    uint8_t tlb_ps;
    CPUState *cs = CPU(obj);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(obj);
    CPULoongArchState *env = cpu_env(cs);

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj, type);
    }

#ifdef CONFIG_TCG
    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
#endif
    env->fcsr0 = 0x0;

    int n;
    /* Set CSR register values after reset; see section 6.4 of the manual. */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 0);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_CPUID = cs->cpu_index;
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);
    env->CSR_TID = cs->cpu_index;
    /*
     * Workaround for edk2-stable202408: the firmware sets the CSR PGD
     * register only if its value is zero on the boot cpu, which otherwise
     * causes a reboot issue.
     *
     * Clear the TLB-related CSR registers here.
     */
    env->CSR_PGDH = 0;
    env->CSR_PGDL = 0;
    env->CSR_PWCH = 0;
    env->CSR_EENTRY = 0;
    env->CSR_TLBRENTRY = 0;
    env->CSR_MERRENTRY = 0;
    /* set CSR_PWCL.PTBASE and CSR_STLBPS.PS bits from CSR_PRCFG2 */
    if (env->CSR_PRCFG2 == 0) {
        env->CSR_PRCFG2 = 0x3fffff000;
    }
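    /*
     * PRCFG2 is a bitmap of supported page sizes; its lowest set bit gives
     * the smallest supported page size.
     */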
    tlb_ps = ctz32(env->CSR_PRCFG2);
    env->CSR_STLBPS = FIELD_DP64(env->CSR_STLBPS, CSR_STLBPS, PS, tlb_ps);
    env->CSR_PWCL = FIELD_DP64(env->CSR_PWCL, CSR_PWCL, PTBASE, tlb_ps);
    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
#ifdef CONFIG_TCG
    memset(env->tlb, 0, sizeof(env->tlb));
#endif
    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cs);
    }
#endif

#ifdef CONFIG_TCG
    restore_fp_status(env);
#endif
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->endian = BFD_ENDIAN_LITTLE;
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    lacc->parent_realize(dev, errp);
}

static void loongarch_cpu_unrealizefn(DeviceState *dev)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
#endif

    lacc->parent_unrealize(dev);
}

static bool loongarch_get_lsx(Object *obj, Error **errp)
{
    return LOONGARCH_CPU(obj)->lsx != ON_OFF_AUTO_OFF;
}

static void loongarch_set_lsx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    uint32_t val;

    cpu->lsx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    if (cpu->lsx == ON_OFF_AUTO_OFF) {
        cpu->lasx = ON_OFF_AUTO_OFF;
        if (cpu->lasx == ON_OFF_AUTO_ON) {
            error_setg(errp, "Failed to disable LSX since LASX is enabled");
            return;
        }
    }

    if (kvm_enabled()) {
        /* kvm feature detection in function kvm_arch_init_vcpu */
        return;
    }

    /* LSX feature detection in TCG mode */
    val = cpu->env.cpucfg[2];
    if (cpu->lsx == ON_OFF_AUTO_ON) {
        if (FIELD_EX32(val, CPUCFG2, LSX) == 0) {
            error_setg(errp, "Failed to enable LSX in TCG mode");
            return;
        }
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, 0);
        val = cpu->env.cpucfg[2];
    }

    cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LSX, value);
}

static bool loongarch_get_lasx(Object *obj, Error **errp)
{
    return LOONGARCH_CPU(obj)->lasx != ON_OFF_AUTO_OFF;
}

static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    uint32_t val;

    cpu->lasx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    if ((cpu->lsx == ON_OFF_AUTO_OFF) && (cpu->lasx == ON_OFF_AUTO_ON)) {
        error_setg(errp, "Failed to enable LASX since LSX is disabled");
        return;
    }

    if (kvm_enabled()) {
        /* kvm feature detection in function kvm_arch_init_vcpu */
        return;
    }

    /* LASX feature detection in TCG mode */
    val = cpu->env.cpucfg[2];
    if (cpu->lasx == ON_OFF_AUTO_ON) {
        if (FIELD_EX32(val, CPUCFG2, LASX) == 0) {
            error_setg(errp, "Failed to enable LASX in TCG mode");
            return;
        }
    }

    cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, value);
}

void loongarch_cpu_post_init(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    cpu->lbt = ON_OFF_AUTO_OFF;
    cpu->pmu = ON_OFF_AUTO_OFF;
    cpu->lsx = ON_OFF_AUTO_AUTO;
    cpu->lasx = ON_OFF_AUTO_AUTO;
    object_property_add_bool(obj, "lsx", loongarch_get_lsx,
                             loongarch_set_lsx);
    object_property_add_bool(obj, "lasx", loongarch_get_lasx,
                             loongarch_set_lasx);
    /* lbt is enabled only in kvm mode, not supported in tcg mode */
    if (kvm_enabled()) {
        kvm_loongarch_cpu_post_init(cpu);
    }
}

static void loongarch_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
#ifdef CONFIG_TCG
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
#endif
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
    }

    return oc;
}

static void loongarch_cpu_dump_csr(CPUState *cs, FILE *f)
{
#ifndef CONFIG_USER_ONLY
    CPULoongArchState *env = cpu_env(cs);
    CSRInfo *csr_info;
    int64_t *addr;
    int i, j, len, col = 0;

    qemu_fprintf(f, "\n");

    /* Dump all generic CSR registers */
    for (i = 0; i < LOONGARCH_CSR_DBG; i++) {
        csr_info = get_csr(i);
        if (!csr_info || (csr_info->flags & CSRFL_UNUSED)) {
            if (i == (col + 3)) {
                qemu_fprintf(f, "\n");
            }

            continue;
        }

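        /* Print a " CSRxxx:" header whenever a new group of four CSRs starts. */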
        if ((i > (col + 3)) || (i == col)) {
            col = i & ~3;
            qemu_fprintf(f, " CSR%03d:", col);
        }

        addr = (void *)env + csr_info->offset;
        qemu_fprintf(f, " %s ", csr_info->name);
        len = strlen(csr_info->name);
        for (; len < 6; len++) {
            qemu_fprintf(f, " ");
        }

        qemu_fprintf(f, "%" PRIx64, *addr);
        j = find_last_bit((void *)addr, BITS_PER_LONG) & (BITS_PER_LONG - 1);
        len += j / 4 + 1;
        for (; len < 22; len++) {
            qemu_fprintf(f, " ");
        }

        if (i == (col + 3)) {
            qemu_fprintf(f, "\n");
        }
    }
    qemu_fprintf(f, "\n");
#endif
}

static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPULoongArchState *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x\n", env->fcsr0);

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    /* csr */
    loongarch_cpu_dump_csr(cs, f);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i],
                         env->fpr[i].vreg.D(0));
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}

#ifdef CONFIG_TCG
static const TCGCPUOps loongarch_tcg_ops = {
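    /* LoongArch is weakly ordered; no memory ordering bits are required by default. */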
    .guest_default_memory_order = 0,
    .mttcg_supported = true,

    .initialize = loongarch_translate_init,
    .translate_code = loongarch_translate_code,
    .get_tb_cpu_state = loongarch_get_tb_cpu_state,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
    .restore_state_to_opc = loongarch_restore_state_to_opc,
    .mmu_index = loongarch_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .pointer_wrap = loongarch_pointer_wrap,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .cpu_exec_halt = loongarch_cpu_has_work,
    .cpu_exec_reset = cpu_reset,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .has_work = loongarch_cpu_has_work,
    .write_elf64_note = loongarch_cpu_write_elf64_note,
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};

static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    return cpu->phy_id;
}
#endif

static const Property loongarch_cpu_properties[] = {
    DEFINE_PROP_INT32("socket-id", LoongArchCPU, socket_id, 0),
    DEFINE_PROP_INT32("core-id", LoongArchCPU, core_id, 0),
    DEFINE_PROP_INT32("thread-id", LoongArchCPU, thread_id, 0),
    DEFINE_PROP_INT32("node-id", LoongArchCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
};

static void loongarch_cpu_class_init(ObjectClass *c, const void *data)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_props(dc, loongarch_cpu_properties);
    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &lacc->parent_realize);
    device_class_set_parent_unrealize(dc, loongarch_cpu_unrealizefn,
                                      &lacc->parent_unrealize);
    resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL,
                                       &lacc->parent_phases);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
    cc->get_pc = loongarch_cpu_get_pc;
#ifndef CONFIG_USER_ONLY
    cc->get_arch_id = loongarch_cpu_get_arch_id;
    dc->vmsd = &vmstate_loongarch_cpu;
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;

#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif
    dc->user_creatable = true;
}

static const gchar *loongarch32_gdb_arch_name(CPUState *cs)
{
    return "loongarch32";
}

static void loongarch32_cpu_class_init(ObjectClass *c, const void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base32.xml";
    cc->gdb_arch_name = loongarch32_gdb_arch_name;
}

static const gchar *loongarch64_gdb_arch_name(CPUState *cs)
{
    return "loongarch64";
}

static void loongarch64_cpu_class_init(ObjectClass *c, const void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_arch_name = loongarch64_gdb_arch_name;
}

#define DEFINE_LOONGARCH_CPU_TYPE(size, model, initfn) \
    { \
        .parent = TYPE_LOONGARCH##size##_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_align = __alignof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH32_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch32_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH64_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch64_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE(64, "la464", loongarch_la464_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(32, "la132", loongarch_la132_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(64, "max", loongarch_max_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)