/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch emulation for QEMU - main translation routines.
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translation-block.h"
#include "exec/translator.h"
#include "exec/target_page.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"
#include "fpu/softfloat.h"
#include "tcg_loongarch.h"
#include "translate.h"
#include "internals.h"
#include "vec.h"

/* Global register indices */
TCGv cpu_gpr[32], cpu_pc;
static TCGv cpu_lladdr, cpu_llval;

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

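/*
 * Local is_jmp values, handled in loongarch_tr_tb_stop():
 * DISAS_STOP ends the TB and looks up the next one from cpu_pc,
 * DISAS_EXIT returns to the main loop, and DISAS_EXIT_UPDATE stores
 * the next PC before exiting.
 */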
#define DISAS_STOP        DISAS_TARGET_0
#define DISAS_EXIT        DISAS_TARGET_1
#define DISAS_EXIT_UPDATE DISAS_TARGET_2

static inline int vec_full_offset(int regno)
{
    return offsetof(CPULoongArchState, fpr[regno]);
}

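/*
 * Byte offset of element 'index' (of size 1 << mop) within vector
 * register 'regno'.  On big-endian hosts, sub-64-bit elements are
 * flipped within their containing 8-byte lane to compensate for the
 * host byte order.
 */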
static inline int vec_reg_offset(int regno, int index, MemOp mop)
{
    const uint8_t size = 1 << mop;
    int offs = index * size;

    if (HOST_BIG_ENDIAN && size < 8) {
        offs ^= (8 - size);
    }

    return offs + vec_full_offset(regno);
}

static inline void get_vreg64(TCGv_i64 dest, int regno, int index)
{
    tcg_gen_ld_i64(dest, tcg_env,
                   offsetof(CPULoongArchState, fpr[regno].vreg.D(index)));
}

static inline void set_vreg64(TCGv_i64 src, int regno, int index)
{
    tcg_gen_st_i64(src, tcg_env,
                   offsetof(CPULoongArchState, fpr[regno].vreg.D(index)));
}

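/*
 * Immediate-transform helpers referenced from insns.decode via
 * decodetree's !function syntax.
 */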
static inline int plus_1(DisasContext *ctx, int x)
{
    return x + 1;
}

static inline int shl_1(DisasContext *ctx, int x)
{
    return x << 1;
}

static inline int shl_2(DisasContext *ctx, int x)
{
    return x << 2;
}

static inline int shl_3(DisasContext *ctx, int x)
{
    return x << 3;
}

/*
 * On LoongArch, the upper 32 bits of a single-precision value in an FPR
 * are undefined ("can be any value").
 * QEMU chooses to nanbox, because it is most likely to show guest bugs early.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}

void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}

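/*
 * Branch to 'dest': chain directly to the next TB when
 * translator_use_goto_tb() allows it, otherwise fall back to an
 * indirect jump through the TB lookup.  In VA32 mode the destination
 * is truncated to 32 bits first.
 */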
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (ctx->va32) {
        dest = (uint32_t) dest;
    }

    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
}

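/*
 * Set up per-TB translation state: the MMU index follows the privilege
 * level when paging (CRMD.PG) is enabled and falls back to direct
 * address mode otherwise, the instruction count is bounded by the end
 * of the current page, and the vector length reflects LSX/LASX support.
 */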
static void loongarch_tr_init_disas_context(DisasContextBase *dcbase,
                                            CPUState *cs)
{
    int64_t bound;
    CPULoongArchState *env = cpu_env(cs);
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
    ctx->plv = ctx->base.tb->flags & HW_FLAGS_PLV_MASK;
    if (ctx->base.tb->flags & HW_FLAGS_CRMD_PG) {
        ctx->mem_idx = ctx->plv;
    } else {
        ctx->mem_idx = MMU_DA_IDX;
    }

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    if (FIELD_EX64(env->cpucfg[2], CPUCFG2, LSX)) {
        ctx->vl = LSX_LEN;
    }

    if (FIELD_EX64(env->cpucfg[2], CPUCFG2, LASX)) {
        ctx->vl = LASX_LEN;
    }

    ctx->la64 = is_la64(env);
    ctx->va32 = (ctx->base.tb->flags & HW_FLAGS_VA32) != 0;

    ctx->zero = tcg_constant_tl(0);

    ctx->cpucfg1 = env->cpucfg[1];
    ctx->cpucfg2 = env->cpucfg[2];
}

static void loongarch_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static void loongarch_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

/*
 * Wrappers for getting reg values.
 *
 * The $zero register does not have cpu_gpr[0] allocated -- we supply the
 * constant zero as a source, and an uninitialized sink as destination.
 *
 * Further, we may provide an extension for word operations.
 */
static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext)
{
    TCGv t;

    if (reg_num == 0) {
        return ctx->zero;
    }

    switch (src_ext) {
    case EXT_NONE:
        return cpu_gpr[reg_num];
    case EXT_SIGN:
        t = tcg_temp_new();
        tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
        return t;
    case EXT_ZERO:
        t = tcg_temp_new();
        tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
        return t;
    }
    g_assert_not_reached();
}

static TCGv gpr_dst(DisasContext *ctx, int reg_num, DisasExtend dst_ext)
{
    if (reg_num == 0 || dst_ext) {
        return tcg_temp_new();
    }
    return cpu_gpr[reg_num];
}

static void gen_set_gpr(int reg_num, TCGv t, DisasExtend dst_ext)
{
    if (reg_num != 0) {
        switch (dst_ext) {
        case EXT_NONE:
            tcg_gen_mov_tl(cpu_gpr[reg_num], t);
            break;
        case EXT_SIGN:
            tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
            break;
        case EXT_ZERO:
            tcg_gen_ext32u_tl(cpu_gpr[reg_num], t);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

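/*
 * The scalar FP registers occupy the low 64 bits of the corresponding
 * vector registers, hence the access through vreg.D(0).
 */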
static TCGv get_fpr(DisasContext *ctx, int reg_num)
{
    TCGv t = tcg_temp_new();
    tcg_gen_ld_i64(t, tcg_env,
                   offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0)));
    return t;
}

static void set_fpr(int reg_num, TCGv val)
{
    tcg_gen_st_i64(val, tcg_env,
                   offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0)));
}

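/*
 * Compute an effective address as base (+ addend), truncated to 32 bits
 * in VA32 mode.  A NULL addend means no addend.
 */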
static TCGv make_address_x(DisasContext *ctx, TCGv base, TCGv addend)
{
    TCGv temp = NULL;

    if (addend || ctx->va32) {
        temp = tcg_temp_new();
    }
    if (addend) {
        tcg_gen_add_tl(temp, base, addend);
        base = temp;
    }
    if (ctx->va32) {
        tcg_gen_ext32u_tl(temp, base);
        base = temp;
    }
    return base;
}

static TCGv make_address_i(DisasContext *ctx, TCGv base, target_long ofs)
{
    TCGv addend = ofs ? tcg_constant_tl(ofs) : NULL;
    return make_address_x(ctx, base, addend);
}

static uint64_t make_address_pc(DisasContext *ctx, uint64_t addr)
{
    if (ctx->va32) {
        addr = (int32_t)addr;
    }
    return addr;
}

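/*
 * decode-insns.c.inc is generated by decodetree from insns.decode; the
 * trans_*.c.inc files supply the trans_* callbacks it invokes.
 */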
#include "decode-insns.c.inc"
#include "insn_trans/trans_arith.c.inc"
#include "insn_trans/trans_shift.c.inc"
#include "insn_trans/trans_bit.c.inc"
#include "insn_trans/trans_memory.c.inc"
#include "insn_trans/trans_atomic.c.inc"
#include "insn_trans/trans_extra.c.inc"
#include "insn_trans/trans_farith.c.inc"
#include "insn_trans/trans_fcmp.c.inc"
#include "insn_trans/trans_fcnv.c.inc"
#include "insn_trans/trans_fmov.c.inc"
#include "insn_trans/trans_fmemory.c.inc"
#include "insn_trans/trans_branch.c.inc"
#include "insn_trans/trans_privileged.c.inc"
#include "insn_trans/trans_vec.c.inc"

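/*
 * Fetch and decode one fixed-width 32-bit instruction.  An unrecognized
 * encoding raises an INE (instruction non-existent) exception.
 */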
static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->opcode = translator_ldl(cpu_env(cs), &ctx->base, ctx->base.pc_next);

    if (!decode(ctx, ctx->opcode)) {
        qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. "
                      "0x%" VADDR_PRIx ": 0x%x\n",
                      ctx->base.pc_next, ctx->opcode);
        generate_exception(ctx, EXCCODE_INE);
    }

    ctx->base.pc_next += 4;

    if (ctx->va32) {
        ctx->base.pc_next = (uint32_t)ctx->base.pc_next;
    }
}

static void loongarch_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_STOP:
        tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        break;
    case DISAS_EXIT_UPDATE:
        tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
        QEMU_FALLTHROUGH;
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps loongarch_tr_ops = {
    .init_disas_context = loongarch_tr_init_disas_context,
    .tb_start           = loongarch_tr_tb_start,
    .insn_start         = loongarch_tr_insn_start,
    .translate_insn     = loongarch_tr_translate_insn,
    .tb_stop            = loongarch_tr_tb_stop,
};

void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
                              int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &loongarch_tr_ops, &ctx.base);
}

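/*
 * Allocate TCG globals for the CPU state.  $r0 is hard-wired to zero,
 * so no global is created for it; reads use ctx->zero and writes are
 * discarded.
 */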
void loongarch_translate_init(void)
{
    int i;

    cpu_gpr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(tcg_env,
                                        offsetof(CPULoongArchState, gpr[i]),
                                        regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(tcg_env, offsetof(CPULoongArchState, pc), "pc");
    cpu_lladdr = tcg_global_mem_new(tcg_env,
                    offsetof(CPULoongArchState, lladdr), "lladdr");
    cpu_llval = tcg_global_mem_new(tcg_env,
                    offsetof(CPULoongArchState, llval), "llval");

#ifndef CONFIG_USER_ONLY
    loongarch_csr_translate_init();
#endif
}