xref: /qemu/target/microblaze/translate.c (revision 3072961b6edc99abfbd87caac3de29bb58a52ccf)
1 /*
2  *  Xilinx MicroBlaze emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2009 Edgar E. Iglesias.
5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "accel/tcg/cpu-ldst.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/helper-proto.h"
26 #include "exec/helper-gen.h"
27 #include "exec/translator.h"
28 #include "exec/translation-block.h"
29 #include "exec/target_page.h"
30 #include "qemu/qemu-print.h"
31 
32 #include "exec/log.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
/*
 * Extract the inclusive bit field [start, end] (LSB-first) from src.
 * All parameters are fully parenthesized so that expression arguments
 * (e.g. "a + b") expand correctly; fields of up to 31 bits only, as
 * the mask is built with a plain int shift.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
40 
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

/* TCG globals mirroring the architectural CPU state. */
static TCGv_i32 cpu_R[32];      /* general purpose registers r0..r31 */
static TCGv_i32 cpu_pc;         /* program counter */
static TCGv_i32 cpu_msr;        /* machine status register */
static TCGv_i32 cpu_msr_c;      /* MSR carry bit, tracked separately */
static TCGv_i32 cpu_imm;        /* value latched by an IMM prefix insn */
static TCGv_i32 cpu_bvalue;     /* branch condition value (delay slots) */
static TCGv_i32 cpu_btarget;    /* branch target (delay slots) */
static TCGv_i32 cpu_iflags;     /* runtime copy of the IFLAGS_TB_MASK bits */
static TCGv cpu_res_addr;       /* lwx reservation address */
static TCGv_i32 cpu_res_val;    /* lwx reservation value */
60 
/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* Decoder.  */
    uint32_t ext_imm;             /* high 16 bits set by a preceding imm insn */
    unsigned int tb_flags;        /* current translation-time iflags */
    unsigned int tb_flags_to_set; /* flags to apply after this insn (IMM_FLAG) */
    int mem_index;                /* MMU index for memory accesses */

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
78 
/*
 * Resolve a type-B immediate: when an IMM prefix is pending, splice the
 * current insn's 16-bit immediate into the low half of the latched
 * upper bits; otherwise return the immediate unchanged.
 */
static int typeb_imm(DisasContext *dc, int x)
{
    if (!(dc->tb_flags & IMM_FLAG)) {
        return x;
    }
    return deposit32(dc->ext_imm, 0, 16, x);
}
86 
87 /* Include the auto-generated decoder.  */
88 #include "decode-insns.c.inc"
89 
/* Emit a store of the translation-time iflags to cpu_iflags, if needed. */
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    /* Skip the store when the flags still match what the TB started with. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
97 
/* Raise exception "index"; ends translation of the current insn. */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

/* As gen_raise_exception, but first sync iflags and pc into env. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

/* Raise a hardware exception with exception class "esr_ec" in ESR. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

/*
 * Jump to a known destination: chain directly to the next TB when
 * allowed, otherwise set pc and fall back to the indirect TB lookup.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
131 
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}
173 
reg_for_read(DisasContext * dc,int reg)174 static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
175 {
176     if (likely(reg != 0)) {
177         return cpu_R[reg];
178     }
179     return tcg_constant_i32(0);
180 }
181 
reg_for_write(DisasContext * dc,int reg)182 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
183 {
184     if (likely(reg != 0)) {
185         return cpu_R[reg];
186     }
187     return tcg_temp_new_i32();
188 }
189 
/*
 * Generic type-A (register-register) insn: rd = fn(ra, rb).
 * side_effects forces emission even when rd is r0 (e.g. carry updates).
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    /* Writing r0 with no side effects is a nop. */
    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

/* Type-A insn with a single source register: rd = fn(ra). */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

/* Type-B insn whose generator takes the immediate as a host constant. */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

/* Type-B insn whose generator takes the immediate as a TCG value. */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}
252 
/*
 * Generators for the decodetree trans_* entry points.  SE ("side
 * effects") forces emission even when rd is r0; the _CFG variants also
 * gate the insn on a MicroBlazeCPUConfig field.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap a helper that additionally takes tcg_env as its second argument. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }
288 
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    /* Double-word add: the high result word is exactly the carry-out. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

/* andni: out = ina & ~imm. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
329 
/* Barrel shifter, arithmetic shift right; shift count is rb mod 32. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

/* Barrel shifter, logical shift right; shift count is rb mod 32. */
static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

/* Barrel shifter, shift left; shift count is rb mod 32. */
static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

/* Bit-field extract immediate: out = ina[imm_s + imm_w - 1 : imm_s]. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

/* Bit-field insert immediate: deposit ina into out at [imm_w : imm_s]. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        /* out is both source and destination: untouched bits are kept. */
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

/* Count leading zeros; clz(0) is defined to yield 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
399 
/*
 * Signed compare: out = inb - ina, with bit 31 overwritten by
 * (inb < ina) so the sign bit reflects the signed comparison.
 */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

/* As gen_cmp, but bit 31 holds the unsigned comparison result. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

/* Floating point ops go through helpers, which may raise FP exceptions. */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* Conversions and sqrt require the extended FPU (use_fpu >= 2). */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
466 
/*
 * IMM prefix insn: latch the upper 16 bits for the next type-B insn
 * (consumed via typeb_imm).  Not legal inside a delay slot.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    /* Applied after this insn, so the flag covers exactly the next one. */
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
477 
/* High 32 bits of the signed 64-bit product; the low half is discarded. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

/* High 32 bits of the unsigned 64-bit product. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

/* High 32 bits of the signed x unsigned 64-bit product. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq: out = (ina == inb) ? 1 : 0. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

/* pcmpne: out = (ina != inb) ? 1 : 0. */
static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* Reverse subtract (out = inb - ina).  No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* Carry-out of inb - ina is !borrow, i.e. inb >= ina (unsigned). */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* inb - ina - !c  ==  inb + ~ina + c. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Shift right arithmetic by one; the shifted-out bit goes to carry. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right through carry: old carry shifts in, bit 0 shifts out. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save the old carry before clobbering it with the shifted-out bit. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

/* Shift right logical by one; the shifted-out bit goes to carry. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* Swap the two 16-bit halfwords (rotate by 16). */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
606 
/* Effective address for a type-A load/store: ra + rb. */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret;

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        ret = tcg_temp_new_i32();
        tcg_gen_add_i32(ret, cpu_R[ra], cpu_R[rb]);
    } else if (ra) {
        ret = cpu_R[ra];
    } else if (rb) {
        ret = cpu_R[rb];
    } else {
        ret = tcg_constant_i32(0);
    }

    /* r1 is the stack pointer; optionally bounds-check accesses through it. */
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

/* Effective address for a type-B load/store: ra + imm. */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret;

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && imm) {
        ret = tcg_temp_new_i32();
        tcg_gen_addi_i32(ret, cpu_R[ra], imm);
    } else if (ra) {
        ret = cpu_R[ra];
    } else {
        ret = tcg_constant_i32(imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/*
 * 64-bit extended address for the *ea insns: ra supplies the high word,
 * rb the low word, truncated to the configured physical address size.
 */
static TCGv_i64 compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv_i64 ret = tcg_temp_new_i64();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_i64(ret, cpu_R[rb]);
        } else {
            return tcg_constant_i64(0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_i64(ret, cpu_R[ra]);
            tcg_gen_shli_i64(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

#ifndef CONFIG_USER_ONLY
/*
 * Record the unaligned-access details (dest reg, store flag, word size)
 * in this insn's start params, so the exception handler can build ESR.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}

/*
 * Emit an explicit runtime alignment check on a 64-bit extended address;
 * raise the unaligned-access exception when the low bits are set.
 */
static void gen_alignment_check_ea(DisasContext *dc, TCGv_i64 ea, int rb,
                                   int rd, MemOp size, bool store)
{
    if (rb && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) {
        TCGLabel *over = gen_new_label();

        record_unaligned_ess(dc, rd, size, store);

        /* Branch over the helper when the address is naturally aligned. */
        tcg_gen_brcondi_i64(TCG_COND_TSTEQ, ea, (1 << size) - 1, over);
        gen_helper_unaligned_access(tcg_env, ea);
        gen_set_label(over);
    }
}
#endif
705 
mo_endian(DisasContext * dc)706 static inline MemOp mo_endian(DisasContext *dc)
707 {
708     return dc->cfg->endi ? MO_LE : MO_BE;
709 }
710 
/*
 * Emit a load of size "mop" into register rd from "addr".
 * "rev" selects the byte-reversed access of the lbur/lhur/lwr insns.
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
750 
/* lbu: load byte unsigned, register + register addressing. */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

/* lbur: load byte unsigned, byte-reversed addressing. */
static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

/* lbuea: load byte via 64-bit extended address; privileged. */
static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_helper_lbuea(reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

/* lbui: load byte unsigned, register + immediate addressing. */
static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

/* lhu: load halfword unsigned. */
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

/* lhur: load halfword unsigned, byte-reversed. */
static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

/* lhuea: load halfword via extended address; privileged, alignment-checked. */
static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, false);
    (mo_endian(dc) == MO_BE ? gen_helper_lhuea_be : gen_helper_lhuea_le)
        (reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

/* lhui: load halfword unsigned, register + immediate. */
static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

/* lw: load word. */
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

/* lwr: load word, byte-reversed. */
static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

/* lwea: load word via extended address; privileged, alignment-checked. */
static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, false);
    (mo_endian(dc) == MO_BE ? gen_helper_lwea_be : gen_helper_lwea_le)
        (reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

/* lwi: load word, register + immediate. */
static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
850 
/* lwx: load word exclusive, establishing a load/store reservation. */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
                        mo_endian(dc) | MO_UL);
    /* Remember address and value for the matching swx. */
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
870 
/*
 * Emit a store of size "mop" from register rd to "addr".
 * "rev" selects the byte-reversed access of the sbr/shr/swr insns.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}
910 
trans_sb(DisasContext * dc,arg_typea * arg)911 static bool trans_sb(DisasContext *dc, arg_typea *arg)
912 {
913     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
914     return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
915 }
916 
trans_sbr(DisasContext * dc,arg_typea * arg)917 static bool trans_sbr(DisasContext *dc, arg_typea *arg)
918 {
919     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
920     return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
921 }
922 
/*
 * sbea: store byte, extended (64-bit physical) address.
 * Privileged; a byte store needs no alignment check, so the helper
 * is called directly.
 */
static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    /* trap_userspace always traps for user-only builds. */
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_helper_sbea(tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}
936 
trans_sbi(DisasContext * dc,arg_typeb * arg)937 static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
938 {
939     TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
940     return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
941 }
942 
trans_sh(DisasContext * dc,arg_typea * arg)943 static bool trans_sh(DisasContext *dc, arg_typea *arg)
944 {
945     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
946     return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
947 }
948 
trans_shr(DisasContext * dc,arg_typea * arg)949 static bool trans_shr(DisasContext *dc, arg_typea *arg)
950 {
951     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
952     return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
953 }
954 
/*
 * shea: store halfword, extended (64-bit physical) address.
 * Privileged; alignment is checked before dispatching to the
 * endian-specific helper.
 */
static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    /* trap_userspace always traps for user-only builds. */
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, true);
    (mo_endian(dc) == MO_BE ? gen_helper_shea_be : gen_helper_shea_le)
        (tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}
970 
trans_shi(DisasContext * dc,arg_typeb * arg)971 static bool trans_shi(DisasContext *dc, arg_typeb *arg)
972 {
973     TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
974     return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
975 }
976 
trans_sw(DisasContext * dc,arg_typea * arg)977 static bool trans_sw(DisasContext *dc, arg_typea *arg)
978 {
979     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
980     return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
981 }
982 
trans_swr(DisasContext * dc,arg_typea * arg)983 static bool trans_swr(DisasContext *dc, arg_typea *arg)
984 {
985     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
986     return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
987 }
988 
/*
 * swea: store word, extended (64-bit physical) address.
 * Privileged; alignment is checked before dispatching to the
 * endian-specific helper.
 */
static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    /* trap_userspace always traps for user-only builds. */
    g_assert_not_reached();
#else
    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, true);
    (mo_endian(dc) == MO_BE ? gen_helper_swea_be : gen_helper_swea_le)
        (tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}
1004 
trans_swi(DisasContext * dc,arg_typeb * arg)1005 static bool trans_swi(DisasContext *dc, arg_typeb *arg)
1006 {
1007     TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
1008     return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
1009 }
1010 
/*
 * swx: store word exclusive.  The store succeeds only if the address
 * matches the reservation made by lwx and the memory still holds the
 * value observed then (checked atomically via cmpxchg).  MSR[C] is
 * cleared on success and set on failure; the reservation is always
 * consumed.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    /* Atomically store rd only if memory still equals cpu_res_val. */
    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, mo_endian(dc) | MO_UL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1058 
/*
 * Arrange for the next insn to execute as a delay slot (D_FLAG).
 * For type B branches taken while an imm prefix is pending, also
 * record BIMM_FLAG so the prefix survives into the slot.
 */
static void setup_dslot(DisasContext *dc, bool type_b)
{
    uint32_t flags = D_FLAG;

    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        flags |= BIMM_FLAG;
    }
    dc->tb_flags_to_set |= flags;
}
1066 
/*
 * Common code for unconditional branches.
 * DEST_RB >= 0 selects a register target (DEST_IMM is then 0);
 * DEST_RB < 0 selects the immediate target DEST_IMM.
 * DELAY enables the delay slot, ABS makes the target absolute rather
 * than pc-relative, and LINK (if non-zero) is the register receiving
 * the return address.
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        /* pc_next is still the address of this insn at decode time. */
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Register target: destination is not known at translate time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1095 
/*
 * Expand the unconditional branch pairs: trans_NAME takes the target
 * from rB (type A), trans_NAMEI from an immediate (type B).  DELAY
 * selects a delay slot, ABS an absolute target, and LINK writes the
 * return address to rD.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1108 
/*
 * Common code for conditional branches: branch to the rB/immediate
 * target (pc-relative) when rA compared against zero with COND holds.
 * As with do_branch, DEST_RB >= 0 selects a register target and
 * DEST_RB < 0 the immediate DEST_IMM.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    /* Fall-through address: the insn after the branch (and its slot). */
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
1144 
/*
 * Expand the four forms of each conditional branch: register target,
 * register target with delay slot, immediate target, and immediate
 * target with delay slot.
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1161 
/*
 * brk: hardware break.  Privileged.  Jumps to the address in rB,
 * optionally linking in rD, sets MSR[BIP] and kills any lwx
 * reservation.  Ends the TB since MSR changed.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1181 
/*
 * brki: break to immediate vector.  Userspace may only use the
 * syscall (0x8) and debug (0x18) vectors; anything else is
 * privileged.  Links the return address in rD and kills any lwx
 * reservation.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    /* For user-mode, the two permitted vectors become exceptions. */
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    /* All vectors except the debug trap set MSR[BIP]. */
    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1228 
/*
 * mbar: memory barrier / sleep.  imm bit 1 clear requests a data
 * barrier, bit 4 requests sleep (privileged), bit 0 clear requests an
 * instruction barrier.  The TB is ended in every case; see below.
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        /*
         * Set cs->halted = 1: tcg_env points at the env member inside
         * MicroBlazeCPU, so step back to the CPU and over to halted.
         */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        /* Resume after the mbar when woken. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1275 
/*
 * Common code for rtsd/rtid/rtbd/rted: an always-taken branch to
 * rA + imm with a mandatory delay slot.  TO_SET is the DRT?_FLAG that
 * tells the delay-slot epilogue which MSR restore to perform; it is
 * zero for plain rtsd, which is the only unprivileged form.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1293 
/* Expand the four return-style branches with their respective flags. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1302 
1303 static bool trans_zero(DisasContext *dc, arg_zero *arg)
1304 {
1305     /* If opcode_0_illegal, trap.  */
1306     if (dc->cfg->opcode_0_illegal) {
1307         trap_illegal(dc, true);
1308         return true;
1309     }
1310     /*
1311      * Otherwise, this is "add r0, r0, r0".
1312      * Continue to trans_add so that MSR[C] gets cleared.
1313      */
1314     return false;
1315 }
1316 
/*
 * Read the architectural MSR value into D: the carry bit is kept in
 * its own boolean global (cpu_msr_c), so merge it back into both the
 * MSR_C and MSR_CC positions of cpu_msr.
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
1326 
/*
 * Common code for msrclr/msrset: read the old MSR into rD, then clear
 * (SET == false) or set (SET == true) the bits in the immediate.
 * Userspace may only touch the carry bit (imm == MSR_C); anything
 * else traps.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    /* rD receives the pre-modification MSR (r0 is never written). */
    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* MSR changed: return to the main loop after this insn. */
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
1363 
/* msrclr: clear the MSR bits given by the immediate. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1368 
/* msrset: set the MSR bits given by the immediate. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1373 
/*
 * mts: move rA to special register.  Privileged.  The 'e' bit selects
 * the extended form, which is only valid for TLBLO (0x1003).  Any MSR
 * or MMU change invalidates the cached translation state, so the TB
 * ends with DISAS_EXIT_NEXT.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* trap_userspace always traps for user-only builds. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800: /* SLR: stack low register */
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802: /* SHR: stack high register */
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1432 
/*
 * mfs: move special register to rD.  The 'e' bit selects the extended
 * form: high half of EAR, TLBLO (system mode), or the (unimplemented,
 * hence zero) high halves of PVR6-9.  Unknown registers are logged
 * and otherwise ignored.
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended read: the upper 32 bits of the 64-bit EAR. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        /* PC is known at translate time. */
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Non-extended read: the low 32 bits of the 64-bit EAR. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800: /* SLR: stack low register */
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802: /* SHR: stack high register */
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR0-12 live in the CPU config, addressed relative to env. */
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1522 
/*
 * Epilogue for rtid: restore MSR[UM/VM] from the saved MSR[UMS/VMS]
 * copies (one bit to the right) and re-enable interrupts (MSR[IE]).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift UMS/VMS down into the UM/VM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1533 
/*
 * Epilogue for rtbd: restore MSR[UM/VM] from the saved MSR[UMS/VMS]
 * copies and clear break-in-progress (MSR[BIP]).
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift UMS/VMS down into the UM/VM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1543 
/*
 * Epilogue for rted: restore MSR[UM/VM] from the saved MSR[UMS/VMS]
 * copies, re-enable exceptions (MSR[EE]) and clear
 * exception-in-progress (MSR[EIP]).
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift UMS/VMS down into the UM/VM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1554 
1555 /* Insns connected to FSL or AXI stream attached devices.  */
/*
 * Common code for get/getd: read from FSL/AXI-stream port into rD.
 * The port id comes from rB (low 4 bits) when RB != 0, otherwise
 * from the immediate.  CTRL carries the control-word flags.
 * Privileged.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}
1575 
/* get: FSL read with port id from the immediate. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1580 
/* getd: FSL read with port id from rB. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1585 
/*
 * Common code for put/putd: write rA to an FSL/AXI-stream port.
 * The port id comes from rB (low 4 bits) when RB != 0, otherwise
 * from the immediate.  CTRL carries the control-word flags.
 * Privileged.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}
1605 
/* put: FSL write with port id from the immediate. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1610 
/* putd: FSL write with port id from rB. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1615 
/* TranslatorOps hook: set up per-TB translation state. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* cs_base carries any imm-prefix value pending across the TB edge. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->mem_index = cpu_mmu_index(cs, false);
    /* A TB starting inside a delay slot has an unconditional branch live. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Do not cross a page boundary: limit insns to the page remainder. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1632 
/* TranslatorOps hook: no per-TB prologue is needed for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
1636 
/* TranslatorOps hook: record pc and iflags for each guest insn. */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}
1643 
/*
 * TranslatorOps hook: translate one guest insn, then handle the
 * delay-slot / return-from-exception bookkeeping that may terminate
 * the TB.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    /* Fetch, byteswapping if guest and build endianness differ. */
    ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next,
                             mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* One-shot flags expire here unless this insn re-armed them. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1716 
/*
 * TranslatorOps hook: emit the TB epilogue according to how the TB
 * was terminated (goto_tb chaining, direct/indirect jump, or exit to
 * the main loop).
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Flush any lazily-tracked iflags state back to env. */
    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Fell off the end of the TB: chain to the next insn. */
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* pc already written dynamically. */
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
1786 
/* Hook table consumed by the generic translator_loop. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
};
1794 
/* Entry point: translate one TB using the MicroBlaze hooks above. */
void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
                       int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}
1801 
/*
 * Dump CPU state for debugging: pc/msr summary, decoded iflags,
 * the special registers, and all 32 general-purpose registers,
 * four per line.  Output text and ordering match the historical
 * format exactly.
 */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags = env->iflags;
    /* The delay-slot-of-return flags are consecutive and printed bare. */
    static const struct {
        uint32_t bit;
        const char *tag;
    } dflags[] = {
        { DRTI_FLAG, " DRTI" },
        { DRTE_FLAG, " DRTE" },
        { DRTB_FLAG, " DRTB" },
    };

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    for (size_t k = 0; k < ARRAY_SIZE(dflags); k++) {
        if (iflags & dflags[k].bit) {
            qemu_fprintf(f, "%s", dflags[k].tag);
        }
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x%" PRIx64 " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (int r = 0; r < 32; r++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     r, env->regs[r], (r & 3) == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1850 
mb_tcg_init(void)1851 void mb_tcg_init(void)
1852 {
1853 #define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
1854 #define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }
1855 
1856     static const struct {
1857         TCGv_i32 *var; int ofs; char name[8];
1858     } i32s[] = {
1859         /*
1860          * Note that r0 is handled specially in reg_for_read
1861          * and reg_for_write.  Nothing should touch cpu_R[0].
1862          * Leave that element NULL, which will assert quickly
1863          * inside the tcg generator functions.
1864          */
1865                R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
1866         R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
1867         R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
1868         R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
1869 
1870         SP(pc),
1871         SP(msr),
1872         SP(msr_c),
1873         SP(imm),
1874         SP(iflags),
1875         SP(bvalue),
1876         SP(btarget),
1877         SP(res_val),
1878     };
1879 
1880 #undef R
1881 #undef SP
1882 
1883     for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
1884         *i32s[i].var =
1885           tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
1886     }
1887 
1888     cpu_res_addr =
1889         tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
1890 }
1891