xref: /qemu/target/microblaze/translate.c (revision 57be554c2984de3261d1e5d446d797d8e5b2c997)
/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
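
/*
 * Both bit positions are inclusive: for example, EXTRACT_FIELD(insn, 21, 25)
 * yields the five bits [25:21] of insn as a value in 0..31.
 */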

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
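
/*
 * An "imm" prefix insn supplies the upper 16 bits of the following
 * insn's immediate.  Illustrative guest code:
 *
 *     imm   0x1234
 *     addik r5, r0, 0x5678    ; r5 = 0x12345678
 *
 * Without the prefix, the 16-bit Type B immediate is sign-extended
 * as usual.
 */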

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
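
/*
 * r0 reads as zero and writes to it are discarded: reads are satisfied
 * from a temp freshly zeroed per insn, and writes land in the same temp,
 * which is dropped after the insn.  E.g. "add r0, r3, r4" computes the
 * sum (updating MSR[C]) but stores no result.
 */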

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}
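
/*
 * add2 with zero high parts computes the full 33-bit sum, leaving the
 * low 32 bits in out and the carry (0 or 1) in cpu_msr_c.  For example,
 * 0xffffffff + 0x00000001 gives out = 0 and MSR[C] = 1.
 */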

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
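
/*
 * bsefi extracts a bit field: with s = 4 and w = 8, out = (ina >> 4) & 0xff.
 * bsifi inserts one: the low (w - s + 1) bits of ina replace bits [w:s]
 * of the existing rd value, which is why out is also an input above.
 */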

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}
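
/*
 * cmp/cmpu compute rb - ra but force the sign bit of the result to the
 * signed/unsigned "rb < ra" outcome, so a following blt/bge on rd tests
 * the comparison correctly even when the subtraction itself overflows.
 */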

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}
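
/*
 * For subtraction, MicroBlaze's carry is "no borrow": rsub computes
 * inb + ~ina + 1, so MSR[C] = 1 exactly when inb >= ina unsigned,
 * which the setcond above implements directly.
 */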

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}
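
/*
 * src shifts right through carry: extract2 takes 32 bits starting at
 * bit 1 of the 64-bit pair tmp:ina, i.e. out = (old_C << 31) | (ina >> 1),
 * after the new carry has been captured from bit 0 of ina.
 */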

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the immediate.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
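
/*
 * Accesses computed from r1 (the ABI stack pointer) are additionally
 * checked by the stackprot helper when the core is configured with
 * stack protection; addresses outside the window bounded by the SLR
 * and SHR registers raise a hardware exception.
 */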

#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
#endif

static inline MemOp mo_endian(DisasContext *dc)
{
    return dc->cfg->endi ? MO_LE : MO_BE;
}

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
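
/*
 * For the reversed forms, the address XOR mirrors the byte/halfword
 * lane within its 32-bit word: e.g. a reversed byte load at address A
 * fetches the byte at A ^ 3, and a reversed halfword load at A fetches
 * the swapped halfword at A ^ 2.
 */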

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
                        mo_endian(dc) | MO_UL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
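
/*
 * lwx records the reservation (address and loaded value) in cpu_res_addr
 * and cpu_res_val; the matching swx below succeeds only if both still
 * match, implemented with a compare-and-swap rather than a true monitor.
 */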

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, mo_endian(dc) | MO_UL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
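
/*
 * Illustrative guest usage of the lwx/swx pair as an atomic increment
 * loop (swx reports failure by setting MSR[C], captured here via addc):
 *
 * retry:
 *     lwx   r5, r6, r0      ; load word, set reservation
 *     addik r5, r5, 1
 *     swx   r5, r6, r0      ; store iff reservation still holds
 *     addc  r7, r0, r0      ; r7 = MSR[C]
 *     bnei  r7, retry
 */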

static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
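
/*
 * The movcond above resolves the branch already: btarget becomes the
 * taken destination when "ra cond 0" holds, else the address of the
 * insn after the (optional) delay slot.  E.g. for "beqid r3, L" at pc,
 * btarget = (r3 == 0) ? L : pc + 8.
 */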

#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
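
/*
 * Since cpu_msr_c is always 0 or 1, the multiply by (MSR_C | MSR_CC)
 * sets both the architectural carry bit and its read-only copy in one
 * op: t is either 0 or exactly those two bits.
 */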

static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
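
/*
 * The save bits UMS/VMS sit one position above UM/VM, so the shift
 * right by one in the helpers above moves the saved mode back into
 * the active bits on return, mirroring how brki shifted them left
 * when the trap was taken.
 */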

/* Insns connected to FSL or AXI stream attached devices.  */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
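
/*
 * The bound computed above is the number of 4-byte insns left on the
 * current page, so a TB never crosses a page boundary.  E.g. with 4KiB
 * pages, a pc_first ending in 0xffc leaves room for exactly one insn.
 */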

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next,
                             mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
};

void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
                       int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}