xref: /qemu/target/microblaze/translate.c (revision 8cea8bd4d3909b7828310a0f76d5194d1bf0095a)
/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-ldst.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

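/*
 * Illustrative use of EXTRACT_FIELD (not referenced by the translator
 * itself; the generated decoder included below does its own field
 * extraction): pulling a 5-bit register field out of bits [25:21] of
 * an opcode would be EXTRACT_FIELD(insn, 21, 25), i.e.
 * (insn >> 21) & 0x1f.  The field position here is only an example.
 */
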
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}

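/*
 * Worked example of the IMM prefix handled by typeb_imm(): a full
 * 32-bit constant is built from two insns, e.g.
 *
 *     imm   0x1234           ; ext_imm = 0x12340000, IMM_FLAG set
 *     addik r3, r0, 0x5678   ; deposit32(0x12340000, 0, 16, 0x5678)
 *                            ;   = 0x12345678
 *
 * Without a preceding imm, the 16-bit field has already been
 * sign-extended by the decoder and is returned unchanged.
 */
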
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Sync the TB-dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

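/*
 * Worked example of the carry plumbing above: for "add r3, r1, r2"
 * with r1 = 0xffffffff and r2 = 1, gen_add() produces r3 = 0 and sets
 * cpu_msr_c = 1 via the high half of the double-word add.  gen_addkc()
 * adds the old carry in but never writes cpu_msr_c, matching the
 * MicroBlaze suffixes K (keep MSR[C] unchanged) and C (use carry in).
 */
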
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

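/*
 * Example of the bsefi/bsifi immediate layout used above: decodetree
 * reassembles the width into imm[9:5] and the shift into imm[4:0], so
 * a bit-field extract with width 8 and shift 4 arrives here as
 * imm = (8 << 5) | 4 and becomes tcg_gen_extract_i32(out, ina, 4, 8),
 * i.e. out = (ina >> 4) & 0xff.
 */
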
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

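/*
 * Worked example of gen_cmp(): "cmp r3, r1, r2" computes r3 = r2 - r1
 * and then forces bit 31 to the signed r2 < r1 result.  With r1 = 5,
 * r2 = 3: r3 = 0xfffffffe and bit 31 stays set (3 < 5).  With
 * r1 = 0x80000000, r2 = 1 the raw subtraction overflows to 0x80000001,
 * and the deposit clears bit 31 because 1 < INT32_MIN is false.
 */
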
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If either reg is r0, set ret to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, set ret to just the immediate.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

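/*
 * Example of the type-B address computation above: "lwi r5, r1, -4"
 * yields ret = r1 - 4, and because ra is r1 (the ABI stack pointer),
 * a core configured with stackprot additionally range-checks the
 * address against the SLR/SHR limit registers via
 * gen_helper_stackprot().
 */
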
#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}

static void gen_alignment_check_ea(DisasContext *dc, TCGv_i64 ea, int rb,
                                   int rd, MemOp size, bool store)
{
    if (rb && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) {
        TCGLabel *over = gen_new_label();

        record_unaligned_ess(dc, rd, size, store);

        tcg_gen_brcondi_i64(TCG_COND_TSTEQ, ea, (1 << size) - 1, over);
        gen_helper_unaligned_access(tcg_env, ea);
        gen_set_label(over);
    }
}
#endif

static inline MemOp mo_endian(DisasContext *dc)
{
    return dc->cfg->endi ? MO_LE : MO_BE;
}

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}

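/*
 * Example of the reverse-access fixup in do_load(), assuming a
 * big-endian-configured core: for lhur, size is MO_16, so the address
 * is flipped with addr ^= 2 (3 - size), selecting the opposite
 * halfword within the word, and mop ^= MO_BSWAP turns the MO_BE load
 * into an MO_LE one, byteswapping the data lanes.  A 32-bit lwr leaves
 * the address alone and only swaps the data.
 */
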
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_helper_lbuea(reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, false);
    (mo_endian(dc) == MO_BE ? gen_helper_lhuea_be : gen_helper_lhuea_le)
        (reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, false);
    (mo_endian(dc) == MO_BE ? gen_helper_lwea_be : gen_helper_lwea_le)
        (reg_for_write(dc, arg->rd), tcg_env, addr);
    return true;
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
                        mo_endian(dc) | MO_UL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive, so always clear C.  */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_helper_sbea(tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, true);
    (mo_endian(dc) == MO_BE ? gen_helper_shea_be : gen_helper_shea_le)
        (tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, true);
    (mo_endian(dc) == MO_BE ? gen_helper_swea_be : gen_helper_swea_le)
        (tcg_env, reg_for_read(dc, arg->rd), addr);
    return true;
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, mo_endian(dc) | MO_UL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}

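/*
 * Sketch of how guest code is expected to use the lwx/swx pair
 * modelled above (illustrative test-and-set loop only):
 *
 *     retry:
 *         lwx   r5, r4, r0    ; load-link from [r4]; also clears MSR[C]
 *         bnei  r5, retry     ; lock word non-zero: already held
 *         addik r5, r0, 1
 *         swx   r5, r4, r0    ; store-conditional; MSR[C]=1 on failure
 *         addc  r6, r0, r0    ; r6 := MSR[C]
 *         bnei  r6, retry     ; reservation lost, try again
 *
 * The atomic_cmpxchg in trans_swx() provides the store-conditional
 * semantics; res_addr = -1 afterwards kills the reservation whether
 * the store succeeded or not.
 */
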
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}

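/*
 * Worked example of do_bcc(): for "beqid r3, label" (delay = true) the
 * taken target is written to btarget, and the movcond then folds in
 * the not-taken case, leaving
 *
 *     btarget = (r3 == 0) ? label : pc_next + 8
 *
 * where pc_next + 8 skips both the branch and its delay slot.  The
 * actual control transfer happens once the delay slot has been
 * translated, in mb_tr_translate_insn() and mb_tr_tb_stop().
 */
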
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

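/*
 * The sleep path above reaches cs->halted through pointer arithmetic
 * on tcg_env, which points at the env member inside MicroBlazeCPU:
 * subtracting offsetof(MicroBlazeCPU, env) walks back to the start of
 * the CPU object and adding offsetof(CPUState, halted) lands on the
 * halted field, so the guest sleep is one i32 store of 1 plus an
 * EXCP_HLT exit with pc pointing at the next insn.
 */
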
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}

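/*
 * Example of the carry replication in msr_read(): cpu_msr_c holds 0
 * or 1, so the multiply by (MSR_C | MSR_CC) yields either 0 or a mask
 * with both the carry bit and its carry-copy bit set, which is then
 * OR'ed into the carry-free cpu_msr value to form the architectural
 * MSR view.
 */
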
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

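/*
 * All three return paths above exploit the MSR layout, where each
 * saved bit sits one position above its live counterpart: shifting
 * MSR right by one moves UMS into the UM slot and VMS into the VM
 * slot, so masking with MSR_VM | MSR_UM and OR-ing back restores the
 * pre-trap user/virtual mode.  They differ only in the extra bits
 * touched: rtid sets IE, rtbd clears BIP, rted sets EE and clears EIP.
 */
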
/* Insns connected to FSL or AXI stream attached devices.  */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

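/*
 * Worked example of the max_insns bound above, assuming 4 KiB pages
 * (TARGET_PAGE_MASK == -4096): for pc_first = 0x10000ff8,
 * (pc_first | TARGET_PAGE_MASK) == -8, so bound = 8 / 4 = 2 insns
 * remain before the page boundary, which keeps a TB from straddling
 * a page.
 */
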
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next,
                             mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from (rtid/rtbd/rted) branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
};

void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
                       int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x%" PRIx64 " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1911