/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
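/* E.g. EXTRACT_FIELD(0xabcd, 4, 7) == 0xc, i.e. bits [7:4] of the source. */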

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

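/*
 * Combine a type-B 16-bit immediate with a preceding "imm" prefix:
 * e.g. "imm 0x1234" followed by an insn carrying immediate 0x5678
 * yields the 32-bit value 0x12345678.
 */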
static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

317     tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
318 }
319 
320 /* Input carry, but no output carry. */
321 static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
322 {
323     tcg_gen_add_i32(out, ina, inb);
324     tcg_gen_add_i32(out, out, cpu_msr_c);
325 }
326 
327 DO_TYPEA(add, true, gen_add)
328 DO_TYPEA(addc, true, gen_addc)
329 DO_TYPEA(addk, false, tcg_gen_add_i32)
330 DO_TYPEA(addkc, true, gen_addkc)
331 
332 DO_TYPEBV(addi, true, gen_add)
333 DO_TYPEBV(addic, true, gen_addc)
334 DO_TYPEBI(addik, false, tcg_gen_addi_i32)
335 DO_TYPEBV(addikc, true, gen_addkc)
336 
337 static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
338 {
339     tcg_gen_andi_i32(out, ina, ~imm);
340 }
341 
342 DO_TYPEA(and, false, tcg_gen_and_i32)
343 DO_TYPEBI(andi, false, tcg_gen_andi_i32)
344 DO_TYPEA(andn, false, tcg_gen_andc_i32)
345 DO_TYPEBI(andni, false, gen_andni)
346 
347 static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
348 {
349     TCGv_i32 tmp = tcg_temp_new_i32();
350     tcg_gen_andi_i32(tmp, inb, 31);
351     tcg_gen_sar_i32(out, ina, tmp);
352 }
353 
354 static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
355 {
356     TCGv_i32 tmp = tcg_temp_new_i32();
357     tcg_gen_andi_i32(tmp, inb, 31);
358     tcg_gen_shr_i32(out, ina, tmp);
359 }
360 
361 static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
362 {
363     TCGv_i32 tmp = tcg_temp_new_i32();
364     tcg_gen_andi_i32(tmp, inb, 31);
365     tcg_gen_shl_i32(out, ina, tmp);
366 }
367 
368 static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
369 {
370     /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
371     int imm_w = extract32(imm, 5, 5);
372     int imm_s = extract32(imm, 0, 5);
373 
374     if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

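/*
 * cmp/cmpu compute rd = rb - ra, then overwrite bit 31 with the exact
 * "rb < ra" result (signed for cmp, unsigned for cmpu), since on
 * overflow the sign of the difference alone is not the comparison.
 */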
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
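    /* out = (old_carry:ina) >> 1: the saved carry becomes new bit 31. */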
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If either reg is r0, set ret to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the immediate.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
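/*
 * Extended-address (*ea) accesses: ra supplies the high 32 bits and rb
 * the low 32 bits of an up-to-64-bit address; bits beyond the
 * configured addr_size are masked off.
 */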
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
#endif

static inline MemOp mo_endian(DisasContext *dc)
{
    return dc->cfg->endi ? MO_LE : MO_BE;
}

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }
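    /*
     * E.g. a reversed byte access XORs the address with 3 and a
     * reversed halfword access with 2, mirroring the byte lanes
     * within the 32-bit word.
     */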

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
                        mo_endian(dc) | MO_UL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, mo_endian(dc) | MO_UL);
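    /*
     * The cmpxchg stores rd only if memory still holds cpu_res_val;
     * tval receives the value actually found, so any mismatch below
     * means the reserved location changed since lwx.
     */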

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}

static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
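    /*
     * The not-taken fall-through is pc_next + 4, or pc_next + 8 when a
     * delay slot follows; movcond selects between the taken target and
     * that fall-through based on (ra <cond> 0).
     */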
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}

#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
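        /*
         * The << 1 moves MSR_UM/MSR_VM into the saved MSR_UMS/MSR_VMS
         * positions; the rt*d return paths shift right to restore them.
         */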
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

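        /*
         * Set cs->halted directly: tcg_env points at the env member of
         * MicroBlazeCPU, so back up to the containing CPUState first.
         */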
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}

static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

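/*
 * The rt*d return paths restore MSR_UM/MSR_VM from the saved
 * MSR_UMS/MSR_VMS copies: shifting MSR right by one moves the saved
 * bits into the live positions (inverse of the << 1 used by brki).
 */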
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

/* Insns connected to FSL or AXI stream attached devices.  */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

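    /*
     * Bound the TB to the current page: -(pc | TARGET_PAGE_MASK) is the
     * number of bytes left in the page, hence /4 remaining insns.
     */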
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next,
                             mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
};

void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
                       int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1889