xref: /qemu/target/sparc/translate.c (revision 68df8c8dba57f539d24f1a92a8699a179d9bb6fb)
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"
#include "target/sparc/translate.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#else
    bool fsr_qne;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;
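
/*
 * Note: dc->pc and dc->npc hold either a real instruction address
 * (always 4-byte aligned) or one of the sentinel values JUMP_PC,
 * DYNAMIC_PC and DYNAMIC_PC_LOOKUP from translate.h, all of which
 * have nonzero low bits -- hence the "npc & 3" tests below.
 */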

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
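
/*
 * Example (illustrative only): for a 32-bit instruction word insn,
 *   GET_FIELD(insn, 0, 1)      == (insn >> 30) & 3     bits 31:30, the op field
 *   GET_FIELD_SP(insn, 25, 29) == (insn >> 25) & 0x1f  bits 29:25, e.g. rd
 */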

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point register moves */

static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
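
/*
 * In short: AM_CHECK is true when 64-bit addresses must be masked to
 * 32 bits -- always for the 32-bit ABI on sparc64, and on system
 * emulation whenever the guest sets PSTATE.AM (cached in
 * dc->address_mask_32bit).  Pre-v9 targets have no address masking.
 */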

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
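
/*
 * Worked example of the flags computation above, for 32-bit icc:
 * with src1 = src2 = 0x80000000, the sum N is 0 in the low 32 bits,
 * the carry-out of bit 31 appears at bit 32 of (Z ^ N), giving icc.C,
 * and V = (N ^ src2) & ~(src1 ^ src2) has bit 31 set: adding two
 * negative numbers yielded a non-negative result, i.e. overflow.
 */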

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
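
/*
 * Note on the above: tcg_gen_sub2_tl computes {N, C} = {src1, 0} - {src2, 0},
 * so the high word is -1 exactly when src1 < src2 unsigned; the negation
 * converts that into the SPARC convention C = 1 on borrow.
 */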

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
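
/*
 * Roughly, gen_op_mulscc is one shift-and-conditionally-add step of the
 * classic V8 software multiply: src2 is added only if the LSB of %y is
 * set, the corrected sign bit (N ^ V) is shifted into the top of src1,
 * and a bit of src1 is shifted into the top of %y.  Per the V8 recipe,
 * 32 MULScc steps plus a final fixup add produce the 64-bit product
 * split across rd and %y.
 */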

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
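
/*
 * Example: UMUL 0xffffffff * 0xffffffff = 0xfffffffe00000001, so %y
 * receives 0xfffffffe (the high 32 bits) while rd receives the low
 * half (on sparc64 the code above keeps the full 64-bit product in rd).
 */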

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}

static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
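
/*
 * Example of the saturation above: fpadds32s(0x7fffffff, 1) gives
 * r = 0x80000000 and v < 0 (same-sign inputs, different-sign result);
 * since r is negative the setcond/addi pair selects t = INT32_MAX,
 * so the result saturates at 0x7fffffff instead of wrapping.
 */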

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
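
/*
 * Example: with GSR.align = 3 the code above computes
 * dst = (s1 << 24) | (s2 >> 40), i.e. bytes 3..7 of s1 followed by
 * bytes 0..2 of s2 (big-endian numbering) -- the eight bytes starting
 * at offset 3 of the concatenated s1:s2 pair, per FALIGNDATA.
 */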

static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
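
/*
 * The above computes the rounded average (a + b + 1) >> 1 per 16-bit
 * lane without a 17-bit intermediate, using the identity
 * (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1);
 * e.g. a = 5, b = 2: 2 + 1 + 1 == 4 == (5 + 2 + 1) >> 1.
 */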

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond can be elided: discarding the value here marks it dead
     * for the optimizer.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* Call this function before using the condition register, as it may
   have been set for a jump. */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
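
/*
 * Bit 3 of the branch condition field selects the negated condition
 * (e.g. 0x1 "be" versus 0x9 "bne"), which is why gen_compare and
 * gen_fcompare decode only cond & 7 and then invert for cond & 8.
 */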

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static bool gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return true;
    }
#endif
    return false;
}

static bool gen_trap_iffpexception(DisasContext *dc)
{
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    /*
     * There are 3 states for the sparc32 fpu:
     * Normally the fpu is in fp_execute, and all insns are allowed.
     * When an exception is signaled, it moves to fp_exception_pending state.
     * Upon seeing the next FPop, the fpu moves to fp_exception state,
     * populates the FQ, and generates an fp_exception trap.
     * The fpu remains in fp_exception state until FQ becomes empty
     * after execution of a STDFQ instruction.  While the fpu is in
     * fp_exception state, any FPop, fp load or fp branch insn will
     * return the fpu to fp_exception_pending state, set FSR.FTT to
     * sequence_error, and the insn will not be entered into the FQ.
     *
     * In QEMU, we do not model the fp_exception_pending state and
     * instead populate FQ and raise the exception immediately.
     * But we can still honor fp_exception state by noticing when
     * the FQ is not empty.
     */
    if (dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }
#endif
    return false;
}

static bool gen_trap_if_nofpu_fpexception(DisasContext *dc)
{
    return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc);
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to bypass the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_MON_AIUP:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_MON_AIUS:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_MON_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
        case ASI_MON_P:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
        case ASI_MON_P:
        case ASI_MON_S:
        case ASI_MON_AIUP:
        case ASI_MON_AIUS:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1766 
1767 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1768 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1769                               TCGv_i32 asi, TCGv_i32 mop)
1770 {
1771     g_assert_not_reached();
1772 }
1773 
1774 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1775                               TCGv_i32 asi, TCGv_i32 mop)
1776 {
1777     g_assert_not_reached();
1778 }
1779 #endif
1780 
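/*
 * Generate an alternate-space integer load from ADDR into DST, using the
 * ASI decoded in DA.  ASIs without an inline translation fall back to the
 * out-of-line gen_helper_ld_asi slow path.
 */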
1781 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1782 {
1783     switch (da->type) {
1784     case GET_ASI_EXCP:
1785         break;
1786     case GET_ASI_DTWINX: /* Reserved for ldda.  */
1787         gen_exception(dc, TT_ILL_INSN);
1788         break;
1789     case GET_ASI_DIRECT:
1790         tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1791         break;
1792 
1793     case GET_ASI_CODE:
1794 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1795         {
1796             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1797             TCGv_i64 t64 = tcg_temp_new_i64();
1798 
1799             gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1800             tcg_gen_trunc_i64_tl(dst, t64);
1801         }
1802         break;
1803 #else
1804         g_assert_not_reached();
1805 #endif
1806 
1807     default:
1808         {
1809             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1810             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1811 
1812             save_state(dc);
1813 #ifdef TARGET_SPARC64
1814             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1815 #else
1816             {
1817                 TCGv_i64 t64 = tcg_temp_new_i64();
1818                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1819                 tcg_gen_trunc_i64_tl(dst, t64);
1820             }
1821 #endif
1822         }
1823         break;
1824     }
1825 }
1826 
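/*
 * Generate an alternate-space integer store of SRC to ADDR.  Stores that
 * go through the helper slow path may alter MMU/TLB state, which is why
 * the TB is ended afterwards (the DYNAMIC_PC assignment below).
 */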
1827 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1828 {
1829     switch (da->type) {
1830     case GET_ASI_EXCP:
1831         break;
1832 
1833     case GET_ASI_DTWINX: /* Reserved for stda.  */
1834         if (TARGET_LONG_BITS == 32) {
1835             gen_exception(dc, TT_ILL_INSN);
1836             break;
1837         } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1838             /* Pre-OpenSPARC CPUs don't have these */
1839             gen_exception(dc, TT_ILL_INSN);
1840             break;
1841         }
1842         /* In OpenSPARC T1+ CPUs, TWINX ASIs used by stores are ST_BLKINIT_ ASIs */
1843         /* fall through */
1844 
1845     case GET_ASI_DIRECT:
1846         tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1847         break;
1848 
1849     case GET_ASI_BCOPY:
1850         assert(TARGET_LONG_BITS == 32);
1851         /*
1852          * Copy 32 bytes from the address in SRC to ADDR.
1853          *
1854          * From Ross RT625 hyperSPARC manual, section 4.6:
1855          * "Block Copy and Block Fill will work only on cache line boundaries."
1856          *
1857          * It does not specify if an unaligned address is truncated or trapped.
1858          * Previous qemu behaviour was to truncate to 4-byte alignment, which
1859          * is obviously wrong.  The only place I can see this used is in the
1860          * Linux kernel, which begins with page alignment, advancing by 32,
1861          * so is always aligned.  Assume truncation as the simpler option.
1862          *
1863          * Since the loads and stores are paired, allow the copy to happen
1864          * in the host endianness.  The copy need not be atomic.
1865          */
1866         {
1867             MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1868             TCGv saddr = tcg_temp_new();
1869             TCGv daddr = tcg_temp_new();
1870             TCGv_i128 tmp = tcg_temp_new_i128();
1871 
1872             tcg_gen_andi_tl(saddr, src, -32);
1873             tcg_gen_andi_tl(daddr, addr, -32);
1874             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1875             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1876             tcg_gen_addi_tl(saddr, saddr, 16);
1877             tcg_gen_addi_tl(daddr, daddr, 16);
1878             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1879             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1880         }
1881         break;
1882 
1883     default:
1884         {
1885             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1886             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1887 
1888             save_state(dc);
1889 #ifdef TARGET_SPARC64
1890             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1891 #else
1892             {
1893                 TCGv_i64 t64 = tcg_temp_new_i64();
1894                 tcg_gen_extu_tl_i64(t64, src);
1895                 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1896             }
1897 #endif
1898 
1899             /* A write to a TLB register may alter page maps.  End the TB. */
1900             dc->npc = DYNAMIC_PC;
1901         }
1902         break;
1903     }
1904 }
1905 
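/* SWAP(A): atomically exchange register SRC with the memory word at ADDR,
   returning the old memory value in DST.  */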
1906 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1907                          TCGv dst, TCGv src, TCGv addr)
1908 {
1909     switch (da->type) {
1910     case GET_ASI_EXCP:
1911         break;
1912     case GET_ASI_DIRECT:
1913         tcg_gen_atomic_xchg_tl(dst, addr, src,
1914                                da->mem_idx, da->memop | MO_ALIGN);
1915         break;
1916     default:
1917         /* ??? Should be DAE_invalid_asi.  */
1918         gen_exception(dc, TT_DATA_ACCESS);
1919         break;
1920     }
1921 }
1922 
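/* CASA/CASXA: compare CMPV with the memory word at ADDR; if equal, store
   NEWV.  The old memory value is returned in OLDV either way.  */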
1923 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1924                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1925 {
1926     switch (da->type) {
1927     case GET_ASI_EXCP:
1928         return;
1929     case GET_ASI_DIRECT:
1930         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1931                                   da->mem_idx, da->memop | MO_ALIGN);
1932         break;
1933     default:
1934         /* ??? Should be DAE_invalid_asi.  */
1935         gen_exception(dc, TT_DATA_ACCESS);
1936         break;
1937     }
1938 }
1939 
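/* LDSTUB(A): atomically load the byte at ADDR into DST and set the memory
   byte to 0xff.  */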
1940 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1941 {
1942     switch (da->type) {
1943     case GET_ASI_EXCP:
1944         break;
1945     case GET_ASI_DIRECT:
1946         tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1947                                da->mem_idx, MO_UB);
1948         break;
1949     default:
1950         /* ??? In theory, this should raise DAE_invalid_asi.
1951            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
1952         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1953             gen_helper_exit_atomic(tcg_env);
1954         } else {
1955             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1956             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1957             TCGv_i64 s64, t64;
1958 
1959             save_state(dc);
1960             t64 = tcg_temp_new_i64();
1961             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1962 
1963             s64 = tcg_constant_i64(0xff);
1964             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1965 
1966             tcg_gen_trunc_i64_tl(dst, t64);
1967 
1968             /* End the TB.  */
1969             dc->npc = DYNAMIC_PC;
1970         }
1971         break;
1972     }
1973 }
1974 
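/*
 * Generate an alternate-space floating-point load (ldfa/lddfa/ldqfa and
 * the block/short variants) into the fp register file at RD.
 */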
1975 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1976                         TCGv addr, int rd)
1977 {
1978     MemOp memop = da->memop;
1979     MemOp size = memop & MO_SIZE;
1980     TCGv_i32 d32;
1981     TCGv_i64 d64, l64;
1982     TCGv addr_tmp;
1983 
1984     /* TODO: Use 128-bit load/store below. */
1985     if (size == MO_128) {
1986         memop = (memop & ~MO_SIZE) | MO_64;
1987     }
1988 
1989     switch (da->type) {
1990     case GET_ASI_EXCP:
1991         break;
1992 
1993     case GET_ASI_DIRECT:
1994         memop |= MO_ALIGN_4;
1995         switch (size) {
1996         case MO_32:
1997             d32 = tcg_temp_new_i32();
1998             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1999             gen_store_fpr_F(dc, rd, d32);
2000             break;
2001 
2002         case MO_64:
2003             d64 = tcg_temp_new_i64();
2004             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2005             gen_store_fpr_D(dc, rd, d64);
2006             break;
2007 
2008         case MO_128:
2009             d64 = tcg_temp_new_i64();
2010             l64 = tcg_temp_new_i64();
2011             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2012             addr_tmp = tcg_temp_new();
2013             tcg_gen_addi_tl(addr_tmp, addr, 8);
2014             tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
2015             gen_store_fpr_D(dc, rd, d64);
2016             gen_store_fpr_D(dc, rd + 2, l64);
2017             break;
2018         default:
2019             g_assert_not_reached();
2020         }
2021         break;
2022 
2023     case GET_ASI_BLOCK:
2024         /* Valid for lddfa on aligned registers only.  */
2025         if (orig_size == MO_64 && (rd & 7) == 0) {
2026             /* The first operation checks required alignment.  */
2027             addr_tmp = tcg_temp_new();
2028             d64 = tcg_temp_new_i64();
2029             for (int i = 0; ; ++i) {
2030                 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
2031                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2032                 gen_store_fpr_D(dc, rd + 2 * i, d64);
2033                 if (i == 7) {
2034                     break;
2035                 }
2036                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2037                 addr = addr_tmp;
2038             }
2039         } else {
2040             gen_exception(dc, TT_ILL_INSN);
2041         }
2042         break;
2043 
2044     case GET_ASI_SHORT:
2045         /* Valid for lddfa only.  */
2046         if (orig_size == MO_64) {
2047             d64 = tcg_temp_new_i64();
2048             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2049             gen_store_fpr_D(dc, rd, d64);
2050         } else {
2051             gen_exception(dc, TT_ILL_INSN);
2052         }
2053         break;
2054 
2055     default:
2056         {
2057             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2058             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2059 
2060             save_state(dc);
2061             /* According to the table in the UA2011 manual, the only
2062                other asis that are valid for ldfa/lddfa/ldqfa are
2063                the NO_FAULT asis.  We still need a helper for these,
2064                but we can just use the integer asi helper for them.  */
2065             switch (size) {
2066             case MO_32:
2067                 d64 = tcg_temp_new_i64();
2068                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2069                 d32 = tcg_temp_new_i32();
2070                 tcg_gen_extrl_i64_i32(d32, d64);
2071                 gen_store_fpr_F(dc, rd, d32);
2072                 break;
2073             case MO_64:
2074                 d64 = tcg_temp_new_i64();
2075                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2076                 gen_store_fpr_D(dc, rd, d64);
2077                 break;
2078             case MO_128:
2079                 d64 = tcg_temp_new_i64();
2080                 l64 = tcg_temp_new_i64();
2081                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2082                 addr_tmp = tcg_temp_new();
2083                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2084                 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
2085                 gen_store_fpr_D(dc, rd, d64);
2086                 gen_store_fpr_D(dc, rd + 2, l64);
2087                 break;
2088             default:
2089                 g_assert_not_reached();
2090             }
2091         }
2092         break;
2093     }
2094 }
2095 
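/*
 * Generate an alternate-space floating-point store (stfa/stdfa/stqfa and
 * the block/short variants) from the fp register file at RD.
 */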
2096 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2097                         TCGv addr, int rd)
2098 {
2099     MemOp memop = da->memop;
2100     MemOp size = memop & MO_SIZE;
2101     TCGv_i32 d32;
2102     TCGv_i64 d64;
2103     TCGv addr_tmp;
2104 
2105     /* TODO: Use 128-bit load/store below. */
2106     if (size == MO_128) {
2107         memop = (memop & ~MO_SIZE) | MO_64;
2108     }
2109 
2110     switch (da->type) {
2111     case GET_ASI_EXCP:
2112         break;
2113 
2114     case GET_ASI_DIRECT:
2115         memop |= MO_ALIGN_4;
2116         switch (size) {
2117         case MO_32:
2118             d32 = gen_load_fpr_F(dc, rd);
2119             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2120             break;
2121         case MO_64:
2122             d64 = gen_load_fpr_D(dc, rd);
2123             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2124             break;
2125         case MO_128:
2126             /* Only 4-byte alignment required.  However, it is legal for the
2127                cpu to signal the alignment fault, and the OS trap handler is
2128                required to fix it up.  Requiring 16-byte alignment here avoids
2129                having to probe the second page before performing the first
2130                write.  */
2131             d64 = gen_load_fpr_D(dc, rd);
2132             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2133             addr_tmp = tcg_temp_new();
2134             tcg_gen_addi_tl(addr_tmp, addr, 8);
2135             d64 = gen_load_fpr_D(dc, rd + 2);
2136             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2137             break;
2138         default:
2139             g_assert_not_reached();
2140         }
2141         break;
2142 
2143     case GET_ASI_BLOCK:
2144         /* Valid for stdfa on aligned registers only.  */
2145         if (orig_size == MO_64 && (rd & 7) == 0) {
2146             /* The first operation checks required alignment.  */
2147             addr_tmp = tcg_temp_new();
2148             for (int i = 0; ; ++i) {
2149                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2150                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2151                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2152                 if (i == 7) {
2153                     break;
2154                 }
2155                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2156                 addr = addr_tmp;
2157             }
2158         } else {
2159             gen_exception(dc, TT_ILL_INSN);
2160         }
2161         break;
2162 
2163     case GET_ASI_SHORT:
2164         /* Valid for stdfa only.  */
2165         if (orig_size == MO_64) {
2166             d64 = gen_load_fpr_D(dc, rd);
2167             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2168         } else {
2169             gen_exception(dc, TT_ILL_INSN);
2170         }
2171         break;
2172 
2173     default:
2174         /* According to the table in the UA2011 manual, the only
2175            other asis that are valid for stfa/stdfa/stqfa are
2176            the PST* asis, which aren't currently handled.  */
2177         gen_exception(dc, TT_ILL_INSN);
2178         break;
2179     }
2180 }
2181 
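/* LDDA (and the sparc64 TWINX forms): load into the GPR pair rd/rd+1.  */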
2182 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2183 {
2184     TCGv hi = gen_dest_gpr(dc, rd);
2185     TCGv lo = gen_dest_gpr(dc, rd + 1);
2186 
2187     switch (da->type) {
2188     case GET_ASI_EXCP:
2189         return;
2190 
2191     case GET_ASI_DTWINX:
2192 #ifdef TARGET_SPARC64
2193         {
2194             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2195             TCGv_i128 t = tcg_temp_new_i128();
2196 
2197             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2198             /*
2199              * Note that LE twinx acts as if each 64-bit register result is
2200              * byte swapped.  We perform one 128-bit LE load, so must swap
2201              * the order of the writebacks.
2202              */
2203             if ((mop & MO_BSWAP) == MO_TE) {
2204                 tcg_gen_extr_i128_i64(lo, hi, t);
2205             } else {
2206                 tcg_gen_extr_i128_i64(hi, lo, t);
2207             }
2208         }
2209         break;
2210 #else
2211         g_assert_not_reached();
2212 #endif
2213 
2214     case GET_ASI_DIRECT:
2215         {
2216             TCGv_i64 tmp = tcg_temp_new_i64();
2217 
2218             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2219 
2220             /* Note that LE ldda acts as if each 32-bit register
2221                result is byte swapped.  Having just performed one
2222                64-bit bswap, we now need to swap the writebacks.  */
2223             if ((da->memop & MO_BSWAP) == MO_TE) {
2224                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2225             } else {
2226                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2227             }
2228         }
2229         break;
2230 
2231     case GET_ASI_CODE:
2232 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2233         {
2234             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2235             TCGv_i64 tmp = tcg_temp_new_i64();
2236 
2237             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2238 
2239             /* See above.  */
2240             if ((da->memop & MO_BSWAP) == MO_TE) {
2241                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2242             } else {
2243                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2244             }
2245         }
2246         break;
2247 #else
2248         g_assert_not_reached();
2249 #endif
2250 
2251     default:
2252         /* ??? In theory we've handled all of the ASIs that are valid
2253            for ldda, and this should raise DAE_invalid_asi.  However,
2254            real hardware allows others.  This can be seen with e.g.
2255            FreeBSD 10.3 wrt ASI_IC_TAG.  */
2256         {
2257             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2258             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2259             TCGv_i64 tmp = tcg_temp_new_i64();
2260 
2261             save_state(dc);
2262             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2263 
2264             /* See above.  */
2265             if ((da->memop & MO_BSWAP) == MO_TE) {
2266                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2267             } else {
2268                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2269             }
2270         }
2271         break;
2272     }
2273 
2274     gen_store_gpr(dc, rd, hi);
2275     gen_store_gpr(dc, rd + 1, lo);
2276 }
2277 
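/* STDA (and the sparc64 TWINX forms): store the GPR pair rd/rd+1.  */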
2278 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2279 {
2280     TCGv hi = gen_load_gpr(dc, rd);
2281     TCGv lo = gen_load_gpr(dc, rd + 1);
2282 
2283     switch (da->type) {
2284     case GET_ASI_EXCP:
2285         break;
2286 
2287     case GET_ASI_DTWINX:
2288 #ifdef TARGET_SPARC64
2289         {
2290             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2291             TCGv_i128 t = tcg_temp_new_i128();
2292 
2293             /*
2294              * Note that LE twinx acts as if each 64-bit register result is
2295              * byte swapped.  We perform one 128-bit LE store, so must swap
2296              * the order of the construction.
2297              */
2298             if ((mop & MO_BSWAP) == MO_TE) {
2299                 tcg_gen_concat_i64_i128(t, lo, hi);
2300             } else {
2301                 tcg_gen_concat_i64_i128(t, hi, lo);
2302             }
2303             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2304         }
2305         break;
2306 #else
2307         g_assert_not_reached();
2308 #endif
2309 
2310     case GET_ASI_DIRECT:
2311         {
2312             TCGv_i64 t64 = tcg_temp_new_i64();
2313 
2314             /* Note that LE stda acts as if each 32-bit register result is
2315                byte swapped.  We will perform one 64-bit LE store, so now
2316                we must swap the order of the construction.  */
2317             if ((da->memop & MO_BSWAP) == MO_TE) {
2318                 tcg_gen_concat_tl_i64(t64, lo, hi);
2319             } else {
2320                 tcg_gen_concat_tl_i64(t64, hi, lo);
2321             }
2322             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2323         }
2324         break;
2325 
2326     case GET_ASI_BFILL:
2327         assert(TARGET_LONG_BITS == 32);
2328         /*
2329          * Store 32 bytes of [rd:rd+1] to ADDR.
2330          * See comments for GET_ASI_BCOPY above.
2331          */
2332         {
2333             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2334             TCGv_i64 t8 = tcg_temp_new_i64();
2335             TCGv_i128 t16 = tcg_temp_new_i128();
2336             TCGv daddr = tcg_temp_new();
2337 
2338             tcg_gen_concat_tl_i64(t8, lo, hi);
2339             tcg_gen_concat_i64_i128(t16, t8, t8);
2340             tcg_gen_andi_tl(daddr, addr, -32);
2341             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2342             tcg_gen_addi_tl(daddr, daddr, 16);
2343             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2344         }
2345         break;
2346 
2347     default:
2348         /* ??? In theory we've handled all of the ASIs that are valid
2349            for stda, and this should raise DAE_invalid_asi.  */
2350         {
2351             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2352             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2353             TCGv_i64 t64 = tcg_temp_new_i64();
2354 
2355             /* See above.  */
2356             if ((da->memop & MO_BSWAP) == MO_TE) {
2357                 tcg_gen_concat_tl_i64(t64, lo, hi);
2358             } else {
2359                 tcg_gen_concat_tl_i64(t64, hi, lo);
2360             }
2361 
2362             save_state(dc);
2363             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2364         }
2365         break;
2366     }
2367 }
2368 
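/* FMOVScc/FMOVDcc/FMOVQcc: if the condition holds, copy fp reg RS to RD.  */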
2369 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2370 {
2371 #ifdef TARGET_SPARC64
2372     TCGv_i32 c32, zero, dst, s1, s2;
2373     TCGv_i64 c64 = tcg_temp_new_i64();
2374 
2375     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2376        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2377        the latter.  */
2378     c32 = tcg_temp_new_i32();
2379     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2380     tcg_gen_extrl_i64_i32(c32, c64);
2381 
2382     s1 = gen_load_fpr_F(dc, rs);
2383     s2 = gen_load_fpr_F(dc, rd);
2384     dst = tcg_temp_new_i32();
2385     zero = tcg_constant_i32(0);
2386 
2387     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2388 
2389     gen_store_fpr_F(dc, rd, dst);
2390 #else
2391     qemu_build_not_reached();
2392 #endif
2393 }
2394 
2395 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2396 {
2397 #ifdef TARGET_SPARC64
2398     TCGv_i64 dst = tcg_temp_new_i64();
2399     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2400                         gen_load_fpr_D(dc, rs),
2401                         gen_load_fpr_D(dc, rd));
2402     gen_store_fpr_D(dc, rd, dst);
2403 #else
2404     qemu_build_not_reached();
2405 #endif
2406 }
2407 
2408 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2409 {
2410 #ifdef TARGET_SPARC64
2411     TCGv c2 = tcg_constant_tl(cmp->c2);
2412     TCGv_i64 h = tcg_temp_new_i64();
2413     TCGv_i64 l = tcg_temp_new_i64();
2414 
2415     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2416                         gen_load_fpr_D(dc, rs),
2417                         gen_load_fpr_D(dc, rd));
2418     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2419                         gen_load_fpr_D(dc, rs + 2),
2420                         gen_load_fpr_D(dc, rd + 2));
2421     gen_store_fpr_D(dc, rd, h);
2422     gen_store_fpr_D(dc, rd + 2, l);
2423 #else
2424     qemu_build_not_reached();
2425 #endif
2426 }
2427 
2428 #ifdef TARGET_SPARC64
2429 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2430 {
2431     TCGv_i32 r_tl = tcg_temp_new_i32();
2432 
2433     /* load env->tl into r_tl */
2434     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2435 
2436     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 minus 1 */
2437     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2438 
2439     /* calculate offset to current trap state from env->ts, reuse r_tl */
2440     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2441     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2442 
2443     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2444     {
2445         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2446         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2447         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2448     }
2449 }
2450 #endif
2451 
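/*
 * Double/quad fp register numbers encode bit 5 of the register number in
 * bit 0 of the instruction field; only sparc64 has the upper bank, so the
 * bit is folded in only there.
 */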
2452 static int extract_dfpreg(DisasContext *dc, int x)
2453 {
2454     int r = x & 0x1e;
2455 #ifdef TARGET_SPARC64
2456     r |= (x & 1) << 5;
2457 #endif
2458     return r;
2459 }
2460 
2461 static int extract_qfpreg(DisasContext *dc, int x)
2462 {
2463     int r = x & 0x1c;
2464 #ifdef TARGET_SPARC64
2465     r |= (x & 1) << 5;
2466 #endif
2467     return r;
2468 }
2469 
2470 /* Include the auto-generated decoder.  */
2471 #include "decode-insns.c.inc"
2472 
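/*
 * Glue between the generated decoder and the implementations above: each
 * insn is gated on an availability predicate before its body runs.  For
 * example, TRANS(Bicc, ALL, do_bpcc, a) expands to
 *     static bool trans_Bicc(DisasContext *dc, arg_Bicc *a)
 *     { return avail_ALL(dc) && do_bpcc(dc, a); }
 */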
2473 #define TRANS(NAME, AVAIL, FUNC, ...) \
2474     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2475     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2476 
2477 #define avail_ALL(C)      true
2478 #ifdef TARGET_SPARC64
2479 # define avail_32(C)      false
2480 # define avail_ASR17(C)   false
2481 # define avail_CASA(C)    true
2482 # define avail_DIV(C)     true
2483 # define avail_MUL(C)     true
2484 # define avail_POWERDOWN(C) false
2485 # define avail_64(C)      true
2486 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2487 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2488 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2489 # define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
2490 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2491 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2492 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2493 # define avail_VIS3B(C)   avail_VIS3(C)
2494 # define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
2495 #else
2496 # define avail_32(C)      true
2497 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2498 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2499 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2500 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2501 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2502 # define avail_64(C)      false
2503 # define avail_FMAF(C)    false
2504 # define avail_GL(C)      false
2505 # define avail_HYPV(C)    false
2506 # define avail_IMA(C)     false
2507 # define avail_VIS1(C)    false
2508 # define avail_VIS2(C)    false
2509 # define avail_VIS3(C)    false
2510 # define avail_VIS3B(C)   false
2511 # define avail_VIS4(C)    false
2512 #endif
2513 
2514 /* Default case for non-jump instructions. */
2515 static bool advance_pc(DisasContext *dc)
2516 {
2517     TCGLabel *l1;
2518 
2519     finishing_insn(dc);
2520 
2521     if (dc->npc & 3) {
2522         switch (dc->npc) {
2523         case DYNAMIC_PC:
2524         case DYNAMIC_PC_LOOKUP:
2525             dc->pc = dc->npc;
2526             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2527             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2528             break;
2529 
2530         case JUMP_PC:
2531             /* we can do a static jump */
2532             l1 = gen_new_label();
2533             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2534 
2535             /* jump not taken */
2536             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2537 
2538             /* jump taken */
2539             gen_set_label(l1);
2540             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2541 
2542             dc->base.is_jmp = DISAS_NORETURN;
2543             break;
2544 
2545         default:
2546             g_assert_not_reached();
2547         }
2548     } else {
2549         dc->pc = dc->npc;
2550         dc->npc = dc->npc + 4;
2551     }
2552     return true;
2553 }
2554 
2555 /*
2556  * Major opcodes 00 and 01 -- branches, call, and sethi
2557  */
2558 
2559 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2560                               bool annul, int disp)
2561 {
2562     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2563     target_ulong npc;
2564 
2565     finishing_insn(dc);
2566 
2567     if (cmp->cond == TCG_COND_ALWAYS) {
2568         if (annul) {
2569             dc->pc = dest;
2570             dc->npc = dest + 4;
2571         } else {
2572             gen_mov_pc_npc(dc);
2573             dc->npc = dest;
2574         }
2575         return true;
2576     }
2577 
2578     if (cmp->cond == TCG_COND_NEVER) {
2579         npc = dc->npc;
2580         if (npc & 3) {
2581             gen_mov_pc_npc(dc);
2582             if (annul) {
2583                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2584             }
2585             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2586         } else {
2587             dc->pc = npc + (annul ? 4 : 0);
2588             dc->npc = dc->pc + 4;
2589         }
2590         return true;
2591     }
2592 
2593     flush_cond(dc);
2594     npc = dc->npc;
2595 
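    /*
     * Conditional branch.  When the annul bit is set, the delay slot is
     * executed only on the taken path; the not-taken path resumes at
     * npc + 4, skipping the slot.
     */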
2596     if (annul) {
2597         TCGLabel *l1 = gen_new_label();
2598 
2599         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2600         gen_goto_tb(dc, 0, npc, dest);
2601         gen_set_label(l1);
2602         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2603 
2604         dc->base.is_jmp = DISAS_NORETURN;
2605     } else {
2606         if (npc & 3) {
2607             switch (npc) {
2608             case DYNAMIC_PC:
2609             case DYNAMIC_PC_LOOKUP:
2610                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2611                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2612                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2613                                    cmp->c1, tcg_constant_tl(cmp->c2),
2614                                    tcg_constant_tl(dest), cpu_npc);
2615                 dc->pc = npc;
2616                 break;
2617             default:
2618                 g_assert_not_reached();
2619             }
2620         } else {
2621             dc->pc = npc;
2622             dc->npc = JUMP_PC;
2623             dc->jump = *cmp;
2624             dc->jump_pc[0] = dest;
2625             dc->jump_pc[1] = npc + 4;
2626 
2627             /* The condition for cpu_cond is always NE -- normalize. */
2628             if (cmp->cond == TCG_COND_NE) {
2629                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2630             } else {
2631                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2632             }
2633             dc->cpu_cond_live = true;
2634         }
2635     }
2636     return true;
2637 }
2638 
2639 static bool raise_priv(DisasContext *dc)
2640 {
2641     gen_exception(dc, TT_PRIV_INSN);
2642     return true;
2643 }
2644 
2645 static bool raise_unimpfpop(DisasContext *dc)
2646 {
2647     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2648     return true;
2649 }
2650 
2651 static bool gen_trap_float128(DisasContext *dc)
2652 {
2653     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2654         return false;
2655     }
2656     return raise_unimpfpop(dc);
2657 }
2658 
2659 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2660 {
2661     DisasCompare cmp;
2662 
2663     gen_compare(&cmp, a->cc, a->cond, dc);
2664     return advance_jump_cond(dc, &cmp, a->a, a->i);
2665 }
2666 
2667 TRANS(Bicc, ALL, do_bpcc, a)
2668 TRANS(BPcc,  64, do_bpcc, a)
2669 
2670 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2671 {
2672     DisasCompare cmp;
2673 
2674     if (gen_trap_if_nofpu_fpexception(dc)) {
2675         return true;
2676     }
2677     gen_fcompare(&cmp, a->cc, a->cond);
2678     return advance_jump_cond(dc, &cmp, a->a, a->i);
2679 }
2680 
2681 TRANS(FBPfcc,  64, do_fbpfcc, a)
2682 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2683 
2684 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2685 {
2686     DisasCompare cmp;
2687 
2688     if (!avail_64(dc)) {
2689         return false;
2690     }
2691     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2692         return false;
2693     }
2694     return advance_jump_cond(dc, &cmp, a->a, a->i);
2695 }
2696 
2697 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2698 {
2699     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2700 
2701     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2702     gen_mov_pc_npc(dc);
2703     dc->npc = target;
2704     return true;
2705 }
2706 
2707 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2708 {
2709     /*
2710      * For sparc32, always generate the no-coprocessor exception.
2711      * For sparc64, always generate illegal instruction.
2712      */
2713 #ifdef TARGET_SPARC64
2714     return false;
2715 #else
2716     gen_exception(dc, TT_NCP_INSN);
2717     return true;
2718 #endif
2719 }
2720 
2721 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2722 {
2723     /* Special-case %g0 because that's the canonical nop.  */
2724     if (a->rd) {
2725         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2726     }
2727     return advance_pc(dc);
2728 }
2729 
2730 /*
2731  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2732  */
2733 
2734 static bool do_tcc(DisasContext *dc, int cond, int cc,
2735                    int rs1, bool imm, int rs2_or_imm)
2736 {
2737     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2738                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2739     DisasCompare cmp;
2740     TCGLabel *lab;
2741     TCGv_i32 trap;
2742 
2743     /* Trap never.  */
2744     if (cond == 0) {
2745         return advance_pc(dc);
2746     }
2747 
2748     /*
2749      * Immediate traps are the most common case.  Since this value is
2750      * live across the branch, it really pays to evaluate the constant.
2751      */
2752     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2753         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2754     } else {
2755         trap = tcg_temp_new_i32();
2756         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2757         if (imm) {
2758             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2759         } else {
2760             TCGv_i32 t2 = tcg_temp_new_i32();
2761             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2762             tcg_gen_add_i32(trap, trap, t2);
2763         }
2764         tcg_gen_andi_i32(trap, trap, mask);
2765         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2766     }
2767 
2768     finishing_insn(dc);
2769 
2770     /* Trap always.  */
2771     if (cond == 8) {
2772         save_state(dc);
2773         gen_helper_raise_exception(tcg_env, trap);
2774         dc->base.is_jmp = DISAS_NORETURN;
2775         return true;
2776     }
2777 
2778     /* Conditional trap.  */
2779     flush_cond(dc);
2780     lab = delay_exceptionv(dc, trap);
2781     gen_compare(&cmp, cc, cond, dc);
2782     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2783 
2784     return advance_pc(dc);
2785 }
2786 
2787 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2788 {
2789     if (avail_32(dc) && a->cc) {
2790         return false;
2791     }
2792     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2793 }
2794 
2795 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2796 {
2797     if (avail_64(dc)) {
2798         return false;
2799     }
2800     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2801 }
2802 
2803 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2804 {
2805     if (avail_32(dc)) {
2806         return false;
2807     }
2808     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2809 }
2810 
2811 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2812 {
2813     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2814     return advance_pc(dc);
2815 }
2816 
2817 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2818 {
2819     if (avail_32(dc)) {
2820         return false;
2821     }
2822     if (a->mmask) {
2823         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2824         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2825     }
2826     if (a->cmask) {
2827         /* For #Sync, etc, end the TB to recognize interrupts. */
2828         dc->base.is_jmp = DISAS_EXIT;
2829     }
2830     return advance_pc(dc);
2831 }
2832 
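/*
 * Common glue for reading special registers: raise a privileged insn trap
 * when !PRIV, otherwise store FUNC's result into RD and advance the pc.
 */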
2833 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2834                           TCGv (*func)(DisasContext *, TCGv))
2835 {
2836     if (!priv) {
2837         return raise_priv(dc);
2838     }
2839     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2840     return advance_pc(dc);
2841 }
2842 
2843 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2844 {
2845     return cpu_y;
2846 }
2847 
2848 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2849 {
2850     /*
2851      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2852      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2853      * This pattern matches after all other ASRs, so the Leon3 %asr17 is handled first.
2854      */
2855     if (avail_64(dc) && a->rs1 != 0) {
2856         return false;
2857     }
2858     return do_rd_special(dc, true, a->rd, do_rdy);
2859 }
2860 
2861 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2862 {
2863     gen_helper_rdasr17(dst, tcg_env);
2864     return dst;
2865 }
2866 
2867 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2868 
2869 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2870 {
2871     gen_helper_rdccr(dst, tcg_env);
2872     return dst;
2873 }
2874 
2875 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2876 
2877 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2878 {
2879 #ifdef TARGET_SPARC64
2880     return tcg_constant_tl(dc->asi);
2881 #else
2882     qemu_build_not_reached();
2883 #endif
2884 }
2885 
2886 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2887 
2888 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2889 {
2890     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2891 
2892     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2893     if (translator_io_start(&dc->base)) {
2894         dc->base.is_jmp = DISAS_EXIT;
2895     }
2896     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2897                               tcg_constant_i32(dc->mem_idx));
2898     return dst;
2899 }
2900 
2901 /* TODO: non-priv access only allowed when enabled. */
2902 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2903 
2904 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2905 {
2906     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2907 }
2908 
2909 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2910 
2911 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2912 {
2913     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2914     return dst;
2915 }
2916 
2917 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2918 
2919 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2920 {
2921     gen_trap_ifnofpu(dc);
2922     return cpu_gsr;
2923 }
2924 
2925 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2926 
2927 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2928 {
2929     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2930     return dst;
2931 }
2932 
2933 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2934 
2935 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2936 {
2937     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2938     return dst;
2939 }
2940 
2941 /* TODO: non-priv access only allowed when enabled. */
2942 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2943 
2944 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2945 {
2946     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2947 
2948     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2949     if (translator_io_start(&dc->base)) {
2950         dc->base.is_jmp = DISAS_EXIT;
2951     }
2952     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2953                               tcg_constant_i32(dc->mem_idx));
2954     return dst;
2955 }
2956 
2957 /* TODO: non-priv access only allowed when enabled. */
2958 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2959 
2960 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2961 {
2962     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2963     return dst;
2964 }
2965 
2966 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2967 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2968 
2969 /*
2970  * UltraSPARC-T1 Strand status.
2971  * The HYPV check may not be enough: UA2005 & UA2007 describe
2972  * this ASR as implementation dependent.
2973  */
2974 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2975 {
2976     return tcg_constant_tl(1);
2977 }
2978 
2979 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2980 
2981 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2982 {
2983     gen_helper_rdpsr(dst, tcg_env);
2984     return dst;
2985 }
2986 
2987 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2988 
2989 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2990 {
2991     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2992     return dst;
2993 }
2994 
2995 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2996 
2997 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2998 {
2999     TCGv_i32 tl = tcg_temp_new_i32();
3000     TCGv_ptr tp = tcg_temp_new_ptr();
3001 
3002     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3003     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3004     tcg_gen_shli_i32(tl, tl, 3);
3005     tcg_gen_ext_i32_ptr(tp, tl);
3006     tcg_gen_add_ptr(tp, tp, tcg_env);
3007 
3008     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3009     return dst;
3010 }
3011 
3012 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3013 
3014 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3015 {
3016     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3017     return dst;
3018 }
3019 
3020 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3021 
3022 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3023 {
3024     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3025     return dst;
3026 }
3027 
3028 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3029 
3030 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3031 {
3032     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3033     return dst;
3034 }
3035 
3036 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3037 
3038 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3039 {
3040     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3041     return dst;
3042 }
3043 
3044 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3045       do_rdhstick_cmpr)
3046 
3047 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3048 {
3049     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3050     return dst;
3051 }
3052 
3053 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3054 
3055 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3056 {
3057 #ifdef TARGET_SPARC64
3058     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3059 
3060     gen_load_trap_state_at_tl(r_tsptr);
3061     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3062     return dst;
3063 #else
3064     qemu_build_not_reached();
3065 #endif
3066 }
3067 
3068 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3069 
3070 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3071 {
3072 #ifdef TARGET_SPARC64
3073     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3074 
3075     gen_load_trap_state_at_tl(r_tsptr);
3076     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3077     return dst;
3078 #else
3079     qemu_build_not_reached();
3080 #endif
3081 }
3082 
3083 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3084 
3085 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3086 {
3087 #ifdef TARGET_SPARC64
3088     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3089 
3090     gen_load_trap_state_at_tl(r_tsptr);
3091     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3092     return dst;
3093 #else
3094     qemu_build_not_reached();
3095 #endif
3096 }
3097 
3098 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3099 
3100 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3101 {
3102 #ifdef TARGET_SPARC64
3103     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3104 
3105     gen_load_trap_state_at_tl(r_tsptr);
3106     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3107     return dst;
3108 #else
3109     qemu_build_not_reached();
3110 #endif
3111 }
3112 
3113 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3114 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3115 
3116 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3117 {
3118     return cpu_tbr;
3119 }
3120 
3121 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3122 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3123 
3124 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3125 {
3126     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3127     return dst;
3128 }
3129 
3130 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3131 
3132 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3133 {
3134     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3135     return dst;
3136 }
3137 
3138 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3139 
3140 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3141 {
3142     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3143     return dst;
3144 }
3145 
3146 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3147 
3148 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3149 {
3150     gen_helper_rdcwp(dst, tcg_env);
3151     return dst;
3152 }
3153 
3154 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3155 
3156 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3157 {
3158     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3159     return dst;
3160 }
3161 
3162 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3163 
3164 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3165 {
3166     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3167     return dst;
3168 }
3169 
3170 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3171       do_rdcanrestore)
3172 
3173 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3174 {
3175     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3176     return dst;
3177 }
3178 
3179 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3180 
3181 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3182 {
3183     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3184     return dst;
3185 }
3186 
3187 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3188 
3189 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3190 {
3191     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3192     return dst;
3193 }
3194 
3195 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3196 
3197 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3198 {
3199     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3200     return dst;
3201 }
3202 
3203 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3204 
3205 /* UA2005 strand status */
3206 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3207 {
3208     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3209     return dst;
3210 }
3211 
3212 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3213 
3214 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3215 {
3216     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3217     return dst;
3218 }
3219 
3220 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3221 
3222 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3223 {
3224     if (avail_64(dc)) {
3225         gen_helper_flushw(tcg_env);
3226         return advance_pc(dc);
3227     }
3228     return false;
3229 }
3230 
3231 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3232                           void (*func)(DisasContext *, TCGv))
3233 {
3234     TCGv src;
3235 
3236     /* For simplicity, we under-decoded the rs2 form. */
3237     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3238         return false;
3239     }
3240     if (!priv) {
3241         return raise_priv(dc);
3242     }
3243 
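    /*
     * Architecturally, WR%special writes rs1 ^ rs2 (or rs1 ^ simm13);
     * hence the xor below, and the constant fast path when rs1 is %g0.
     */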
3244     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3245         src = tcg_constant_tl(a->rs2_or_imm);
3246     } else {
3247         TCGv src1 = gen_load_gpr(dc, a->rs1);
3248         if (a->rs2_or_imm == 0) {
3249             src = src1;
3250         } else {
3251             src = tcg_temp_new();
3252             if (a->imm) {
3253                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3254             } else {
3255                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3256             }
3257         }
3258     }
3259     func(dc, src);
3260     return advance_pc(dc);
3261 }
3262 
3263 static void do_wry(DisasContext *dc, TCGv src)
3264 {
3265     tcg_gen_ext32u_tl(cpu_y, src);
3266 }
3267 
3268 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3269 
3270 static void do_wrccr(DisasContext *dc, TCGv src)
3271 {
3272     gen_helper_wrccr(tcg_env, src);
3273 }
3274 
3275 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3276 
3277 static void do_wrasi(DisasContext *dc, TCGv src)
3278 {
3279     TCGv tmp = tcg_temp_new();
3280 
3281     tcg_gen_ext8u_tl(tmp, src);
3282     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3283     /* End TB to notice changed ASI. */
3284     dc->base.is_jmp = DISAS_EXIT;
3285 }
3286 
3287 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3288 
3289 static void do_wrfprs(DisasContext *dc, TCGv src)
3290 {
3291 #ifdef TARGET_SPARC64
3292     tcg_gen_trunc_tl_i32(cpu_fprs, src);
3293     dc->fprs_dirty = 0;
3294     dc->base.is_jmp = DISAS_EXIT;
3295 #else
3296     qemu_build_not_reached();
3297 #endif
3298 }
3299 
3300 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3301 
3302 static void do_wrgsr(DisasContext *dc, TCGv src)
3303 {
3304     gen_trap_ifnofpu(dc);
3305     tcg_gen_mov_tl(cpu_gsr, src);
3306 }
3307 
3308 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3309 
3310 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3311 {
3312     gen_helper_set_softint(tcg_env, src);
3313 }
3314 
3315 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3316 
3317 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3318 {
3319     gen_helper_clear_softint(tcg_env, src);
3320 }
3321 
3322 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3323 
3324 static void do_wrsoftint(DisasContext *dc, TCGv src)
3325 {
3326     gen_helper_write_softint(tcg_env, src);
3327 }
3328 
3329 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3330 
3331 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3332 {
3333     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3334 
3335     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3336     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3337     translator_io_start(&dc->base);
3338     gen_helper_tick_set_limit(r_tickptr, src);
3339     /* End TB to handle timer interrupt */
3340     dc->base.is_jmp = DISAS_EXIT;
3341 }
3342 
3343 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3344 
3345 static void do_wrstick(DisasContext *dc, TCGv src)
3346 {
3347 #ifdef TARGET_SPARC64
3348     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3349 
3350     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3351     translator_io_start(&dc->base);
3352     gen_helper_tick_set_count(r_tickptr, src);
3353     /* End TB to handle timer interrupt */
3354     dc->base.is_jmp = DISAS_EXIT;
3355 #else
3356     qemu_build_not_reached();
3357 #endif
3358 }
3359 
3360 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3361 
3362 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3363 {
3364     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3365 
3366     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3367     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3368     translator_io_start(&dc->base);
3369     gen_helper_tick_set_limit(r_tickptr, src);
3370     /* End TB to handle timer interrupt */
3371     dc->base.is_jmp = DISAS_EXIT;
3372 }
3373 
3374 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3375 
3376 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3377 {
3378     finishing_insn(dc);
3379     save_state(dc);
3380     gen_helper_power_down(tcg_env);
3381 }
3382 
3383 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3384 
3385 static void do_wrmwait(DisasContext *dc, TCGv src)
3386 {
3387     /*
3388      * TODO: This is a stub version of mwait, which merely recognizes
3389      * interrupts immediately and does not wait.
3390      */
3391     dc->base.is_jmp = DISAS_EXIT;
3392 }
3393 
3394 TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3395 
3396 static void do_wrpsr(DisasContext *dc, TCGv src)
3397 {
3398     gen_helper_wrpsr(tcg_env, src);
3399     dc->base.is_jmp = DISAS_EXIT;
3400 }
3401 
3402 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3403 
3404 static void do_wrwim(DisasContext *dc, TCGv src)
3405 {
3406     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3407     TCGv tmp = tcg_temp_new();
3408 
3409     tcg_gen_andi_tl(tmp, src, mask);
3410     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3411 }
3412 
3413 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3414 
3415 static void do_wrtpc(DisasContext *dc, TCGv src)
3416 {
3417 #ifdef TARGET_SPARC64
3418     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3419 
3420     gen_load_trap_state_at_tl(r_tsptr);
3421     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3422 #else
3423     qemu_build_not_reached();
3424 #endif
3425 }
3426 
3427 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3428 
3429 static void do_wrtnpc(DisasContext *dc, TCGv src)
3430 {
3431 #ifdef TARGET_SPARC64
3432     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3433 
3434     gen_load_trap_state_at_tl(r_tsptr);
3435     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3436 #else
3437     qemu_build_not_reached();
3438 #endif
3439 }
3440 
3441 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3442 
3443 static void do_wrtstate(DisasContext *dc, TCGv src)
3444 {
3445 #ifdef TARGET_SPARC64
3446     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3447 
3448     gen_load_trap_state_at_tl(r_tsptr);
3449     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3450 #else
3451     qemu_build_not_reached();
3452 #endif
3453 }
3454 
3455 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3456 
3457 static void do_wrtt(DisasContext *dc, TCGv src)
3458 {
3459 #ifdef TARGET_SPARC64
3460     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3461 
3462     gen_load_trap_state_at_tl(r_tsptr);
3463     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3464 #else
3465     qemu_build_not_reached();
3466 #endif
3467 }
3468 
3469 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3470 
3471 static void do_wrtick(DisasContext *dc, TCGv src)
3472 {
3473     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3474 
3475     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3476     translator_io_start(&dc->base);
3477     gen_helper_tick_set_count(r_tickptr, src);
3478     /* End TB to handle timer interrupt */
3479     dc->base.is_jmp = DISAS_EXIT;
3480 }
3481 
3482 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3483 
3484 static void do_wrtba(DisasContext *dc, TCGv src)
3485 {
3486     tcg_gen_mov_tl(cpu_tbr, src);
3487 }
3488 
3489 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3490 
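/*
 * A PSTATE write can change translation-relevant state (privilege,
 * address masking, FPU enable), so save pc/npc for the helper and
 * force a dynamic npc so the TB ends afterwards.
 */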
3491 static void do_wrpstate(DisasContext *dc, TCGv src)
3492 {
3493     save_state(dc);
3494     if (translator_io_start(&dc->base)) {
3495         dc->base.is_jmp = DISAS_EXIT;
3496     }
3497     gen_helper_wrpstate(tcg_env, src);
3498     dc->npc = DYNAMIC_PC;
3499 }
3500 
3501 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3502 
3503 static void do_wrtl(DisasContext *dc, TCGv src)
3504 {
3505     save_state(dc);
3506     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3507     dc->npc = DYNAMIC_PC;
3508 }
3509 
3510 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3511 
3512 static void do_wrpil(DisasContext *dc, TCGv src)
3513 {
3514     if (translator_io_start(&dc->base)) {
3515         dc->base.is_jmp = DISAS_EXIT;
3516     }
3517     gen_helper_wrpil(tcg_env, src);
3518 }
3519 
3520 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3521 
3522 static void do_wrcwp(DisasContext *dc, TCGv src)
3523 {
3524     gen_helper_wrcwp(tcg_env, src);
3525 }
3526 
3527 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3528 
3529 static void do_wrcansave(DisasContext *dc, TCGv src)
3530 {
3531     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3532 }
3533 
3534 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3535 
3536 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3537 {
3538     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3539 }
3540 
3541 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3542 
3543 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3544 {
3545     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3546 }
3547 
3548 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3549 
3550 static void do_wrotherwin(DisasContext *dc, TCGv src)
3551 {
3552     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3553 }
3554 
3555 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3556 
3557 static void do_wrwstate(DisasContext *dc, TCGv src)
3558 {
3559     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3560 }
3561 
3562 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3563 
3564 static void do_wrgl(DisasContext *dc, TCGv src)
3565 {
3566     gen_helper_wrgl(tcg_env, src);
3567 }
3568 
3569 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3570 
3571 /* UA2005 strand status */
3572 static void do_wrssr(DisasContext *dc, TCGv src)
3573 {
3574     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3575 }
3576 
3577 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3578 
3579 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3580 
3581 static void do_wrhpstate(DisasContext *dc, TCGv src)
3582 {
3583     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3584     dc->base.is_jmp = DISAS_EXIT;
3585 }
3586 
3587 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3588 
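/*
 * %htstate is indexed by the current trap level: the generated code
 * computes &env->htstate[TL & MAXTL_MASK] by scaling TL by 8 for the
 * uint64_t entries, then stores through that pointer.
 */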
3589 static void do_wrhtstate(DisasContext *dc, TCGv src)
3590 {
3591     TCGv_i32 tl = tcg_temp_new_i32();
3592     TCGv_ptr tp = tcg_temp_new_ptr();
3593 
3594     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3595     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3596     tcg_gen_shli_i32(tl, tl, 3);
3597     tcg_gen_ext_i32_ptr(tp, tl);
3598     tcg_gen_add_ptr(tp, tp, tcg_env);
3599 
3600     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3601 }
3602 
3603 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3604 
3605 static void do_wrhintp(DisasContext *dc, TCGv src)
3606 {
3607     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3608 }
3609 
3610 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3611 
3612 static void do_wrhtba(DisasContext *dc, TCGv src)
3613 {
3614     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3615 }
3616 
3617 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3618 
3619 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3620 {
3621     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3622 
3623     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3624     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3625     translator_io_start(&dc->base);
3626     gen_helper_tick_set_limit(r_tickptr, src);
3627     /* End TB to handle timer interrupt */
3628     dc->base.is_jmp = DISAS_EXIT;
3629 }
3630 
3631 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3632       do_wrhstick_cmpr)
3633 
3634 static bool do_saved_restored(DisasContext *dc, bool saved)
3635 {
3636     if (!supervisor(dc)) {
3637         return raise_priv(dc);
3638     }
3639     if (saved) {
3640         gen_helper_saved(tcg_env);
3641     } else {
3642         gen_helper_restored(tcg_env);
3643     }
3644     return advance_pc(dc);
3645 }
3646 
3647 TRANS(SAVED, 64, do_saved_restored, true)
3648 TRANS(RESTORED, 64, do_saved_restored, false)
3649 
3650 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3651 {
3652     return advance_pc(dc);
3653 }
3654 
3655 /*
3656  * TODO: Need a feature bit for sparcv8.
3657  * In the meantime, treat all 32-bit cpus like sparcv7.
3658  */
3659 TRANS(NOP_v7, 32, trans_NOP, a)
3660 TRANS(NOP_v9, 64, trans_NOP, a)
3661 
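/*
 * Common expansion for the r_r_ri_cc arithmetic format.  For logical
 * ops that set the flags, the result is computed directly into
 * cpu_cc_N: N and Z both derive from the result value, so cc_Z is
 * simply copied from it, while C and V are cleared (likewise the
 * 32-bit icc copies on sparc64).
 */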
3662 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3663                          void (*func)(TCGv, TCGv, TCGv),
3664                          void (*funci)(TCGv, TCGv, target_long),
3665                          bool logic_cc)
3666 {
3667     TCGv dst, src1;
3668 
3669     /* For simplicity, we under-decoded the rs2 form. */
3670     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3671         return false;
3672     }
3673 
3674     if (logic_cc) {
3675         dst = cpu_cc_N;
3676     } else {
3677         dst = gen_dest_gpr(dc, a->rd);
3678     }
3679     src1 = gen_load_gpr(dc, a->rs1);
3680 
3681     if (a->imm || a->rs2_or_imm == 0) {
3682         if (funci) {
3683             funci(dst, src1, a->rs2_or_imm);
3684         } else {
3685             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3686         }
3687     } else {
3688         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3689     }
3690 
3691     if (logic_cc) {
3692         if (TARGET_LONG_BITS == 64) {
3693             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3694             tcg_gen_movi_tl(cpu_icc_C, 0);
3695         }
3696         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3697         tcg_gen_movi_tl(cpu_cc_C, 0);
3698         tcg_gen_movi_tl(cpu_cc_V, 0);
3699     }
3700 
3701     gen_store_gpr(dc, a->rd, dst);
3702     return advance_pc(dc);
3703 }
3704 
3705 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3706                      void (*func)(TCGv, TCGv, TCGv),
3707                      void (*funci)(TCGv, TCGv, target_long),
3708                      void (*func_cc)(TCGv, TCGv, TCGv))
3709 {
3710     if (a->cc) {
3711         return do_arith_int(dc, a, func_cc, NULL, false);
3712     }
3713     return do_arith_int(dc, a, func, funci, false);
3714 }
3715 
3716 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3717                      void (*func)(TCGv, TCGv, TCGv),
3718                      void (*funci)(TCGv, TCGv, target_long))
3719 {
3720     return do_arith_int(dc, a, func, funci, a->cc);
3721 }
3722 
3723 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3724 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3725 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3726 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3727 
3728 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3729 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3730 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3731 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3732 
3733 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3734 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3735 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3736 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3737 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3738 
3739 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3740 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3741 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3742 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3743 
3744 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3745 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3746 
3747 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3748 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3749 
3750 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3751 {
3752     /* OR with %g0 is the canonical alias for MOV. */
3753     if (!a->cc && a->rs1 == 0) {
3754         if (a->imm || a->rs2_or_imm == 0) {
3755             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3756         } else if (a->rs2_or_imm & ~0x1f) {
3757             /* For simplicity, we under-decoded the rs2 form. */
3758             return false;
3759         } else {
3760             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3761         }
3762         return advance_pc(dc);
3763     }
3764     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3765 }
3766 
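/*
 * UDIV divides the 64-bit quantity (Y:rs1) by the 32-bit rs2, and a
 * quotient that overflows 32 bits is saturated to UINT32_MAX, the
 * architected overflow result.  For the register form, the divide-by-
 * zero check is emitted as a delayed exception after cc/pc state has
 * been flushed.
 */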
3767 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3768 {
3769     TCGv_i64 t1, t2;
3770     TCGv dst;
3771 
3772     if (!avail_DIV(dc)) {
3773         return false;
3774     }
3775     /* For simplicity, we under-decoded the rs2 form. */
3776     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3777         return false;
3778     }
3779 
3780     if (unlikely(a->rs2_or_imm == 0)) {
3781         gen_exception(dc, TT_DIV_ZERO);
3782         return true;
3783     }
3784 
3785     if (a->imm) {
3786         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3787     } else {
3788         TCGLabel *lab;
3789         TCGv_i32 n2;
3790 
3791         finishing_insn(dc);
3792         flush_cond(dc);
3793 
3794         n2 = tcg_temp_new_i32();
3795         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3796 
3797         lab = delay_exception(dc, TT_DIV_ZERO);
3798         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3799 
3800         t2 = tcg_temp_new_i64();
3801 #ifdef TARGET_SPARC64
3802         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3803 #else
3804         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3805 #endif
3806     }
3807 
3808     t1 = tcg_temp_new_i64();
3809     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3810 
3811     tcg_gen_divu_i64(t1, t1, t2);
3812     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3813 
3814     dst = gen_dest_gpr(dc, a->rd);
3815     tcg_gen_trunc_i64_tl(dst, t1);
3816     gen_store_gpr(dc, a->rd, dst);
3817     return advance_pc(dc);
3818 }
3819 
3820 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3821 {
3822     TCGv dst, src1, src2;
3823 
3824     if (!avail_64(dc)) {
3825         return false;
3826     }
3827     /* For simplicity, we under-decoded the rs2 form. */
3828     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3829         return false;
3830     }
3831 
3832     if (unlikely(a->rs2_or_imm == 0)) {
3833         gen_exception(dc, TT_DIV_ZERO);
3834         return true;
3835     }
3836 
3837     if (a->imm) {
3838         src2 = tcg_constant_tl(a->rs2_or_imm);
3839     } else {
3840         TCGLabel *lab;
3841 
3842         finishing_insn(dc);
3843         flush_cond(dc);
3844 
3845         lab = delay_exception(dc, TT_DIV_ZERO);
3846         src2 = cpu_regs[a->rs2_or_imm];
3847         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3848     }
3849 
3850     dst = gen_dest_gpr(dc, a->rd);
3851     src1 = gen_load_gpr(dc, a->rs1);
3852 
3853     tcg_gen_divu_tl(dst, src1, src2);
3854     gen_store_gpr(dc, a->rd, dst);
3855     return advance_pc(dc);
3856 }
3857 
3858 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3859 {
3860     TCGv dst, src1, src2;
3861 
3862     if (!avail_64(dc)) {
3863         return false;
3864     }
3865     /* For simplicity, we under-decoded the rs2 form. */
3866     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3867         return false;
3868     }
3869 
3870     if (unlikely(a->rs2_or_imm == 0)) {
3871         gen_exception(dc, TT_DIV_ZERO);
3872         return true;
3873     }
3874 
3875     dst = gen_dest_gpr(dc, a->rd);
3876     src1 = gen_load_gpr(dc, a->rs1);
3877 
3878     if (a->imm) {
3879         if (unlikely(a->rs2_or_imm == -1)) {
3880             tcg_gen_neg_tl(dst, src1);
3881             gen_store_gpr(dc, a->rd, dst);
3882             return advance_pc(dc);
3883         }
3884         src2 = tcg_constant_tl(a->rs2_or_imm);
3885     } else {
3886         TCGLabel *lab;
3887         TCGv t1, t2;
3888 
3889         finishing_insn(dc);
3890         flush_cond(dc);
3891 
3892         lab = delay_exception(dc, TT_DIV_ZERO);
3893         src2 = cpu_regs[a->rs2_or_imm];
3894         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3895 
3896         /*
3897          * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3898          * Set SRC2 to 1 as a new divisor, to produce the correct result.
3899          */
3900         t1 = tcg_temp_new();
3901         t2 = tcg_temp_new();
3902         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3903         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3904         tcg_gen_and_tl(t1, t1, t2);
3905         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3906                            tcg_constant_tl(1), src2);
3907         src2 = t1;
3908     }
3909 
3910     tcg_gen_div_tl(dst, src1, src2);
3911     gen_store_gpr(dc, a->rd, dst);
3912     return advance_pc(dc);
3913 }
3914 
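/*
 * The VIS EDGE instructions compute the partial-store byte masks used
 * at the boundaries of an unaligned block operation: a left mask from
 * the low address bits of rs1 and a right mask from rs2.  When both
 * addresses fall within the same aligned 8-byte word the two masks are
 * combined, otherwise only the left mask applies.  The cc forms also
 * set the condition codes as for SUBcc with rs1 - rs2.
 */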
3915 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3916                      int width, bool cc, bool little_endian)
3917 {
3918     TCGv dst, s1, s2, l, r, t, m;
3919     uint64_t amask = address_mask_i(dc, -8);
3920 
3921     dst = gen_dest_gpr(dc, a->rd);
3922     s1 = gen_load_gpr(dc, a->rs1);
3923     s2 = gen_load_gpr(dc, a->rs2);
3924 
3925     if (cc) {
3926         gen_op_subcc(cpu_cc_N, s1, s2);
3927     }
3928 
3929     l = tcg_temp_new();
3930     r = tcg_temp_new();
3931     t = tcg_temp_new();
3932 
3933     switch (width) {
3934     case 8:
3935         tcg_gen_andi_tl(l, s1, 7);
3936         tcg_gen_andi_tl(r, s2, 7);
3937         tcg_gen_xori_tl(r, r, 7);
3938         m = tcg_constant_tl(0xff);
3939         break;
3940     case 16:
3941         tcg_gen_extract_tl(l, s1, 1, 2);
3942         tcg_gen_extract_tl(r, s2, 1, 2);
3943         tcg_gen_xori_tl(r, r, 3);
3944         m = tcg_constant_tl(0xf);
3945         break;
3946     case 32:
3947         tcg_gen_extract_tl(l, s1, 2, 1);
3948         tcg_gen_extract_tl(r, s2, 2, 1);
3949         tcg_gen_xori_tl(r, r, 1);
3950         m = tcg_constant_tl(0x3);
3951         break;
3952     default:
3953         g_assert_not_reached();
3954     }
3955 
3956     /* Compute Left Edge */
3957     if (little_endian) {
3958         tcg_gen_shl_tl(l, m, l);
3959         tcg_gen_and_tl(l, l, m);
3960     } else {
3961         tcg_gen_shr_tl(l, m, l);
3962     }
3963     /* Compute Right Edge */
3964     if (little_endian) {
3965         tcg_gen_shr_tl(r, m, r);
3966     } else {
3967         tcg_gen_shl_tl(r, m, r);
3968         tcg_gen_and_tl(r, r, m);
3969     }
3970 
3971     /* Compute dst = (s1 == s2 under amask ? l & r : l) */
3972     tcg_gen_xor_tl(t, s1, s2);
3973     tcg_gen_and_tl(r, r, l);
3974     tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);
3975 
3976     gen_store_gpr(dc, a->rd, dst);
3977     return advance_pc(dc);
3978 }
3979 
3980 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3981 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3982 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3983 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3984 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3985 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3986 
3987 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3988 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3989 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3990 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3991 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3992 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3993 
3994 static bool do_rr(DisasContext *dc, arg_r_r *a,
3995                   void (*func)(TCGv, TCGv))
3996 {
3997     TCGv dst = gen_dest_gpr(dc, a->rd);
3998     TCGv src = gen_load_gpr(dc, a->rs);
3999 
4000     func(dst, src);
4001     gen_store_gpr(dc, a->rd, dst);
4002     return advance_pc(dc);
4003 }
4004 
4005 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
4006 
4007 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
4008                    void (*func)(TCGv, TCGv, TCGv))
4009 {
4010     TCGv dst = gen_dest_gpr(dc, a->rd);
4011     TCGv src1 = gen_load_gpr(dc, a->rs1);
4012     TCGv src2 = gen_load_gpr(dc, a->rs2);
4013 
4014     func(dst, src1, src2);
4015     gen_store_gpr(dc, a->rd, dst);
4016     return advance_pc(dc);
4017 }
4018 
4019 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
4020 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
4021 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
4022 
4023 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
4024 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
4025 
4026 TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
4027 TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)
4028 
4029 TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
4030 
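/*
 * ALIGNADDRESS returns (rs1 + rs2) & ~7 and latches the low three bits
 * of the sum in GSR.align, where a subsequent FALIGNDATA uses them as
 * its byte shift; the little-endian variant latches the negated offset.
 */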
4031 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
4032 {
4033 #ifdef TARGET_SPARC64
4034     TCGv tmp = tcg_temp_new();
4035 
4036     tcg_gen_add_tl(tmp, s1, s2);
4037     tcg_gen_andi_tl(dst, tmp, -8);
4038     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4039 #else
4040     g_assert_not_reached();
4041 #endif
4042 }
4043 
4044 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
4045 {
4046 #ifdef TARGET_SPARC64
4047     TCGv tmp = tcg_temp_new();
4048 
4049     tcg_gen_add_tl(tmp, s1, s2);
4050     tcg_gen_andi_tl(dst, tmp, -8);
4051     tcg_gen_neg_tl(tmp, tmp);
4052     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4053 #else
4054     g_assert_not_reached();
4055 #endif
4056 }
4057 
4058 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
4059 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4060 
4061 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
4062 {
4063 #ifdef TARGET_SPARC64
4064     tcg_gen_add_tl(dst, s1, s2);
4065     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
4066 #else
4067     g_assert_not_reached();
4068 #endif
4069 }
4070 
4071 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4072 
4073 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4074 {
4075     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4076     return true;
4077 }
4078 
4079 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4080 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4081 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4082 
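/*
 * Shifts by register: only the low 5 (32-bit) or 6 (64-bit) bits of
 * the count are used, as the architecture ignores the rest of rs2.
 * The 32-bit forms zero- or sign-extend around the shift so that rd
 * ends up with the canonical extended result.
 */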
4083 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4084 {
4085     TCGv dst, src1, src2;
4086 
4087     /* Reject 64-bit shifts for sparc32. */
4088     if (avail_32(dc) && a->x) {
4089         return false;
4090     }
4091 
4092     src2 = tcg_temp_new();
4093     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4094     src1 = gen_load_gpr(dc, a->rs1);
4095     dst = gen_dest_gpr(dc, a->rd);
4096 
4097     if (l) {
4098         tcg_gen_shl_tl(dst, src1, src2);
4099         if (!a->x) {
4100             tcg_gen_ext32u_tl(dst, dst);
4101         }
4102     } else if (u) {
4103         if (!a->x) {
4104             tcg_gen_ext32u_tl(dst, src1);
4105             src1 = dst;
4106         }
4107         tcg_gen_shr_tl(dst, src1, src2);
4108     } else {
4109         if (!a->x) {
4110             tcg_gen_ext32s_tl(dst, src1);
4111             src1 = dst;
4112         }
4113         tcg_gen_sar_tl(dst, src1, src2);
4114     }
4115     gen_store_gpr(dc, a->rd, dst);
4116     return advance_pc(dc);
4117 }
4118 
4119 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4120 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4121 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4122 
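/*
 * Shifts by immediate.  A 32-bit shift on a 64-bit cpu is folded with
 * the required 32-bit extension into a single deposit, extract or
 * sextract over the surviving 32 - i bits.
 */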
4123 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4124 {
4125     TCGv dst, src1;
4126 
4127     /* Reject 64-bit shifts for sparc32. */
4128     if (avail_32(dc) && (a->x || a->i >= 32)) {
4129         return false;
4130     }
4131 
4132     src1 = gen_load_gpr(dc, a->rs1);
4133     dst = gen_dest_gpr(dc, a->rd);
4134 
4135     if (avail_32(dc) || a->x) {
4136         if (l) {
4137             tcg_gen_shli_tl(dst, src1, a->i);
4138         } else if (u) {
4139             tcg_gen_shri_tl(dst, src1, a->i);
4140         } else {
4141             tcg_gen_sari_tl(dst, src1, a->i);
4142         }
4143     } else {
4144         if (l) {
4145             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4146         } else if (u) {
4147             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4148         } else {
4149             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4150         }
4151     }
4152     gen_store_gpr(dc, a->rd, dst);
4153     return advance_pc(dc);
4154 }
4155 
4156 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4157 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4158 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4159 
4160 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4161 {
4162     /* For simplicity, we under-decoded the rs2 form. */
4163     if (!imm && rs2_or_imm & ~0x1f) {
4164         return NULL;
4165     }
4166     if (imm || rs2_or_imm == 0) {
4167         return tcg_constant_tl(rs2_or_imm);
4168     } else {
4169         return cpu_regs[rs2_or_imm];
4170     }
4171 }
4172 
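/*
 * rd is loaded first so that the movcond can select between the new
 * source value and rd's current contents, leaving the register
 * unchanged when the condition is false.
 */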
4173 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4174 {
4175     TCGv dst = gen_load_gpr(dc, rd);
4176     TCGv c2 = tcg_constant_tl(cmp->c2);
4177 
4178     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4179     gen_store_gpr(dc, rd, dst);
4180     return advance_pc(dc);
4181 }
4182 
4183 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4184 {
4185     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4186     DisasCompare cmp;
4187 
4188     if (src2 == NULL) {
4189         return false;
4190     }
4191     gen_compare(&cmp, a->cc, a->cond, dc);
4192     return do_mov_cond(dc, &cmp, a->rd, src2);
4193 }
4194 
4195 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4196 {
4197     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4198     DisasCompare cmp;
4199 
4200     if (src2 == NULL) {
4201         return false;
4202     }
4203     gen_fcompare(&cmp, a->cc, a->cond);
4204     return do_mov_cond(dc, &cmp, a->rd, src2);
4205 }
4206 
4207 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4208 {
4209     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4210     DisasCompare cmp;
4211 
4212     if (src2 == NULL) {
4213         return false;
4214     }
4215     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4216         return false;
4217     }
4218     return do_mov_cond(dc, &cmp, a->rd, src2);
4219 }
4220 
4221 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4222                            bool (*func)(DisasContext *dc, int rd, TCGv src))
4223 {
4224     TCGv src1, sum;
4225 
4226     /* For simplicity, we under-decoded the rs2 form. */
4227     if (!a->imm && a->rs2_or_imm & ~0x1f) {
4228         return false;
4229     }
4230 
4231     /*
4232      * Always load the sum into a new temporary.
4233      * This is required to capture the value across a window change,
4234      * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4235      */
4236     sum = tcg_temp_new();
4237     src1 = gen_load_gpr(dc, a->rs1);
4238     if (a->imm || a->rs2_or_imm == 0) {
4239         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4240     } else {
4241         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4242     }
4243     return func(dc, a->rd, sum);
4244 }
4245 
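/*
 * JMPL: the target becomes the new npc (a delayed control transfer)
 * and the address of the JMPL itself is written to rd.  A misaligned
 * target raises mem_address_not_aligned via gen_check_align.
 */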
4246 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4247 {
4248     /*
4249      * Preserve pc across advance, so that we can delay
4250      * the writeback to rd until after src is consumed.
4251      */
4252     target_ulong cur_pc = dc->pc;
4253 
4254     gen_check_align(dc, src, 3);
4255 
4256     gen_mov_pc_npc(dc);
4257     tcg_gen_mov_tl(cpu_npc, src);
4258     gen_address_mask(dc, cpu_npc);
4259     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4260 
4261     dc->npc = DYNAMIC_PC_LOOKUP;
4262     return true;
4263 }
4264 
4265 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4266 
4267 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4268 {
4269     if (!supervisor(dc)) {
4270         return raise_priv(dc);
4271     }
4272 
4273     gen_check_align(dc, src, 3);
4274 
4275     gen_mov_pc_npc(dc);
4276     tcg_gen_mov_tl(cpu_npc, src);
4277     gen_helper_rett(tcg_env);
4278 
4279     dc->npc = DYNAMIC_PC;
4280     return true;
4281 }
4282 
4283 TRANS(RETT, 32, do_add_special, a, do_rett)
4284 
4285 static bool do_return(DisasContext *dc, int rd, TCGv src)
4286 {
4287     gen_check_align(dc, src, 3);
4288     gen_helper_restore(tcg_env);
4289 
4290     gen_mov_pc_npc(dc);
4291     tcg_gen_mov_tl(cpu_npc, src);
4292     gen_address_mask(dc, cpu_npc);
4293 
4294     dc->npc = DYNAMIC_PC_LOOKUP;
4295     return true;
4296 }
4297 
4298 TRANS(RETURN, 64, do_add_special, a, do_return)
4299 
4300 static bool do_save(DisasContext *dc, int rd, TCGv src)
4301 {
4302     gen_helper_save(tcg_env);
4303     gen_store_gpr(dc, rd, src);
4304     return advance_pc(dc);
4305 }
4306 
4307 TRANS(SAVE, ALL, do_add_special, a, do_save)
4308 
4309 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4310 {
4311     gen_helper_restore(tcg_env);
4312     gen_store_gpr(dc, rd, src);
4313     return advance_pc(dc);
4314 }
4315 
4316 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4317 
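/*
 * DONE and RETRY return from a trap handler by reloading pc/npc from
 * the trap state (DONE resumes at %tnpc, RETRY re-executes from %tpc),
 * so both become dynamic and the TB must end.
 */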
4318 static bool do_done_retry(DisasContext *dc, bool done)
4319 {
4320     if (!supervisor(dc)) {
4321         return raise_priv(dc);
4322     }
4323     dc->npc = DYNAMIC_PC;
4324     dc->pc = DYNAMIC_PC;
4325     translator_io_start(&dc->base);
4326     if (done) {
4327         gen_helper_done(tcg_env);
4328     } else {
4329         gen_helper_retry(tcg_env);
4330     }
4331     return true;
4332 }
4333 
4334 TRANS(DONE, 64, do_done_retry, true)
4335 TRANS(RETRY, 64, do_done_retry, false)
4336 
4337 /*
4338  * Major opcode 11 -- load and store instructions
4339  */
4340 
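/*
 * Form the effective address rs1 + (simm13 or rs2) for a load or
 * store.  Returns NULL to reject an under-decoded rs2 encoding; when
 * 32-bit address masking is in effect the result is zero-extended to
 * 32 bits in a fresh temporary.
 */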
4341 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4342 {
4343     TCGv addr, tmp = NULL;
4344 
4345     /* For simplicity, we under-decoded the rs2 form. */
4346     if (!imm && rs2_or_imm & ~0x1f) {
4347         return NULL;
4348     }
4349 
4350     addr = gen_load_gpr(dc, rs1);
4351     if (rs2_or_imm) {
4352         tmp = tcg_temp_new();
4353         if (imm) {
4354             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4355         } else {
4356             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4357         }
4358         addr = tmp;
4359     }
4360     if (AM_CHECK(dc)) {
4361         if (!tmp) {
4362             tmp = tcg_temp_new();
4363         }
4364         tcg_gen_ext32u_tl(tmp, addr);
4365         addr = tmp;
4366     }
4367     return addr;
4368 }
4369 
4370 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4371 {
4372     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4373     DisasASI da;
4374 
4375     if (addr == NULL) {
4376         return false;
4377     }
4378     da = resolve_asi(dc, a->asi, mop);
4379 
4380     reg = gen_dest_gpr(dc, a->rd);
4381     gen_ld_asi(dc, &da, reg, addr);
4382     gen_store_gpr(dc, a->rd, reg);
4383     return advance_pc(dc);
4384 }
4385 
4386 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4387 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4388 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4389 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4390 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4391 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4392 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4393 
4394 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4395 {
4396     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4397     DisasASI da;
4398 
4399     if (addr == NULL) {
4400         return false;
4401     }
4402     da = resolve_asi(dc, a->asi, mop);
4403 
4404     reg = gen_load_gpr(dc, a->rd);
4405     gen_st_asi(dc, &da, reg, addr);
4406     return advance_pc(dc);
4407 }
4408 
4409 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4410 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4411 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4412 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4413 
4414 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4415 {
4416     TCGv addr;
4417     DisasASI da;
4418 
4419     if (a->rd & 1) {
4420         return false;
4421     }
4422     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4423     if (addr == NULL) {
4424         return false;
4425     }
4426     da = resolve_asi(dc, a->asi, MO_TEUQ);
4427     gen_ldda_asi(dc, &da, addr, a->rd);
4428     return advance_pc(dc);
4429 }
4430 
4431 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4432 {
4433     TCGv addr;
4434     DisasASI da;
4435 
4436     if (a->rd & 1) {
4437         return false;
4438     }
4439     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4440     if (addr == NULL) {
4441         return false;
4442     }
4443     da = resolve_asi(dc, a->asi, MO_TEUQ);
4444     gen_stda_asi(dc, &da, addr, a->rd);
4445     return advance_pc(dc);
4446 }
4447 
4448 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4449 {
4450     TCGv addr, reg;
4451     DisasASI da;
4452 
4453     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4454     if (addr == NULL) {
4455         return false;
4456     }
4457     da = resolve_asi(dc, a->asi, MO_UB);
4458 
4459     reg = gen_dest_gpr(dc, a->rd);
4460     gen_ldstub_asi(dc, &da, reg, addr);
4461     gen_store_gpr(dc, a->rd, reg);
4462     return advance_pc(dc);
4463 }
4464 
4465 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4466 {
4467     TCGv addr, dst, src;
4468     DisasASI da;
4469 
4470     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4471     if (addr == NULL) {
4472         return false;
4473     }
4474     da = resolve_asi(dc, a->asi, MO_TEUL);
4475 
4476     dst = gen_dest_gpr(dc, a->rd);
4477     src = gen_load_gpr(dc, a->rd);
4478     gen_swap_asi(dc, &da, dst, src, addr);
4479     gen_store_gpr(dc, a->rd, dst);
4480     return advance_pc(dc);
4481 }
4482 
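/*
 * CASA/CASXA: the memory word at [rs1] is compared with rs2 and, if
 * equal, replaced with the value from rd; rd always receives the old
 * memory value.  Hence the old value (o), new value (n, from rd) and
 * compare value (c, from rs2) all flow through gen_cas_asi.
 */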
4483 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4484 {
4485     TCGv addr, o, n, c;
4486     DisasASI da;
4487 
4488     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4489     if (addr == NULL) {
4490         return false;
4491     }
4492     da = resolve_asi(dc, a->asi, mop);
4493 
4494     o = gen_dest_gpr(dc, a->rd);
4495     n = gen_load_gpr(dc, a->rd);
4496     c = gen_load_gpr(dc, a->rs2_or_imm);
4497     gen_cas_asi(dc, &da, o, n, c, addr);
4498     gen_store_gpr(dc, a->rd, o);
4499     return advance_pc(dc);
4500 }
4501 
4502 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4503 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4504 
4505 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4506 {
4507     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4508     DisasASI da;
4509 
4510     if (addr == NULL) {
4511         return false;
4512     }
4513     if (gen_trap_if_nofpu_fpexception(dc)) {
4514         return true;
4515     }
4516     if (sz == MO_128 && gen_trap_float128(dc)) {
4517         return true;
4518     }
4519     da = resolve_asi(dc, a->asi, MO_TE | sz);
4520     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4521     gen_update_fprs_dirty(dc, a->rd);
4522     return advance_pc(dc);
4523 }
4524 
4525 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4526 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4527 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4528 
4529 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4530 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4531 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4532 
4533 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4534 {
4535     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4536     DisasASI da;
4537 
4538     if (addr == NULL) {
4539         return false;
4540     }
4541     /* Store insns are ok in fp_exception_pending state. */
4542     if (gen_trap_ifnofpu(dc)) {
4543         return true;
4544     }
4545     if (sz == MO_128 && gen_trap_float128(dc)) {
4546         return true;
4547     }
4548     da = resolve_asi(dc, a->asi, MO_TE | sz);
4549     gen_stf_asi(dc, &da, sz, addr, a->rd);
4550     return advance_pc(dc);
4551 }
4552 
4553 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4554 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4555 TRANS(STQF, 64, do_st_fpr, a, MO_128)
4556 
4557 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4558 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4559 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4560 
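/*
 * STDFQ stores the floating-point deferred-trap queue (modelled as a
 * single entry).  With the queue empty this is an FSR sequence error;
 * otherwise the entry is stored and the queue marked empty, returning
 * the FPU to fp_execute state.
 */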
4561 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4562 {
4563     TCGv addr;
4564 
4565     if (!avail_32(dc)) {
4566         return false;
4567     }
4568     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4569     if (addr == NULL) {
4570         return false;
4571     }
4572     if (!supervisor(dc)) {
4573         return raise_priv(dc);
4574     }
4575 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
4576     if (gen_trap_ifnofpu(dc)) {
4577         return true;
4578     }
4579     if (!dc->fsr_qne) {
4580         gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4581         return true;
4582     }
4583 
4584     /* Store the single element from the queue. */
4585     TCGv_i64 fq = tcg_temp_new_i64();
4586     tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d));
4587     tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4);
4588 
4589     /* Mark the queue empty, transitioning to fp_execute state. */
4590     tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
4591                    offsetof(CPUSPARCState, fsr_qne));
4592     dc->fsr_qne = 0;
4593 
4594     return advance_pc(dc);
4595 #else
4596     qemu_build_not_reached();
4597 #endif
4598 }
4599 
4600 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4601 {
4602     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4603     TCGv_i32 tmp;
4604 
4605     if (addr == NULL) {
4606         return false;
4607     }
4608     if (gen_trap_if_nofpu_fpexception(dc)) {
4609         return true;
4610     }
4611 
4612     tmp = tcg_temp_new_i32();
4613     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4614 
4615     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4616     /* LDFSR does not change FCC[1-3]. */
4617 
4618     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4619     return advance_pc(dc);
4620 }
4621 
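/*
 * LDXFSR reloads the full 64-bit FSR.  The fcc fields are kept in the
 * separate cpu_fcc[] globals, so they are extracted from the loaded
 * value here; the remaining fields go through the set_fsr helper,
 * which for the LDXEFSR variant (entire == true) also writes FTT.
 */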
4622 static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
4623 {
4624 #ifdef TARGET_SPARC64
4625     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4626     TCGv_i64 t64;
4627     TCGv_i32 lo, hi;
4628 
4629     if (addr == NULL) {
4630         return false;
4631     }
4632     if (gen_trap_if_nofpu_fpexception(dc)) {
4633         return true;
4634     }
4635 
4636     t64 = tcg_temp_new_i64();
4637     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4638 
4639     lo = tcg_temp_new_i32();
4640     hi = cpu_fcc[3];
4641     tcg_gen_extr_i64_i32(lo, hi, t64);
4642     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4643     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4644     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4645     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4646 
4647     if (entire) {
4648         gen_helper_set_fsr_nofcc(tcg_env, lo);
4649     } else {
4650         gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4651     }
4652     return advance_pc(dc);
4653 #else
4654     return false;
4655 #endif
4656 }
4657 
4658 TRANS(LDXFSR, 64, do_ldxfsr, a, false)
4659 TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4660 
4661 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4662 {
4663     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4664     TCGv fsr;
4665 
4666     if (addr == NULL) {
4667         return false;
4668     }
4669     /* Store insns are ok in fp_exception_pending state. */
4670     if (gen_trap_ifnofpu(dc)) {
4671         return true;
4672     }
4673 
4674     fsr = tcg_temp_new();
4675     gen_helper_get_fsr(fsr, tcg_env);
4676     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4677     return advance_pc(dc);
4678 }
4679 
4680 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4681 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4682 
4683 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4684 {
4685     if (gen_trap_ifnofpu(dc)) {
4686         return true;
4687     }
4688     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4689     return advance_pc(dc);
4690 }
4691 
4692 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4693 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4694 
4695 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4696 {
4697     if (gen_trap_ifnofpu(dc)) {
4698         return true;
4699     }
4700     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4701     return advance_pc(dc);
4702 }
4703 
4704 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4705 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4706 
4707 static bool do_ff(DisasContext *dc, arg_r_r *a,
4708                   void (*func)(TCGv_i32, TCGv_i32))
4709 {
4710     TCGv_i32 tmp;
4711 
4712     if (gen_trap_if_nofpu_fpexception(dc)) {
4713         return true;
4714     }
4715 
4716     tmp = gen_load_fpr_F(dc, a->rs);
4717     func(tmp, tmp);
4718     gen_store_fpr_F(dc, a->rd, tmp);
4719     return advance_pc(dc);
4720 }
4721 
4722 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4723 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4724 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4725 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4726 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4727 
4728 static bool do_fd(DisasContext *dc, arg_r_r *a,
4729                   void (*func)(TCGv_i32, TCGv_i64))
4730 {
4731     TCGv_i32 dst;
4732     TCGv_i64 src;
4733 
4734     if (gen_trap_ifnofpu(dc)) {
4735         return true;
4736     }
4737 
4738     dst = tcg_temp_new_i32();
4739     src = gen_load_fpr_D(dc, a->rs);
4740     func(dst, src);
4741     gen_store_fpr_F(dc, a->rd, dst);
4742     return advance_pc(dc);
4743 }
4744 
4745 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4746 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4747 
4748 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4749                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4750 {
4751     TCGv_i32 tmp;
4752 
4753     if (gen_trap_if_nofpu_fpexception(dc)) {
4754         return true;
4755     }
4756 
4757     tmp = gen_load_fpr_F(dc, a->rs);
4758     func(tmp, tcg_env, tmp);
4759     gen_store_fpr_F(dc, a->rd, tmp);
4760     return advance_pc(dc);
4761 }
4762 
4763 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4764 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4765 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4766 
4767 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4768                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4769 {
4770     TCGv_i32 dst;
4771     TCGv_i64 src;
4772 
4773     if (gen_trap_if_nofpu_fpexception(dc)) {
4774         return true;
4775     }
4776 
4777     dst = tcg_temp_new_i32();
4778     src = gen_load_fpr_D(dc, a->rs);
4779     func(dst, tcg_env, src);
4780     gen_store_fpr_F(dc, a->rd, dst);
4781     return advance_pc(dc);
4782 }
4783 
4784 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4785 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4786 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4787 
4788 static bool do_dd(DisasContext *dc, arg_r_r *a,
4789                   void (*func)(TCGv_i64, TCGv_i64))
4790 {
4791     TCGv_i64 dst, src;
4792 
4793     if (gen_trap_if_nofpu_fpexception(dc)) {
4794         return true;
4795     }
4796 
4797     dst = tcg_temp_new_i64();
4798     src = gen_load_fpr_D(dc, a->rs);
4799     func(dst, src);
4800     gen_store_fpr_D(dc, a->rd, dst);
4801     return advance_pc(dc);
4802 }
4803 
4804 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4805 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4806 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4807 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4808 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4809 
4810 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4811                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4812 {
4813     TCGv_i64 dst, src;
4814 
4815     if (gen_trap_if_nofpu_fpexception(dc)) {
4816         return true;
4817     }
4818 
4819     dst = tcg_temp_new_i64();
4820     src = gen_load_fpr_D(dc, a->rs);
4821     func(dst, tcg_env, src);
4822     gen_store_fpr_D(dc, a->rd, dst);
4823     return advance_pc(dc);
4824 }
4825 
4826 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4827 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4828 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4829 
4830 static bool do_df(DisasContext *dc, arg_r_r *a,
4831                   void (*func)(TCGv_i64, TCGv_i32))
4832 {
4833     TCGv_i64 dst;
4834     TCGv_i32 src;
4835 
4836     if (gen_trap_ifnofpu(dc)) {
4837         return true;
4838     }
4839 
4840     dst = tcg_temp_new_i64();
4841     src = gen_load_fpr_F(dc, a->rs);
4842     func(dst, src);
4843     gen_store_fpr_D(dc, a->rd, dst);
4844     return advance_pc(dc);
4845 }
4846 
4847 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4848 
4849 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4850                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4851 {
4852     TCGv_i64 dst;
4853     TCGv_i32 src;
4854 
4855     if (gen_trap_if_nofpu_fpexception(dc)) {
4856         return true;
4857     }
4858 
4859     dst = tcg_temp_new_i64();
4860     src = gen_load_fpr_F(dc, a->rs);
4861     func(dst, tcg_env, src);
4862     gen_store_fpr_D(dc, a->rd, dst);
4863     return advance_pc(dc);
4864 }
4865 
4866 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4867 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4868 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4869 
4870 static bool do_qq(DisasContext *dc, arg_r_r *a,
4871                   void (*func)(TCGv_i128, TCGv_i128))
4872 {
4873     TCGv_i128 t;
4874 
4875     if (gen_trap_ifnofpu(dc)) {
4876         return true;
4877     }
4878     if (gen_trap_float128(dc)) {
4879         return true;
4880     }
4881 
4882     gen_op_clear_ieee_excp_and_FTT();
4883     t = gen_load_fpr_Q(dc, a->rs);
4884     func(t, t);
4885     gen_store_fpr_Q(dc, a->rd, t);
4886     return advance_pc(dc);
4887 }
4888 
4889 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4890 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4891 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4892 
4893 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4894                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4895 {
4896     TCGv_i128 t;
4897 
4898     if (gen_trap_if_nofpu_fpexception(dc)) {
4899         return true;
4900     }
4901     if (gen_trap_float128(dc)) {
4902         return true;
4903     }
4904 
4905     t = gen_load_fpr_Q(dc, a->rs);
4906     func(t, tcg_env, t);
4907     gen_store_fpr_Q(dc, a->rd, t);
4908     return advance_pc(dc);
4909 }
4910 
4911 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4912 
4913 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4914                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4915 {
4916     TCGv_i128 src;
4917     TCGv_i32 dst;
4918 
4919     if (gen_trap_if_nofpu_fpexception(dc)) {
4920         return true;
4921     }
4922     if (gen_trap_float128(dc)) {
4923         return true;
4924     }
4925 
4926     src = gen_load_fpr_Q(dc, a->rs);
4927     dst = tcg_temp_new_i32();
4928     func(dst, tcg_env, src);
4929     gen_store_fpr_F(dc, a->rd, dst);
4930     return advance_pc(dc);
4931 }
4932 
4933 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4934 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4935 
4936 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4937                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4938 {
4939     TCGv_i128 src;
4940     TCGv_i64 dst;
4941 
4942     if (gen_trap_if_nofpu_fpexception(dc)) {
4943         return true;
4944     }
4945     if (gen_trap_float128(dc)) {
4946         return true;
4947     }
4948 
4949     src = gen_load_fpr_Q(dc, a->rs);
4950     dst = tcg_temp_new_i64();
4951     func(dst, tcg_env, src);
4952     gen_store_fpr_D(dc, a->rd, dst);
4953     return advance_pc(dc);
4954 }
4955 
4956 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4957 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4958 
4959 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4960                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4961 {
4962     TCGv_i32 src;
4963     TCGv_i128 dst;
4964 
4965     if (gen_trap_if_nofpu_fpexception(dc)) {
4966         return true;
4967     }
4968     if (gen_trap_float128(dc)) {
4969         return true;
4970     }
4971 
4972     src = gen_load_fpr_F(dc, a->rs);
4973     dst = tcg_temp_new_i128();
4974     func(dst, tcg_env, src);
4975     gen_store_fpr_Q(dc, a->rd, dst);
4976     return advance_pc(dc);
4977 }
4978 
4979 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4980 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4981 
4982 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4983                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4984 {
4985     TCGv_i64 src;
4986     TCGv_i128 dst;
4987 
4988     if (gen_trap_if_nofpu_fpexception(dc)) {
4989         return true;
4990     }
4991 
4992     src = gen_load_fpr_D(dc, a->rs);
4993     dst = tcg_temp_new_i128();
4994     func(dst, tcg_env, src);
4995     gen_store_fpr_Q(dc, a->rd, dst);
4996     return advance_pc(dc);
4997 }
4998 
4999 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
5000 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
5001 
5002 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
5003                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
5004 {
5005     TCGv_i32 src1, src2;
5006 
5007     if (gen_trap_ifnofpu(dc)) {
5008         return true;
5009     }
5010 
5011     src1 = gen_load_fpr_F(dc, a->rs1);
5012     src2 = gen_load_fpr_F(dc, a->rs2);
5013     func(src1, src1, src2);
5014     gen_store_fpr_F(dc, a->rd, src1);
5015     return advance_pc(dc);
5016 }
5017 
5018 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
5019 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
5020 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
5021 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
5022 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
5023 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
5024 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
5025 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
5026 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
5027 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
5028 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
5029 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
5030 
5031 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
5032 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
5033 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
5034 
5035 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
5036 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
5037 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
5038 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
5039 
5040 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
5041                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
5042 {
5043     TCGv_i32 src1, src2;
5044 
5045     if (gen_trap_if_nofpu_fpexception(dc)) {
5046         return true;
5047     }
5048 
5049     src1 = gen_load_fpr_F(dc, a->rs1);
5050     src2 = gen_load_fpr_F(dc, a->rs2);
5051     func(src1, tcg_env, src1, src2);
5052     gen_store_fpr_F(dc, a->rd, src1);
5053     return advance_pc(dc);
5054 }
5055 
5056 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
5057 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
5058 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
5059 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
5060 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
5061 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5062 
5063 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
5064                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
5065 {
5066     TCGv_i64 dst;
5067     TCGv_i32 src1, src2;
5068 
5069     if (gen_trap_ifnofpu(dc)) {
5070         return true;
5071     }
5072 
5073     dst = tcg_temp_new_i64();
5074     src1 = gen_load_fpr_F(dc, a->rs1);
5075     src2 = gen_load_fpr_F(dc, a->rs2);
5076     func(dst, src1, src2);
5077     gen_store_fpr_D(dc, a->rd, dst);
5078     return advance_pc(dc);
5079 }
5080 
5081 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
5082 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
5083 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
5084 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
5085 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5086 
5087 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
5088                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
5089 {
5090     TCGv_i64 dst, src2;
5091     TCGv_i32 src1;
5092 
5093     if (gen_trap_ifnofpu(dc)) {
5094         return true;
5095     }
5096 
5097     dst = tcg_temp_new_i64();
5098     src1 = gen_load_fpr_F(dc, a->rs1);
5099     src2 = gen_load_fpr_D(dc, a->rs2);
5100     func(dst, src1, src2);
5101     gen_store_fpr_D(dc, a->rd, dst);
5102     return advance_pc(dc);
5103 }
5104 
5105 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5106 
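/*
 * Expand a VIS operation through the generic vector infrastructure,
 * operating in place on the 8-byte double-precision register slots
 * (oprsz = maxsz = 8) at the given element size.
 */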
5107 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
5108                         void (*func)(unsigned, uint32_t, uint32_t,
5109                                      uint32_t, uint32_t, uint32_t))
5110 {
5111     if (gen_trap_ifnofpu(dc)) {
5112         return true;
5113     }
5114 
5115     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
5116          gen_offset_fpr_D(a->rs2), 8, 8);
5117     return advance_pc(dc);
5118 }
5119 
5120 TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
5121 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
5122 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
5123 
5124 TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
5125 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
5126 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
5127 
5128 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
5129 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
5130 
5131 TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
5132 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
5133 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
5134 TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
5135 TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)
5136 
5137 TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
5138 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
5139 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
5140 TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
5141 TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)
5142 
5143 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
5144 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
5145 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
5146 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
5147 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
5148 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5149 
5150 TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
5151 TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
5152 TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
5153 TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
5154 TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
5155 TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)
5156 
5157 TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
5158 TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
5159 TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
5160 TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
5161 TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
5162 TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5163 
5164 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5165                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5166 {
5167     TCGv_i64 dst, src1, src2;
5168 
5169     if (gen_trap_ifnofpu(dc)) {
5170         return true;
5171     }
5172 
5173     dst = tcg_temp_new_i64();
5174     src1 = gen_load_fpr_D(dc, a->rs1);
5175     src2 = gen_load_fpr_D(dc, a->rs2);
5176     func(dst, src1, src2);
5177     gen_store_fpr_D(dc, a->rd, dst);
5178     return advance_pc(dc);
5179 }
5180 
5181 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5182 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5183 
5184 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5185 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5186 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5187 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5188 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5189 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5190 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5191 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5192 
5193 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5194 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
5195 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5196 
5197 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5198 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5199 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5200 
5201 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5202 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5203 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5204 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5205 
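/*
 * As do_ddd, but the result is an integer: the VIS partitioned
 * compares read two double-precision sources and deposit a mask of
 * per-element results in the rd GPR.
 */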
5206 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5207                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5208 {
5209     TCGv_i64 src1, src2;
5210     TCGv dst;
5211 
5212     if (gen_trap_ifnofpu(dc)) {
5213         return true;
5214     }
5215 
5216     dst = gen_dest_gpr(dc, a->rd);
5217     src1 = gen_load_fpr_D(dc, a->rs1);
5218     src2 = gen_load_fpr_D(dc, a->rs2);
5219     func(dst, src1, src2);
5220     gen_store_gpr(dc, a->rd, dst);
5221     return advance_pc(dc);
5222 }
5223 
5224 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5225 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5226 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5227 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5228 TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
5229 TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)
5230 
5231 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5232 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5233 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5234 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5235 TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
5236 TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)
5237 
5238 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5239 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5240 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5241 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5242 TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
5243 TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)
5244 
5245 TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
5246 TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
5247 TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5248 
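/*
 * Double-precision arithmetic that can raise IEEE exceptions: the
 * helpers take tcg_env to reach the softfloat status, and we must
 * also check for a deferred FP exception before emitting the op.
 */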
5249 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5250                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5251 {
5252     TCGv_i64 dst, src1, src2;
5253 
5254     if (gen_trap_if_nofpu_fpexception(dc)) {
5255         return true;
5256     }
5257 
5258     dst = tcg_temp_new_i64();
5259     src1 = gen_load_fpr_D(dc, a->rs1);
5260     src2 = gen_load_fpr_D(dc, a->rs2);
5261     func(dst, tcg_env, src1, src2);
5262     gen_store_fpr_D(dc, a->rd, dst);
5263     return advance_pc(dc);
5264 }
5265 
5266 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5267 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5268 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5269 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5270 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5271 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5272 
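/*
 * FsMULd widens: two single-precision sources, one double-precision
 * result.  CPUs lacking CPU_FEATURE_FSMULD raise an unimplemented-FPop
 * trap here rather than treating the opcode as illegal.
 */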
5273 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5274 {
5275     TCGv_i64 dst;
5276     TCGv_i32 src1, src2;
5277 
5278     if (gen_trap_if_nofpu_fpexception(dc)) {
5279         return true;
5280     }
5281     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5282         return raise_unimpfpop(dc);
5283     }
5284 
5285     dst = tcg_temp_new_i64();
5286     src1 = gen_load_fpr_F(dc, a->rs1);
5287     src2 = gen_load_fpr_F(dc, a->rs2);
5288     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5289     gen_store_fpr_D(dc, a->rd, dst);
5290     return advance_pc(dc);
5291 }
5292 
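/* The VIS3 negated variant, gated on avail_VIS3 instead of a feature bit. */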
5293 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5294 {
5295     TCGv_i64 dst;
5296     TCGv_i32 src1, src2;
5297 
5298     if (!avail_VIS3(dc)) {
5299         return false;
5300     }
5301     if (gen_trap_ifnofpu(dc)) {
5302         return true;
5303     }
5304     dst = tcg_temp_new_i64();
5305     src1 = gen_load_fpr_F(dc, a->rs1);
5306     src2 = gen_load_fpr_F(dc, a->rs2);
5307     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5308     gen_store_fpr_D(dc, a->rd, dst);
5309     return advance_pc(dc);
5310 }
5311 
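/*
 * Three-source single-precision shell for the fused multiply-add
 * family: all four operands live in 32-bit FP registers.
 */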
5312 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5313                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5314 {
5315     TCGv_i32 dst, src1, src2, src3;
5316 
5317     if (gen_trap_ifnofpu(dc)) {
5318         return true;
5319     }
5320 
5321     src1 = gen_load_fpr_F(dc, a->rs1);
5322     src2 = gen_load_fpr_F(dc, a->rs2);
5323     src3 = gen_load_fpr_F(dc, a->rs3);
5324     dst = tcg_temp_new_i32();
5325     func(dst, src1, src2, src3);
5326     gen_store_fpr_F(dc, a->rd, dst);
5327     return advance_pc(dc);
5328 }
5329 
5330 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5331 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5332 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5333 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5334 
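/* As do_ffff, but with all four operands in 64-bit FP registers. */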
5335 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5336                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5337 {
5338     TCGv_i64 dst, src1, src2, src3;
5339 
5340     if (gen_trap_ifnofpu(dc)) {
5341         return true;
5342     }
5343 
5344     dst  = tcg_temp_new_i64();
5345     src1 = gen_load_fpr_D(dc, a->rs1);
5346     src2 = gen_load_fpr_D(dc, a->rs2);
5347     src3 = gen_load_fpr_D(dc, a->rs3);
5348     func(dst, src1, src2, src3);
5349     gen_store_fpr_D(dc, a->rd, dst);
5350     return advance_pc(dc);
5351 }
5352 
5353 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5354 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5355 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5356 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5357 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5358 TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
5359 TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5360 
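/*
 * The VIS4 register form of FALIGNDATA: rd doubles as the first
 * source, and the alignment count comes from the rs1 GPR instead of
 * GSR.align (the FALIGNDATAg form above).
 */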
5361 static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
5362 {
5363     TCGv_i64 dst, src1, src2;
5364     TCGv src3;
5365 
5366     if (!avail_VIS4(dc)) {
5367         return false;
5368     }
5369     if (gen_trap_ifnofpu(dc)) {
5370         return true;
5371     }
5372 
5373     dst  = tcg_temp_new_i64();
5374     src1 = gen_load_fpr_D(dc, a->rd);
5375     src2 = gen_load_fpr_D(dc, a->rs2);
5376     src3 = gen_load_gpr(dc, a->rs1);
5377     gen_op_faligndata_i(dst, src1, src2, src3);
5378     gen_store_fpr_D(dc, a->rd, dst);
5379     return advance_pc(dc);
5380 }
5381 
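/*
 * Quad-precision arithmetic.  func writes its 128-bit result back
 * into the src1 temporary, so no separate dst is allocated; the
 * extra gen_trap_float128() check rejects CPUs without a quad FPU.
 */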
5382 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5383                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5384 {
5385     TCGv_i128 src1, src2;
5386 
5387     if (gen_trap_if_nofpu_fpexception(dc)) {
5388         return true;
5389     }
5390     if (gen_trap_float128(dc)) {
5391         return true;
5392     }
5393 
5394     src1 = gen_load_fpr_Q(dc, a->rs1);
5395     src2 = gen_load_fpr_Q(dc, a->rs2);
5396     func(src1, tcg_env, src1, src2);
5397     gen_store_fpr_Q(dc, a->rd, src1);
5398     return advance_pc(dc);
5399 }
5400 
5401 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5402 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5403 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5404 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5405 
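/* FdMULq widens: double-precision sources, quad-precision product. */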
5406 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5407 {
5408     TCGv_i64 src1, src2;
5409     TCGv_i128 dst;
5410 
5411     if (gen_trap_if_nofpu_fpexception(dc)) {
5412         return true;
5413     }
5414     if (gen_trap_float128(dc)) {
5415         return true;
5416     }
5417 
5418     src1 = gen_load_fpr_D(dc, a->rs1);
5419     src2 = gen_load_fpr_D(dc, a->rs2);
5420     dst = tcg_temp_new_i128();
5421     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5422     gen_store_fpr_Q(dc, a->rd, dst);
5423     return advance_pc(dc);
5424 }
5425 
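/*
 * FMOVR: conditional FP move selected by comparing an integer
 * register against zero.  gen_compare_reg() returns false for the
 * reserved rcond encodings, which makes the opcode illegal.
 */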
5426 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5427                      void (*func)(DisasContext *, DisasCompare *, int, int))
5428 {
5429     DisasCompare cmp;
5430 
5431     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5432         return false;
5433     }
5434     if (gen_trap_ifnofpu(dc)) {
5435         return true;
5436     }
5437     if (is_128 && gen_trap_float128(dc)) {
5438         return true;
5439     }
5440 
5441     gen_op_clear_ieee_excp_and_FTT();
5442     func(dc, &cmp, a->rd, a->rs2);
5443     return advance_pc(dc);
5444 }
5445 
5446 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5447 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5448 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5449 
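/* FMOVcc: conditional FP move on the integer condition codes. */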
5450 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5451                       void (*func)(DisasContext *, DisasCompare *, int, int))
5452 {
5453     DisasCompare cmp;
5454 
5455     if (gen_trap_ifnofpu(dc)) {
5456         return true;
5457     }
5458     if (is_128 && gen_trap_float128(dc)) {
5459         return true;
5460     }
5461 
5462     gen_op_clear_ieee_excp_and_FTT();
5463     gen_compare(&cmp, a->cc, a->cond, dc);
5464     func(dc, &cmp, a->rd, a->rs2);
5465     return advance_pc(dc);
5466 }
5467 
5468 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5469 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5470 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5471 
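/* FMOVfcc: conditional FP move on one of the FP condition-code fields. */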
5472 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5473                        void (*func)(DisasContext *, DisasCompare *, int, int))
5474 {
5475     DisasCompare cmp;
5476 
5477     if (gen_trap_ifnofpu(dc)) {
5478         return true;
5479     }
5480     if (is_128 && gen_trap_float128(dc)) {
5481         return true;
5482     }
5483 
5484     gen_op_clear_ieee_excp_and_FTT();
5485     gen_fcompare(&cmp, a->cc, a->cond);
5486     func(dc, &cmp, a->rd, a->rs2);
5487     return advance_pc(dc);
5488 }
5489 
5490 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5491 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5492 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5493 
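/*
 * FCMP/FCMPE (likewise do_fcmpd and do_fcmpq below).  SPARCv8 has
 * only %fcc0, so a non-zero cc field is illegal on 32-bit CPUs.  The
 * E variants additionally signal an IEEE invalid-operation exception
 * on quiet NaNs, hence the separate helper.
 */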
5494 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5495 {
5496     TCGv_i32 src1, src2;
5497 
5498     if (avail_32(dc) && a->cc != 0) {
5499         return false;
5500     }
5501     if (gen_trap_if_nofpu_fpexception(dc)) {
5502         return true;
5503     }
5504 
5505     src1 = gen_load_fpr_F(dc, a->rs1);
5506     src2 = gen_load_fpr_F(dc, a->rs2);
5507     if (e) {
5508         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
5509     } else {
5510         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5511     }
5512     return advance_pc(dc);
5513 }
5514 
5515 TRANS(FCMPs, ALL, do_fcmps, a, false)
5516 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5517 
5518 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5519 {
5520     TCGv_i64 src1, src2;
5521 
5522     if (avail_32(dc) && a->cc != 0) {
5523         return false;
5524     }
5525     if (gen_trap_if_nofpu_fpexception(dc)) {
5526         return true;
5527     }
5528 
5529     src1 = gen_load_fpr_D(dc, a->rs1);
5530     src2 = gen_load_fpr_D(dc, a->rs2);
5531     if (e) {
5532         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5533     } else {
5534         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5535     }
5536     return advance_pc(dc);
5537 }
5538 
5539 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5540 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5541 
5542 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5543 {
5544     TCGv_i128 src1, src2;
5545 
5546     if (avail_32(dc) && a->cc != 0) {
5547         return false;
5548     }
5549     if (gen_trap_if_nofpu_fpexception(dc)) {
5550         return true;
5551     }
5552     if (gen_trap_float128(dc)) {
5553         return true;
5554     }
5555 
5556     src1 = gen_load_fpr_Q(dc, a->rs1);
5557     src2 = gen_load_fpr_Q(dc, a->rs2);
5558     if (e) {
5559         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5560     } else {
5561         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5562     }
5563     return advance_pc(dc);
5564 }
5565 
5566 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5567 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5568 
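/*
 * VIS3 lexicographic compare.  It raises no IEEE exceptions, so only
 * the FPU-enabled check is needed before writing %fcc directly.
 */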
5569 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5570 {
5571     TCGv_i32 src1, src2;
5572 
5573     if (!avail_VIS3(dc)) {
5574         return false;
5575     }
5576     if (gen_trap_ifnofpu(dc)) {
5577         return true;
5578     }
5579 
5580     src1 = gen_load_fpr_F(dc, a->rs1);
5581     src2 = gen_load_fpr_F(dc, a->rs2);
5582     gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5583     return advance_pc(dc);
5584 }
5585 
5586 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5587 {
5588     TCGv_i64 src1, src2;
5589 
5590     if (!avail_VIS3(dc)) {
5591         return false;
5592     }
5593     if (gen_trap_ifnofpu(dc)) {
5594         return true;
5595     }
5596 
5597     src1 = gen_load_fpr_D(dc, a->rs1);
5598     src2 = gen_load_fpr_D(dc, a->rs2);
5599     gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5600     return advance_pc(dc);
5601 }
5602 
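/*
 * VIS3B moves between FP registers and GPRs are plain loads from the
 * CPU state: "offset" locates the FP register within CPUSPARCState
 * and "load" selects the width and sign extension.
 */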
5603 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5604                       int (*offset)(unsigned int),
5605                       void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5606 {
5607     TCGv dst;
5608 
5609     if (gen_trap_ifnofpu(dc)) {
5610         return true;
5611     }
5612     dst = gen_dest_gpr(dc, a->rd);
5613     load(dst, tcg_env, offset(a->rs));
5614     gen_store_gpr(dc, a->rd, dst);
5615     return advance_pc(dc);
5616 }
5617 
5618 TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
5619 TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
5620 TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5621 
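/* The reverse direction: store a GPR value into an FP register. */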
5622 static bool do_movr2f(DisasContext *dc, arg_r_r *a,
5623                       int (*offset)(unsigned int),
5624                       void (*store)(TCGv, TCGv_ptr, tcg_target_long))
5625 {
5626     TCGv src;
5627 
5628     if (gen_trap_ifnofpu(dc)) {
5629         return true;
5630     }
5631     src = gen_load_gpr(dc, a->rs);
5632     store(src, tcg_env, offset(a->rd));
5633     return advance_pc(dc);
5634 }
5635 
5636 TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
5637 TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5638 
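/*
 * Translator hooks.  All per-TB state is unpacked here from
 * tb->flags and from tb->cs_base, which carries the NPC.
 */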
5639 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5640 {
5641     DisasContext *dc = container_of(dcbase, DisasContext, base);
5642     int bound;
5643 
5644     dc->pc = dc->base.pc_first;
5645     dc->npc = (target_ulong)dc->base.tb->cs_base;
5646     dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5647     dc->def = &cpu_env(cs)->def;
5648     dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5649     dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5650 #ifndef CONFIG_USER_ONLY
5651     dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5652 # ifdef TARGET_SPARC64
5653     dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5654 # else
5655     dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
5656 # endif
5657 #endif
5658 #ifdef TARGET_SPARC64
5659     dc->fprs_dirty = 0;
5660     dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5661 #endif
5662     /*
5663      * if we reach a page boundary, we stop generation so that the
5664      * PC of a TT_TFAULT exception is always in the right page
5665      */
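    /*
     * For example, with 4 KiB pages and pc_first 8 bytes below a
     * page boundary, (pc_first | TARGET_PAGE_MASK) is -8 and at
     * most two instructions are translated.
     */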
5666     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5667     dc->base.max_insns = MIN(dc->base.max_insns, bound);
5668 }
5669 
5670 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5671 {
5672 }
5673 
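/*
 * Real SPARC PCs are always 4-byte aligned, so the low two bits of
 * pc/npc are free to encode the symbolic values JUMP_PC, DYNAMIC_PC
 * and DYNAMIC_PC_LOOKUP; "npc & 3" tests for any of them.
 */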
5674 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5675 {
5676     DisasContext *dc = container_of(dcbase, DisasContext, base);
5677     target_ulong npc = dc->npc;
5678 
5679     if (npc & 3) {
5680         switch (npc) {
5681         case JUMP_PC:
5682             assert(dc->jump_pc[1] == dc->pc + 4);
5683             npc = dc->jump_pc[0] | JUMP_PC;
5684             break;
5685         case DYNAMIC_PC:
5686         case DYNAMIC_PC_LOOKUP:
5687             npc = DYNAMIC_PC;
5688             break;
5689         default:
5690             g_assert_not_reached();
5691         }
5692     }
5693     tcg_gen_insn_start(dc->pc, npc);
5694 }
5695 
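/*
 * Fetch and translate one instruction.  decode() is the
 * decodetree-generated entry point; anything it does not match is
 * reported as an illegal instruction.
 */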
5696 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5697 {
5698     DisasContext *dc = container_of(dcbase, DisasContext, base);
5699     unsigned int insn;
5700 
5701     insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
5702     dc->base.pc_next += 4;
5703 
5704     if (!decode(dc, insn)) {
5705         gen_exception(dc, TT_ILL_INSN);
5706     }
5707 
5708     if (dc->base.is_jmp == DISAS_NORETURN) {
5709         return;
5710     }
5711     if (dc->pc != dc->base.pc_next) {
5712         dc->base.is_jmp = DISAS_TOO_MANY;
5713     }
5714 }
5715 
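/*
 * End-of-TB codegen: statically known pc/npc pairs chain directly to
 * the next TB; a pending conditional branch (JUMP_PC) is resolved
 * here; otherwise fall back to a TB lookup or a full exit.  Finally,
 * emit the out-of-line code for exceptions queued for delay slots.
 */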
5716 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5717 {
5718     DisasContext *dc = container_of(dcbase, DisasContext, base);
5719     DisasDelayException *e, *e_next;
5720     bool may_lookup;
5721 
5722     finishing_insn(dc);
5723 
5724     switch (dc->base.is_jmp) {
5725     case DISAS_NEXT:
5726     case DISAS_TOO_MANY:
5727         if (((dc->pc | dc->npc) & 3) == 0) {
5728             /* static PC and NPC: we can use direct chaining */
5729             gen_goto_tb(dc, 0, dc->pc, dc->npc);
5730             break;
5731         }
5732 
5733         may_lookup = true;
5734         if (dc->pc & 3) {
5735             switch (dc->pc) {
5736             case DYNAMIC_PC_LOOKUP:
5737                 break;
5738             case DYNAMIC_PC:
5739                 may_lookup = false;
5740                 break;
5741             default:
5742                 g_assert_not_reached();
5743             }
5744         } else {
5745             tcg_gen_movi_tl(cpu_pc, dc->pc);
5746         }
5747 
5748         if (dc->npc & 3) {
5749             switch (dc->npc) {
5750             case JUMP_PC:
5751                 gen_generic_branch(dc);
5752                 break;
5753             case DYNAMIC_PC:
5754                 may_lookup = false;
5755                 break;
5756             case DYNAMIC_PC_LOOKUP:
5757                 break;
5758             default:
5759                 g_assert_not_reached();
5760             }
5761         } else {
5762             tcg_gen_movi_tl(cpu_npc, dc->npc);
5763         }
5764         if (may_lookup) {
5765             tcg_gen_lookup_and_goto_ptr();
5766         } else {
5767             tcg_gen_exit_tb(NULL, 0);
5768         }
5769         break;
5770 
5771     case DISAS_NORETURN:
5772         break;
5773 
5774     case DISAS_EXIT:
5775         /* Exit TB */
5776         save_state(dc);
5777         tcg_gen_exit_tb(NULL, 0);
5778         break;
5779 
5780     default:
5781         g_assert_not_reached();
5782     }
5783 
5784     for (e = dc->delay_excp_list; e; e = e_next) {
5785         gen_set_label(e->lab);
5786 
5787         tcg_gen_movi_tl(cpu_pc, e->pc);
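        /*
         * Only store npc when it is a real (4-byte aligned) address;
         * for the symbolic values, cpu_npc is already up to date.
         */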
5788         if (e->npc % 4 == 0) {
5789             tcg_gen_movi_tl(cpu_npc, e->npc);
5790         }
5791         gen_helper_raise_exception(tcg_env, e->excp);
5792 
5793         e_next = e->next;
5794         g_free(e);
5795     }
5796 }
5797 
5798 static const TranslatorOps sparc_tr_ops = {
5799     .init_disas_context = sparc_tr_init_disas_context,
5800     .tb_start           = sparc_tr_tb_start,
5801     .insn_start         = sparc_tr_insn_start,
5802     .translate_insn     = sparc_tr_translate_insn,
5803     .tb_stop            = sparc_tr_tb_stop,
5804 };
5805 
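/* Called from accel/tcg to translate one TB via the common loop. */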
5806 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5807                            vaddr pc, void *host_pc)
5808 {
5809     DisasContext dc = {};
5810 
5811     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5812 }
5813 
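/*
 * Register the TCG globals backing the CPU state.  cpu_regs[0] stays
 * NULL because %g0 is special-cased by the translator as constant
 * zero; the windowed registers (%o/%l/%i) are reached indirectly
 * through regwptr, which points at the current register window.
 */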
5814 void sparc_tcg_init(void)
5815 {
5816     static const char gregnames[32][4] = {
5817         "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5818         "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5819         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5820         "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5821     };
5822 
5823     static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5824 #ifdef TARGET_SPARC64
5825         { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5826         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
5827         { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
5828         { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
5829         { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
5830 #else
5831         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
5832 #endif
5833     };
5834 
5835     static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5836 #ifdef TARGET_SPARC64
5837         { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5838         { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5839         { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5840 #endif
5841         { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5842         { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5843         { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5844         { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5845         { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5846         { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5847         { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5848         { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5849         { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5850     };
5851 
5852     unsigned int i;
5853 
5854     cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5855                                          offsetof(CPUSPARCState, regwptr),
5856                                          "regwptr");
5857 
5858     for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5859         *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5860     }
5861 
5862     for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5863         *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5864     }
5865 
5866     cpu_regs[0] = NULL;
5867     for (i = 1; i < 8; ++i) {
5868         cpu_regs[i] = tcg_global_mem_new(tcg_env,
5869                                          offsetof(CPUSPARCState, gregs[i]),
5870                                          gregnames[i]);
5871     }
5872 
5873     for (i = 8; i < 32; ++i) {
5874         cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5875                                          (i - 8) * sizeof(target_ulong),
5876                                          gregnames[i]);
5877     }
5878 }
5879