/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"
#include "target/sparc/translate.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

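/*
 * Stub out the helpers that exist only for the other target so the
 * shared code below still compiles; qemu_build_not_reached() makes the
 * build fail if any such call survives dead-code elimination.
 */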
#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#else
    bool fsr_qne;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
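
/*
 * Illustrative example (not part of the original source): the 2-bit
 * "op" field of a SPARC instruction word occupies bits 31:30, so it
 * can be extracted as GET_FIELD(insn, 0, 1) in MSB-first numbering,
 * or equivalently as GET_FIELD_SP(insn, 30, 31) in manual numbering.
 */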

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */

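/*
 * The 32 single-precision registers are stored pairwise in the fpr[]
 * array of CPU_DoubleU: an even-numbered register lives in the upper
 * half of its double, the following odd register in the lower half.
 */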
static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

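/*
 * A quad register is the concatenation of two adjacent double
 * registers, with the lower-numbered double holding the most
 * significant half.
 */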
static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

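/*
 * When the v9 PSTATE.AM address mask is in effect, effective addresses
 * are truncated to 32 bits; AM_CHECK folds the cases that are already
 * decidable at build time.
 */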
#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

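/*
 * On sparc64 the icc carry is cached in bit 32 of cpu_icc_C (see
 * gen_op_addcc_int below), so extract it down to bit 0 before use;
 * on sparc32 cpu_icc_C already holds the carry in bit 0.
 */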
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

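/*
 * Compute the flags for an addition.  Overflow needs no extra compare:
 * V = (res ^ src2) & ~(src1 ^ src2), whose sign bit is set exactly when
 * both operands share a sign and the result does not.
 */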
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}

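/*
 * MULScc performs one step of a 32x32 multiply: the partial product in
 * rs1 is shifted right by one (shifting in icc.N ^ icc.V), rs2 is added
 * only when the low bit of %y is set, and %y is rotated to expose the
 * next multiplier bit.
 */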
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}

static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

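/* Return the high 64 bits of (src1 * src2) + src3, computed in 128 bits. */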
static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

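/*
 * Saturating 32-bit add.  t precomputes the saturated value with the
 * sign opposite to the raw result (INT32_MAX + 1 wraps to INT32_MIN
 * when r >= 0), and is selected whenever the overflow predicate in v
 * has its sign bit set.  gen_op_fpsubs32s below uses the same scheme
 * with the subtractive form of the overflow predicate.
 */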
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

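/*
 * Rounded average: (a + b + 1) >> 1, computed without widening as
 * (a >> 1) + (b >> 1) + ((a | b) & 1), which cannot overflow.
 */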
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

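/*
 * Materialize a deferred conditional branch:
 * npc = jump.cond(c1, c2) ? jump_pc[0] : jump_pc[1].
 */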
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

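/*
 * Register an exception to be raised out of line: the returned label is
 * the branch target for the in-line test, and the raise itself, using
 * the pc/npc captured here, is emitted later from dc->delay_excp_list.
 */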
static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

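/*
 * Decode one of the 16 integer condition codes into a DisasCompare
 * against zero, using either the icc (xcc == false) or xcc flags.
 * Conditions 8..15 are the negations of conditions 0..7.
 */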
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
                                   float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
                                   float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}

/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(0);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}

/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}

/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static bool gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return true;
    }
#endif
    return false;
}

static bool gen_trap_iffpexception(DisasContext *dc)
{
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    /*
     * There are 3 states for the sparc32 fpu:
     * Normally the fpu is in fp_execute, and all insns are allowed.
     * When an exception is signaled, it moves to fp_exception_pending state.
     * Upon seeing the next FPop, the fpu moves to fp_exception state,
     * populates the FQ, and generates an fp_exception trap.
     * The fpu remains in fp_exception state until FQ becomes empty
     * after execution of a STDFQ instruction.  While the fpu is in
     * fp_exception state, an FPop, fp load or fp branch insn will
     * return to fp_exception_pending state, set FSR.FTT to sequence_error,
     * and the insn will not be entered into the FQ.
     *
     * In QEMU, we do not model the fp_exception_pending state and
     * instead populate FQ and raise the exception immediately.
     * But we can still honor fp_exception state by noticing when
     * the FQ is not empty.
     */
    if (dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }
#endif
    return false;
}

static bool gen_trap_if_nofpu_fpexception(DisasContext *dc)
{
    return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc);
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
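/*
 * For example (illustrative): a plain ld/st passes asi == -1 and
 * resolves to GET_ASI_DIRECT with dc->mem_idx unchanged.
 */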
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled, to bypass the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_MON_AIUP:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_MON_AIUS:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_MON_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
        case ASI_MON_P:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
        case ASI_MON_P:
        case ASI_MON_S:
        case ASI_MON_AIUP:
        case ASI_MON_AIUS:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}

#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
1817         }
1818         break;
1819 #else
1820         g_assert_not_reached();
1821 #endif
1822 
1823     default:
1824         {
1825             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1826             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1827 
1828             save_state(dc);
1829 #ifdef TARGET_SPARC64
1830             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1831 #else
1832             {
1833                 TCGv_i64 t64 = tcg_temp_new_i64();
1834                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1835                 tcg_gen_trunc_i64_tl(dst, t64);
1836             }
1837 #endif
1838         }
1839         break;
1840     }
1841 }
1842 
1843 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1844 {
1845     switch (da->type) {
1846     case GET_ASI_EXCP:
1847         break;
1848 
1849     case GET_ASI_DTWINX: /* Reserved for stda.  */
1850         if (TARGET_LONG_BITS == 32) {
1851             gen_exception(dc, TT_ILL_INSN);
1852             break;
1853         } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1854             /* Pre-OpenSPARC CPUs don't have these */
1855             gen_exception(dc, TT_ILL_INSN);
1856             break;
1857         }
1858         /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1859         /* fall through */
1860 
1861     case GET_ASI_DIRECT:
1862         tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1863         break;
1864 
1865     case GET_ASI_BCOPY:
1866         assert(TARGET_LONG_BITS == 32);
1867         /*
1868          * Copy 32 bytes from the address in SRC to ADDR.
1869          *
1870          * From Ross RT625 hyperSPARC manual, section 4.6:
1871          * "Block Copy and Block Fill will work only on cache line boundaries."
1872          *
1873          * It does not specify if an unaligned address is truncated or trapped.
1874          * Previous qemu behaviour was to truncate to 4-byte alignment, which
1875          * is obviously wrong.  The only place I can see this used is in the
1876          * Linux kernel which begins with page alignment, advancing by 32,
1877          * so is always aligned.  Assume truncation as the simpler option.
1878          *
1879          * Since the loads and stores are paired, allow the copy to happen
1880          * in the host endianness.  The copy need not be atomic.
1881          */
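        /*
         * For example, a source address of 0x1010 and a destination of
         * 0x2024 are truncated to their 32-byte lines, 0x1000 and 0x2020,
         * and the copy proceeds as two 16-byte load/store pairs.
         */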
1882         {
1883             MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1884             TCGv saddr = tcg_temp_new();
1885             TCGv daddr = tcg_temp_new();
1886             TCGv_i128 tmp = tcg_temp_new_i128();
1887 
1888             tcg_gen_andi_tl(saddr, src, -32);
1889             tcg_gen_andi_tl(daddr, addr, -32);
1890             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1891             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1892             tcg_gen_addi_tl(saddr, saddr, 16);
1893             tcg_gen_addi_tl(daddr, daddr, 16);
1894             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1895             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1896         }
1897         break;
1898 
1899     default:
1900         {
1901             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1902             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1903 
1904             save_state(dc);
1905 #ifdef TARGET_SPARC64
1906             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1907 #else
1908             {
1909                 TCGv_i64 t64 = tcg_temp_new_i64();
1910                 tcg_gen_extu_tl_i64(t64, src);
1911                 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1912             }
1913 #endif
1914 
1915             /* A write to a TLB register may alter page maps.  End the TB. */
1916             dc->npc = DYNAMIC_PC;
1917         }
1918         break;
1919     }
1920 }
1921 
1922 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1923                          TCGv dst, TCGv src, TCGv addr)
1924 {
1925     switch (da->type) {
1926     case GET_ASI_EXCP:
1927         break;
1928     case GET_ASI_DIRECT:
1929         tcg_gen_atomic_xchg_tl(dst, addr, src,
1930                                da->mem_idx, da->memop | MO_ALIGN);
1931         break;
1932     default:
1933         /* ??? Should be DAE_invalid_asi.  */
1934         gen_exception(dc, TT_DATA_ACCESS);
1935         break;
1936     }
1937 }
1938 
1939 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1940                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1941 {
1942     switch (da->type) {
1943     case GET_ASI_EXCP:
1944         return;
1945     case GET_ASI_DIRECT:
1946         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1947                                   da->mem_idx, da->memop | MO_ALIGN);
1948         break;
1949     default:
1950         /* ??? Should be DAE_invalid_asi.  */
1951         gen_exception(dc, TT_DATA_ACCESS);
1952         break;
1953     }
1954 }
1955 
1956 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1957 {
1958     switch (da->type) {
1959     case GET_ASI_EXCP:
1960         break;
1961     case GET_ASI_DIRECT:
1962         tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1963                                da->mem_idx, MO_UB);
1964         break;
1965     default:
1966         /* ??? In theory, this should raise DAE_invalid_asi.
1967            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
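        /*
         * Under CF_PARALLEL we cannot emulate this read-modify-write
         * with separate load and store helpers, so exit to a serial
         * context in which the insn is re-executed with exclusive
         * access.
         */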
1968         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1969             gen_helper_exit_atomic(tcg_env);
1970         } else {
1971             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1972             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1973             TCGv_i64 s64, t64;
1974 
1975             save_state(dc);
1976             t64 = tcg_temp_new_i64();
1977             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1978 
1979             s64 = tcg_constant_i64(0xff);
1980             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1981 
1982             tcg_gen_trunc_i64_tl(dst, t64);
1983 
1984             /* End the TB.  */
1985             dc->npc = DYNAMIC_PC;
1986         }
1987         break;
1988     }
1989 }
1990 
1991 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1992                         TCGv addr, int rd)
1993 {
1994     MemOp memop = da->memop;
1995     MemOp size = memop & MO_SIZE;
1996     TCGv_i32 d32;
1997     TCGv_i64 d64, l64;
1998     TCGv addr_tmp;
1999 
2000     /* TODO: Use 128-bit load/store below. */
2001     if (size == MO_128) {
2002         memop = (memop & ~MO_SIZE) | MO_64;
2003     }
2004 
2005     switch (da->type) {
2006     case GET_ASI_EXCP:
2007         break;
2008 
2009     case GET_ASI_DIRECT:
2010         memop |= MO_ALIGN_4;
2011         switch (size) {
2012         case MO_32:
2013             d32 = tcg_temp_new_i32();
2014             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
2015             gen_store_fpr_F(dc, rd, d32);
2016             break;
2017 
2018         case MO_64:
2019             d64 = tcg_temp_new_i64();
2020             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2021             gen_store_fpr_D(dc, rd, d64);
2022             break;
2023 
2024         case MO_128:
2025             d64 = tcg_temp_new_i64();
2026             l64 = tcg_temp_new_i64();
2027             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2028             addr_tmp = tcg_temp_new();
2029             tcg_gen_addi_tl(addr_tmp, addr, 8);
2030             tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
2031             gen_store_fpr_D(dc, rd, d64);
2032             gen_store_fpr_D(dc, rd + 2, l64);
2033             break;
2034         default:
2035             g_assert_not_reached();
2036         }
2037         break;
2038 
2039     case GET_ASI_BLOCK:
2040         /* Valid for lddfa on aligned registers only.  */
2041         if (orig_size == MO_64 && (rd & 7) == 0) {
2042             /* The first operation checks required alignment.  */
2043             addr_tmp = tcg_temp_new();
2044             d64 = tcg_temp_new_i64();
2045             for (int i = 0; ; ++i) {
2046                 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
2047                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2048                 gen_store_fpr_D(dc, rd + 2 * i, d64);
2049                 if (i == 7) {
2050                     break;
2051                 }
2052                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2053                 addr = addr_tmp;
2054             }
2055         } else {
2056             gen_exception(dc, TT_ILL_INSN);
2057         }
2058         break;
2059 
2060     case GET_ASI_SHORT:
2061         /* Valid for lddfa only.  */
2062         if (orig_size == MO_64) {
2063             d64 = tcg_temp_new_i64();
2064             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2065             gen_store_fpr_D(dc, rd, d64);
2066         } else {
2067             gen_exception(dc, TT_ILL_INSN);
2068         }
2069         break;
2070 
2071     default:
2072         {
2073             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2074             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2075 
2076             save_state(dc);
2077             /* According to the table in the UA2011 manual, the only
2078                other asis that are valid for ldfa/lddfa/ldqfa are
2079                the NO_FAULT asis.  We still need a helper for these,
2080                but we can just use the integer asi helper for them.  */
2081             switch (size) {
2082             case MO_32:
2083                 d64 = tcg_temp_new_i64();
2084                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2085                 d32 = tcg_temp_new_i32();
2086                 tcg_gen_extrl_i64_i32(d32, d64);
2087                 gen_store_fpr_F(dc, rd, d32);
2088                 break;
2089             case MO_64:
2090                 d64 = tcg_temp_new_i64();
2091                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2092                 gen_store_fpr_D(dc, rd, d64);
2093                 break;
2094             case MO_128:
2095                 d64 = tcg_temp_new_i64();
2096                 l64 = tcg_temp_new_i64();
2097                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2098                 addr_tmp = tcg_temp_new();
2099                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2100                 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
2101                 gen_store_fpr_D(dc, rd, d64);
2102                 gen_store_fpr_D(dc, rd + 2, l64);
2103                 break;
2104             default:
2105                 g_assert_not_reached();
2106             }
2107         }
2108         break;
2109     }
2110 }
2111 
2112 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2113                         TCGv addr, int rd)
2114 {
2115     MemOp memop = da->memop;
2116     MemOp size = memop & MO_SIZE;
2117     TCGv_i32 d32;
2118     TCGv_i64 d64;
2119     TCGv addr_tmp;
2120 
2121     /* TODO: Use 128-bit load/store below. */
2122     if (size == MO_128) {
2123         memop = (memop & ~MO_SIZE) | MO_64;
2124     }
2125 
2126     switch (da->type) {
2127     case GET_ASI_EXCP:
2128         break;
2129 
2130     case GET_ASI_DIRECT:
2131         memop |= MO_ALIGN_4;
2132         switch (size) {
2133         case MO_32:
2134             d32 = gen_load_fpr_F(dc, rd);
2135             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2136             break;
2137         case MO_64:
2138             d64 = gen_load_fpr_D(dc, rd);
2139             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2140             break;
2141         case MO_128:
2142             /* Only 4-byte alignment required.  However, it is legal for the
2143                cpu to signal the alignment fault, and the OS trap handler is
2144                required to fix it up.  Requiring 16-byte alignment here avoids
2145                having to probe the second page before performing the first
2146                write.  */
2147             d64 = gen_load_fpr_D(dc, rd);
2148             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2149             addr_tmp = tcg_temp_new();
2150             tcg_gen_addi_tl(addr_tmp, addr, 8);
2151             d64 = gen_load_fpr_D(dc, rd + 2);
2152             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2153             break;
2154         default:
2155             g_assert_not_reached();
2156         }
2157         break;
2158 
2159     case GET_ASI_BLOCK:
2160         /* Valid for stdfa on aligned registers only.  */
2161         if (orig_size == MO_64 && (rd & 7) == 0) {
2162             /* The first operation checks required alignment.  */
2163             addr_tmp = tcg_temp_new();
2164             for (int i = 0; ; ++i) {
2165                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2166                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2167                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2168                 if (i == 7) {
2169                     break;
2170                 }
2171                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2172                 addr = addr_tmp;
2173             }
2174         } else {
2175             gen_exception(dc, TT_ILL_INSN);
2176         }
2177         break;
2178 
2179     case GET_ASI_SHORT:
2180         /* Valid for stdfa only.  */
2181         if (orig_size == MO_64) {
2182             d64 = gen_load_fpr_D(dc, rd);
2183             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2184         } else {
2185             gen_exception(dc, TT_ILL_INSN);
2186         }
2187         break;
2188 
2189     default:
2190         /* According to the table in the UA2011 manual, the only
2191            other asis that are valid for stfa/stdfa/stqfa are
2192            the PST* asis, which aren't currently handled.  */
2193         gen_exception(dc, TT_ILL_INSN);
2194         break;
2195     }
2196 }
2197 
2198 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2199 {
2200     TCGv hi = gen_dest_gpr(dc, rd);
2201     TCGv lo = gen_dest_gpr(dc, rd + 1);
2202 
2203     switch (da->type) {
2204     case GET_ASI_EXCP:
2205         return;
2206 
2207     case GET_ASI_DTWINX:
2208 #ifdef TARGET_SPARC64
2209         {
2210             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2211             TCGv_i128 t = tcg_temp_new_i128();
2212 
2213             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2214             /*
2215              * Note that LE twinx acts as if each 64-bit register result is
2216              * byte swapped.  We perform one 128-bit LE load, so must swap
2217              * the order of the writebacks.
2218              */
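            /*
             * E.g. a 128-bit LE load of bytes b0..b15 leaves the LE view
             * of b0..b7 in the low half of the result and that of b8..b15
             * in the high half; rd must receive the former and rd + 1 the
             * latter, the reverse of the big-endian extraction.
             */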
2219             if ((mop & MO_BSWAP) == MO_TE) {
2220                 tcg_gen_extr_i128_i64(lo, hi, t);
2221             } else {
2222                 tcg_gen_extr_i128_i64(hi, lo, t);
2223             }
2224         }
2225         break;
2226 #else
2227         g_assert_not_reached();
2228 #endif
2229 
2230     case GET_ASI_DIRECT:
2231         {
2232             TCGv_i64 tmp = tcg_temp_new_i64();
2233 
2234             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2235 
2236             /* Note that LE ldda acts as if each 32-bit register
2237                result is byte swapped.  Having just performed one
2238                64-bit bswap, we now need to swap the writebacks.  */
2239             if ((da->memop & MO_BSWAP) == MO_TE) {
2240                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2241             } else {
2242                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2243             }
2244         }
2245         break;
2246 
2247     case GET_ASI_CODE:
2248 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2249         {
2250             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2251             TCGv_i64 tmp = tcg_temp_new_i64();
2252 
2253             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2254 
2255             /* See above.  */
2256             if ((da->memop & MO_BSWAP) == MO_TE) {
2257                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2258             } else {
2259                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2260             }
2261         }
2262         break;
2263 #else
2264         g_assert_not_reached();
2265 #endif
2266 
2267     default:
2268         /* ??? In theory we've handled all of the ASIs that are valid
2269            for ldda, and this should raise DAE_invalid_asi.  However,
2270            real hardware allows others.  This can be seen with e.g.
2271            FreeBSD 10.3 wrt ASI_IC_TAG.  */
2272         {
2273             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2274             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2275             TCGv_i64 tmp = tcg_temp_new_i64();
2276 
2277             save_state(dc);
2278             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2279 
2280             /* See above.  */
2281             if ((da->memop & MO_BSWAP) == MO_TE) {
2282                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2283             } else {
2284                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2285             }
2286         }
2287         break;
2288     }
2289 
2290     gen_store_gpr(dc, rd, hi);
2291     gen_store_gpr(dc, rd + 1, lo);
2292 }
2293 
2294 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2295 {
2296     TCGv hi = gen_load_gpr(dc, rd);
2297     TCGv lo = gen_load_gpr(dc, rd + 1);
2298 
2299     switch (da->type) {
2300     case GET_ASI_EXCP:
2301         break;
2302 
2303     case GET_ASI_DTWINX:
2304 #ifdef TARGET_SPARC64
2305         {
2306             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2307             TCGv_i128 t = tcg_temp_new_i128();
2308 
2309             /*
2310              * Note that LE twinx acts as if each 64-bit register result is
2311              * byte swapped.  We perform one 128-bit LE store, so must swap
2312              * the order of the construction.
2313              */
2314             if ((mop & MO_BSWAP) == MO_TE) {
2315                 tcg_gen_concat_i64_i128(t, lo, hi);
2316             } else {
2317                 tcg_gen_concat_i64_i128(t, hi, lo);
2318             }
2319             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2320         }
2321         break;
2322 #else
2323         g_assert_not_reached();
2324 #endif
2325 
2326     case GET_ASI_DIRECT:
2327         {
2328             TCGv_i64 t64 = tcg_temp_new_i64();
2329 
2330             /* Note that LE stda acts as if each 32-bit register result is
2331                byte swapped.  We will perform one 64-bit LE store, so now
2332                we must swap the order of the construction.  */
2333             if ((da->memop & MO_BSWAP) == MO_TE) {
2334                 tcg_gen_concat_tl_i64(t64, lo, hi);
2335             } else {
2336                 tcg_gen_concat_tl_i64(t64, hi, lo);
2337             }
2338             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2339         }
2340         break;
2341 
2342     case GET_ASI_BFILL:
2343         assert(TARGET_LONG_BITS == 32);
2344         /*
2345          * Store 32 bytes of [rd:rd+1] to ADDR.
2346          * See comments for GET_ASI_BCOPY above.
2347          */
2348         {
2349             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2350             TCGv_i64 t8 = tcg_temp_new_i64();
2351             TCGv_i128 t16 = tcg_temp_new_i128();
2352             TCGv daddr = tcg_temp_new();
2353 
2354             tcg_gen_concat_tl_i64(t8, lo, hi);
2355             tcg_gen_concat_i64_i128(t16, t8, t8);
2356             tcg_gen_andi_tl(daddr, addr, -32);
2357             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2358             tcg_gen_addi_tl(daddr, daddr, 16);
2359             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2360         }
2361         break;
2362 
2363     default:
2364         /* ??? In theory we've handled all of the ASIs that are valid
2365            for stda, and this should raise DAE_invalid_asi.  */
2366         {
2367             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2368             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2369             TCGv_i64 t64 = tcg_temp_new_i64();
2370 
2371             /* See above.  */
2372             if ((da->memop & MO_BSWAP) == MO_TE) {
2373                 tcg_gen_concat_tl_i64(t64, lo, hi);
2374             } else {
2375                 tcg_gen_concat_tl_i64(t64, hi, lo);
2376             }
2377 
2378             save_state(dc);
2379             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2380         }
2381         break;
2382     }
2383 }
2384 
2385 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2386 {
2387 #ifdef TARGET_SPARC64
2388     TCGv_i32 c32, zero, dst, s1, s2;
2389     TCGv_i64 c64 = tcg_temp_new_i64();
2390 
2391     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2392        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2393        the latter.  */
2394     c32 = tcg_temp_new_i32();
2395     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2396     tcg_gen_extrl_i64_i32(c32, c64);
2397 
2398     s1 = gen_load_fpr_F(dc, rs);
2399     s2 = gen_load_fpr_F(dc, rd);
2400     dst = tcg_temp_new_i32();
2401     zero = tcg_constant_i32(0);
2402 
2403     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2404 
2405     gen_store_fpr_F(dc, rd, dst);
2406 #else
2407     qemu_build_not_reached();
2408 #endif
2409 }
2410 
2411 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2412 {
2413 #ifdef TARGET_SPARC64
2414     TCGv_i64 dst = tcg_temp_new_i64();
2415     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2416                         gen_load_fpr_D(dc, rs),
2417                         gen_load_fpr_D(dc, rd));
2418     gen_store_fpr_D(dc, rd, dst);
2419 #else
2420     qemu_build_not_reached();
2421 #endif
2422 }
2423 
2424 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2425 {
2426 #ifdef TARGET_SPARC64
2427     TCGv c2 = tcg_constant_tl(cmp->c2);
2428     TCGv_i64 h = tcg_temp_new_i64();
2429     TCGv_i64 l = tcg_temp_new_i64();
2430 
2431     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2432                         gen_load_fpr_D(dc, rs),
2433                         gen_load_fpr_D(dc, rd));
2434     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2435                         gen_load_fpr_D(dc, rs + 2),
2436                         gen_load_fpr_D(dc, rd + 2));
2437     gen_store_fpr_D(dc, rd, h);
2438     gen_store_fpr_D(dc, rd + 2, l);
2439 #else
2440     qemu_build_not_reached();
2441 #endif
2442 }
2443 
2444 #ifdef TARGET_SPARC64
2445 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2446 {
2447     TCGv_i32 r_tl = tcg_temp_new_i32();
2448 
2449     /* load env->tl into r_tl */
2450     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2451 
2452     /* tl = [0 ... MAXTL_MASK], where MAXTL_MASK + 1 must be a power of 2 */
2453     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2454 
2455     /* calculate offset to current trap state from env->ts, reuse r_tl */
2456     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2457     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2458 
2459     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2460     {
2461         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2462         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2463         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2464     }
2465 }
2466 #endif
2467 
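/*
 * Extract a double/quad FP register number from its 5-bit encoding.
 * On sparc64, bit 0 of the field supplies bit 5 of the register number,
 * so e.g. an encoding of 0x01 names %f32; on sparc32 only the even
 * registers %f0..%f30 are reachable.
 */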
2468 static int extract_dfpreg(DisasContext *dc, int x)
2469 {
2470     int r = x & 0x1e;
2471 #ifdef TARGET_SPARC64
2472     r |= (x & 1) << 5;
2473 #endif
2474     return r;
2475 }
2476 
2477 static int extract_qfpreg(DisasContext *dc, int x)
2478 {
2479     int r = x & 0x1c;
2480 #ifdef TARGET_SPARC64
2481     r |= (x & 1) << 5;
2482 #endif
2483     return r;
2484 }
2485 
2486 /* Include the auto-generated decoder.  */
2487 #include "decode-insns.c.inc"
2488 
2489 #define TRANS(NAME, AVAIL, FUNC, ...) \
2490     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2491     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
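/*
 * For instance, TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
 * defines trans_RDCCR(), which fails the decode match unless avail_64(dc)
 * holds and otherwise tail-calls do_rd_special(dc, true, a->rd, do_rdccr).
 */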
2492 
2493 #define avail_ALL(C)      true
2494 #ifdef TARGET_SPARC64
2495 # define avail_32(C)      false
2496 # define avail_ASR17(C)   false
2497 # define avail_CASA(C)    true
2498 # define avail_DIV(C)     true
2499 # define avail_MUL(C)     true
2500 # define avail_POWERDOWN(C) false
2501 # define avail_64(C)      true
2502 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2503 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2504 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2505 # define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
2506 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2507 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2508 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2509 # define avail_VIS3B(C)   avail_VIS3(C)
2510 # define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
2511 #else
2512 # define avail_32(C)      true
2513 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2514 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2515 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2516 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2517 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2518 # define avail_64(C)      false
2519 # define avail_FMAF(C)    false
2520 # define avail_GL(C)      false
2521 # define avail_HYPV(C)    false
2522 # define avail_IMA(C)     false
2523 # define avail_VIS1(C)    false
2524 # define avail_VIS2(C)    false
2525 # define avail_VIS3(C)    false
2526 # define avail_VIS3B(C)   false
2527 # define avail_VIS4(C)    false
2528 #endif
2529 
2530 /* Default case for non-jump instructions. */
2531 static bool advance_pc(DisasContext *dc)
2532 {
2533     TCGLabel *l1;
2534 
2535     finishing_insn(dc);
2536 
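    /*
     * Real next-PC values are always 4-byte aligned, so a nonzero
     * (npc & 3) marks one of the DYNAMIC_PC / DYNAMIC_PC_LOOKUP /
     * JUMP_PC sentinel values.
     */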
2537     if (dc->npc & 3) {
2538         switch (dc->npc) {
2539         case DYNAMIC_PC:
2540         case DYNAMIC_PC_LOOKUP:
2541             dc->pc = dc->npc;
2542             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2543             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2544             break;
2545 
2546         case JUMP_PC:
2547             /* we can do a static jump */
2548             l1 = gen_new_label();
2549             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2550 
2551             /* jump not taken */
2552             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2553 
2554             /* jump taken */
2555             gen_set_label(l1);
2556             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2557 
2558             dc->base.is_jmp = DISAS_NORETURN;
2559             break;
2560 
2561         default:
2562             g_assert_not_reached();
2563         }
2564     } else {
2565         dc->pc = dc->npc;
2566         dc->npc = dc->npc + 4;
2567     }
2568     return true;
2569 }
2570 
2571 /*
2572  * Major opcodes 00 and 01 -- branches, call, and sethi
2573  */
2574 
2575 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2576                               bool annul, int disp)
2577 {
2578     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2579     target_ulong npc;
2580 
2581     finishing_insn(dc);
2582 
2583     if (cmp->cond == TCG_COND_ALWAYS) {
2584         if (annul) {
2585             dc->pc = dest;
2586             dc->npc = dest + 4;
2587         } else {
2588             gen_mov_pc_npc(dc);
2589             dc->npc = dest;
2590         }
2591         return true;
2592     }
2593 
2594     if (cmp->cond == TCG_COND_NEVER) {
2595         npc = dc->npc;
2596         if (npc & 3) {
2597             gen_mov_pc_npc(dc);
2598             if (annul) {
2599                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2600             }
2601             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2602         } else {
2603             dc->pc = npc + (annul ? 4 : 0);
2604             dc->npc = dc->pc + 4;
2605         }
2606         return true;
2607     }
2608 
2609     flush_cond(dc);
2610     npc = dc->npc;
2611 
2612     if (annul) {
2613         TCGLabel *l1 = gen_new_label();
2614 
2615         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2616         gen_goto_tb(dc, 0, npc, dest);
2617         gen_set_label(l1);
2618         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2619 
2620         dc->base.is_jmp = DISAS_NORETURN;
2621     } else {
2622         if (npc & 3) {
2623             switch (npc) {
2624             case DYNAMIC_PC:
2625             case DYNAMIC_PC_LOOKUP:
2626                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2627                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2628                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2629                                    cmp->c1, tcg_constant_tl(cmp->c2),
2630                                    tcg_constant_tl(dest), cpu_npc);
2631                 dc->pc = npc;
2632                 break;
2633             default:
2634                 g_assert_not_reached();
2635             }
2636         } else {
2637             dc->pc = npc;
2638             dc->npc = JUMP_PC;
2639             dc->jump = *cmp;
2640             dc->jump_pc[0] = dest;
2641             dc->jump_pc[1] = npc + 4;
2642 
2643             /* cpu_cond is only ever tested as NE vs zero, so any nonzero-iff-taken value works. */
2644             if (cmp->cond == TCG_COND_NE) {
2645                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2646             } else {
2647                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2648             }
2649             dc->cpu_cond_live = true;
2650         }
2651     }
2652     return true;
2653 }
2654 
2655 static bool raise_priv(DisasContext *dc)
2656 {
2657     gen_exception(dc, TT_PRIV_INSN);
2658     return true;
2659 }
2660 
2661 static bool raise_unimpfpop(DisasContext *dc)
2662 {
2663     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2664     return true;
2665 }
2666 
2667 static bool gen_trap_float128(DisasContext *dc)
2668 {
2669     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2670         return false;
2671     }
2672     return raise_unimpfpop(dc);
2673 }
2674 
2675 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2676 {
2677     DisasCompare cmp;
2678 
2679     gen_compare(&cmp, a->cc, a->cond, dc);
2680     return advance_jump_cond(dc, &cmp, a->a, a->i);
2681 }
2682 
2683 TRANS(Bicc, ALL, do_bpcc, a)
2684 TRANS(BPcc,  64, do_bpcc, a)
2685 
2686 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2687 {
2688     DisasCompare cmp;
2689 
2690     if (gen_trap_if_nofpu_fpexception(dc)) {
2691         return true;
2692     }
2693     gen_fcompare(&cmp, a->cc, a->cond);
2694     return advance_jump_cond(dc, &cmp, a->a, a->i);
2695 }
2696 
2697 TRANS(FBPfcc,  64, do_fbpfcc, a)
2698 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2699 
2700 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2701 {
2702     DisasCompare cmp;
2703 
2704     if (!avail_64(dc)) {
2705         return false;
2706     }
2707     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2708         return false;
2709     }
2710     return advance_jump_cond(dc, &cmp, a->a, a->i);
2711 }
2712 
2713 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2714 {
2715     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2716 
2717     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2718     gen_mov_pc_npc(dc);
2719     dc->npc = target;
2720     return true;
2721 }
2722 
2723 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2724 {
2725     /*
2726      * For sparc32, always generate the no-coprocessor exception.
2727      * For sparc64, always generate illegal instruction.
2728      */
2729 #ifdef TARGET_SPARC64
2730     return false;
2731 #else
2732     gen_exception(dc, TT_NCP_INSN);
2733     return true;
2734 #endif
2735 }
2736 
2737 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2738 {
2739     /* Special-case %g0 because that's the canonical nop.  */
2740     if (a->rd) {
2741         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2742     }
2743     return advance_pc(dc);
2744 }
2745 
2746 /*
2747  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2748  */
2749 
2750 static bool do_tcc(DisasContext *dc, int cond, int cc,
2751                    int rs1, bool imm, int rs2_or_imm)
2752 {
2753     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2754                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2755     DisasCompare cmp;
2756     TCGLabel *lab;
2757     TCGv_i32 trap;
2758 
2759     /* Trap never.  */
2760     if (cond == 0) {
2761         return advance_pc(dc);
2762     }
2763 
2764     /*
2765      * Immediate traps are the most common case.  Since this value is
2766      * live across the branch, it really pays to evaluate the constant.
2767      */
2768     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2769         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2770     } else {
2771         trap = tcg_temp_new_i32();
2772         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2773         if (imm) {
2774             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2775         } else {
2776             TCGv_i32 t2 = tcg_temp_new_i32();
2777             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2778             tcg_gen_add_i32(trap, trap, t2);
2779         }
2780         tcg_gen_andi_i32(trap, trap, mask);
2781         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2782     }
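    /* E.g. "ta 5" has rs1 == %g0 and an immediate operand, so the trap
       number folds to the constant (5 & mask) + TT_TRAP at translation
       time. */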
2783 
2784     finishing_insn(dc);
2785 
2786     /* Trap always.  */
2787     if (cond == 8) {
2788         save_state(dc);
2789         gen_helper_raise_exception(tcg_env, trap);
2790         dc->base.is_jmp = DISAS_NORETURN;
2791         return true;
2792     }
2793 
2794     /* Conditional trap.  */
2795     flush_cond(dc);
2796     lab = delay_exceptionv(dc, trap);
2797     gen_compare(&cmp, cc, cond, dc);
2798     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2799 
2800     return advance_pc(dc);
2801 }
2802 
2803 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2804 {
2805     if (avail_32(dc) && a->cc) {
2806         return false;
2807     }
2808     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2809 }
2810 
2811 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2812 {
2813     if (avail_64(dc)) {
2814         return false;
2815     }
2816     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2817 }
2818 
2819 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2820 {
2821     if (avail_32(dc)) {
2822         return false;
2823     }
2824     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2825 }
2826 
2827 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2828 {
2829     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2830     return advance_pc(dc);
2831 }
2832 
2833 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2834 {
2835     if (avail_32(dc)) {
2836         return false;
2837     }
2838     if (a->mmask) {
2839         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
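        /* E.g. #LoadLoad (mmask bit 0) is TCG_MO_LD_LD (0x01) and
           #StoreStore (mmask bit 3) is TCG_MO_ST_ST (0x08). */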
2840         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2841     }
2842     if (a->cmask) {
2843         /* For #Sync, etc., end the TB to recognize interrupts. */
2844         dc->base.is_jmp = DISAS_EXIT;
2845     }
2846     return advance_pc(dc);
2847 }
2848 
2849 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2850                           TCGv (*func)(DisasContext *, TCGv))
2851 {
2852     if (!priv) {
2853         return raise_priv(dc);
2854     }
2855     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2856     return advance_pc(dc);
2857 }
2858 
2859 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2860 {
2861     return cpu_y;
2862 }
2863 
2864 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2865 {
2866     /*
2867      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2868      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2869      * This pattern matches after all other ASRs, so Leon3 ASR17 is handled first.
2870      */
2871     if (avail_64(dc) && a->rs1 != 0) {
2872         return false;
2873     }
2874     return do_rd_special(dc, true, a->rd, do_rdy);
2875 }
2876 
2877 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2878 {
2879     gen_helper_rdasr17(dst, tcg_env);
2880     return dst;
2881 }
2882 
2883 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2884 
2885 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2886 {
2887     gen_helper_rdccr(dst, tcg_env);
2888     return dst;
2889 }
2890 
2891 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2892 
2893 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2894 {
2895 #ifdef TARGET_SPARC64
2896     return tcg_constant_tl(dc->asi);
2897 #else
2898     qemu_build_not_reached();
2899 #endif
2900 }
2901 
2902 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2903 
2904 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2905 {
2906     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2907 
2908     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2909     if (translator_io_start(&dc->base)) {
2910         dc->base.is_jmp = DISAS_EXIT;
2911     }
2912     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2913                               tcg_constant_i32(dc->mem_idx));
2914     return dst;
2915 }
2916 
2917 /* TODO: non-priv access only allowed when enabled. */
2918 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2919 
2920 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2921 {
2922     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2923 }
2924 
2925 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2926 
2927 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2928 {
2929     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2930     return dst;
2931 }
2932 
2933 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2934 
2935 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2936 {
2937     gen_trap_ifnofpu(dc);
2938     return cpu_gsr;
2939 }
2940 
2941 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2942 
2943 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2944 {
2945     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2946     return dst;
2947 }
2948 
2949 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2950 
2951 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2952 {
2953     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2954     return dst;
2955 }
2956 
2957 /* TODO: non-priv access only allowed when enabled. */
2958 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2959 
2960 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2961 {
2962     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2963 
2964     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2965     if (translator_io_start(&dc->base)) {
2966         dc->base.is_jmp = DISAS_EXIT;
2967     }
2968     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2969                               tcg_constant_i32(dc->mem_idx));
2970     return dst;
2971 }
2972 
2973 /* TODO: non-priv access only allowed when enabled. */
2974 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2975 
2976 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2977 {
2978     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2979     return dst;
2980 }
2981 
2982 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2983 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2984 
2985 /*
2986  * UltraSPARC-T1 Strand status.
2987  * The HYPV check may not be enough: UA2005 & UA2007 describe
2988  * this ASR as implementation dependent.
2989  */
2990 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2991 {
2992     return tcg_constant_tl(1);
2993 }
2994 
2995 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2996 
2997 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2998 {
2999     gen_helper_rdpsr(dst, tcg_env);
3000     return dst;
3001 }
3002 
3003 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3004 
3005 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
3006 {
3007     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
3008     return dst;
3009 }
3010 
3011 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3012 
3013 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
3014 {
3015     TCGv_i32 tl = tcg_temp_new_i32();
3016     TCGv_ptr tp = tcg_temp_new_ptr();
3017 
3018     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3019     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3020     tcg_gen_shli_i32(tl, tl, 3);
3021     tcg_gen_ext_i32_ptr(tp, tl);
3022     tcg_gen_add_ptr(tp, tp, tcg_env);
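    /* tp now equals tcg_env + 8 * (env->tl & MAXTL_MASK), so the load
       below fetches htstate[env->tl], an array of 64-bit words. */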
3023 
3024     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3025     return dst;
3026 }
3027 
3028 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3029 
3030 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3031 {
3032     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3033     return dst;
3034 }
3035 
3036 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3037 
3038 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3039 {
3040     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3041     return dst;
3042 }
3043 
3044 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3045 
3046 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3047 {
3048     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3049     return dst;
3050 }
3051 
3052 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3053 
3054 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3055 {
3056     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3057     return dst;
3058 }
3059 
3060 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3061       do_rdhstick_cmpr)
3062 
3063 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3064 {
3065     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3066     return dst;
3067 }
3068 
3069 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3070 
3071 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3072 {
3073 #ifdef TARGET_SPARC64
3074     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3075 
3076     gen_load_trap_state_at_tl(r_tsptr);
3077     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3078     return dst;
3079 #else
3080     qemu_build_not_reached();
3081 #endif
3082 }
3083 
3084 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3085 
3086 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3087 {
3088 #ifdef TARGET_SPARC64
3089     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3090 
3091     gen_load_trap_state_at_tl(r_tsptr);
3092     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3093     return dst;
3094 #else
3095     qemu_build_not_reached();
3096 #endif
3097 }
3098 
3099 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3100 
3101 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3102 {
3103 #ifdef TARGET_SPARC64
3104     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3105 
3106     gen_load_trap_state_at_tl(r_tsptr);
3107     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3108     return dst;
3109 #else
3110     qemu_build_not_reached();
3111 #endif
3112 }
3113 
3114 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3115 
3116 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3117 {
3118 #ifdef TARGET_SPARC64
3119     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3120 
3121     gen_load_trap_state_at_tl(r_tsptr);
3122     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3123     return dst;
3124 #else
3125     qemu_build_not_reached();
3126 #endif
3127 }
3128 
3129 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3130 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3131 
3132 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3133 {
3134     return cpu_tbr;
3135 }
3136 
3137 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3138 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3139 
3140 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3141 {
3142     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3143     return dst;
3144 }
3145 
3146 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3147 
3148 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3149 {
3150     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3151     return dst;
3152 }
3153 
3154 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3155 
3156 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3157 {
3158     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3159     return dst;
3160 }
3161 
3162 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3163 
3164 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3165 {
3166     gen_helper_rdcwp(dst, tcg_env);
3167     return dst;
3168 }
3169 
3170 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3171 
3172 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3173 {
3174     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3175     return dst;
3176 }
3177 
3178 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3179 
3180 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3181 {
3182     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3183     return dst;
3184 }
3185 
3186 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3187       do_rdcanrestore)
3188 
3189 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3190 {
3191     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3192     return dst;
3193 }
3194 
3195 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3196 
3197 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3198 {
3199     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3200     return dst;
3201 }
3202 
3203 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3204 
3205 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3206 {
3207     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3208     return dst;
3209 }
3210 
3211 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3212 
3213 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3214 {
3215     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3216     return dst;
3217 }
3218 
3219 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3220 
3221 /* UA2005 strand status */
3222 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3223 {
3224     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3225     return dst;
3226 }
3227 
3228 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3229 
3230 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3231 {
3232     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3233     return dst;
3234 }
3235 
3236 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3237 
3238 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3239 {
3240     if (avail_64(dc)) {
3241         gen_helper_flushw(tcg_env);
3242         return advance_pc(dc);
3243     }
3244     return false;
3245 }
3246 
3247 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3248                           void (*func)(DisasContext *, TCGv))
3249 {
3250     TCGv src;
3251 
3252     /* For simplicity, we under-decoded the rs2 form. */
3253     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3254         return false;
3255     }
3256     if (!priv) {
3257         return raise_priv(dc);
3258     }
3259 
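    /* The SPARC spec defines these writes as storing r[rs1] XOR r[rs2]
       (or r[rs1] XOR sign_ext(simm13)); the cases below fold the xor
       away when either operand is zero. */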
3260     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3261         src = tcg_constant_tl(a->rs2_or_imm);
3262     } else {
3263         TCGv src1 = gen_load_gpr(dc, a->rs1);
3264         if (a->rs2_or_imm == 0) {
3265             src = src1;
3266         } else {
3267             src = tcg_temp_new();
3268             if (a->imm) {
3269                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3270             } else {
3271                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3272             }
3273         }
3274     }
3275     func(dc, src);
3276     return advance_pc(dc);
3277 }
3278 
3279 static void do_wry(DisasContext *dc, TCGv src)
3280 {
3281     tcg_gen_ext32u_tl(cpu_y, src);
3282 }
3283 
3284 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3285 
3286 static void do_wrccr(DisasContext *dc, TCGv src)
3287 {
3288     gen_helper_wrccr(tcg_env, src);
3289 }
3290 
3291 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3292 
3293 static void do_wrasi(DisasContext *dc, TCGv src)
3294 {
3295     TCGv tmp = tcg_temp_new();
3296 
3297     tcg_gen_ext8u_tl(tmp, src);
3298     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3299     /* End TB to notice changed ASI. */
3300     dc->base.is_jmp = DISAS_EXIT;
3301 }
3302 
3303 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3304 
3305 static void do_wrfprs(DisasContext *dc, TCGv src)
3306 {
3307 #ifdef TARGET_SPARC64
3308     tcg_gen_trunc_tl_i32(cpu_fprs, src);
3309     dc->fprs_dirty = 0;
3310     dc->base.is_jmp = DISAS_EXIT;
3311 #else
3312     qemu_build_not_reached();
3313 #endif
3314 }
3315 
3316 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3317 
3318 static void do_wrgsr(DisasContext *dc, TCGv src)
3319 {
3320     gen_trap_ifnofpu(dc);
3321     tcg_gen_mov_tl(cpu_gsr, src);
3322 }
3323 
3324 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3325 
3326 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3327 {
3328     gen_helper_set_softint(tcg_env, src);
3329 }
3330 
3331 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3332 
3333 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3334 {
3335     gen_helper_clear_softint(tcg_env, src);
3336 }
3337 
3338 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3339 
3340 static void do_wrsoftint(DisasContext *dc, TCGv src)
3341 {
3342     gen_helper_write_softint(tcg_env, src);
3343 }
3344 
3345 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3346 
3347 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3348 {
3349     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3350 
3351     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3352     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3353     translator_io_start(&dc->base);
3354     gen_helper_tick_set_limit(r_tickptr, src);
3355     /* End TB to handle timer interrupt */
3356     dc->base.is_jmp = DISAS_EXIT;
3357 }
3358 
3359 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3360 
3361 static void do_wrstick(DisasContext *dc, TCGv src)
3362 {
3363 #ifdef TARGET_SPARC64
3364     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3365 
3366     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3367     translator_io_start(&dc->base);
3368     gen_helper_tick_set_count(r_tickptr, src);
3369     /* End TB to handle timer interrupt */
3370     dc->base.is_jmp = DISAS_EXIT;
3371 #else
3372     qemu_build_not_reached();
3373 #endif
3374 }
3375 
3376 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3377 
3378 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3379 {
3380     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3381 
3382     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3383     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3384     translator_io_start(&dc->base);
3385     gen_helper_tick_set_limit(r_tickptr, src);
3386     /* End TB to handle timer interrupt */
3387     dc->base.is_jmp = DISAS_EXIT;
3388 }
3389 
3390 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3391 
3392 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3393 {
3394     finishing_insn(dc);
3395     save_state(dc);
3396     gen_helper_power_down(tcg_env);
3397 }
3398 
3399 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3400 
3401 static void do_wrmwait(DisasContext *dc, TCGv src)
3402 {
3403     /*
3404      * TODO: This is a stub version of mwait, which merely recognizes
3405      * interrupts immediately and does not wait.
3406      */
3407     dc->base.is_jmp = DISAS_EXIT;
3408 }
3409 
3410 TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3411 
3412 static void do_wrpsr(DisasContext *dc, TCGv src)
3413 {
3414     gen_helper_wrpsr(tcg_env, src);
3415     dc->base.is_jmp = DISAS_EXIT;
3416 }
3417 
3418 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3419 
3420 static void do_wrwim(DisasContext *dc, TCGv src)
3421 {
3422     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3423     TCGv tmp = tcg_temp_new();
3424 
3425     tcg_gen_andi_tl(tmp, src, mask);
3426     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3427 }
3428 
3429 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3430 
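/*
 * Worked example: with dc->def->nwindows == 8, MAKE_64BIT_MASK(0, 8)
 * is 0xff, so writing 0xffffff01 to %wim leaves wim == 0x01; only the
 * bits corresponding to implemented register windows are kept.
 */
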
3431 static void do_wrtpc(DisasContext *dc, TCGv src)
3432 {
3433 #ifdef TARGET_SPARC64
3434     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3435 
3436     gen_load_trap_state_at_tl(r_tsptr);
3437     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3438 #else
3439     qemu_build_not_reached();
3440 #endif
3441 }
3442 
3443 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3444 
3445 static void do_wrtnpc(DisasContext *dc, TCGv src)
3446 {
3447 #ifdef TARGET_SPARC64
3448     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3449 
3450     gen_load_trap_state_at_tl(r_tsptr);
3451     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3452 #else
3453     qemu_build_not_reached();
3454 #endif
3455 }
3456 
3457 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3458 
3459 static void do_wrtstate(DisasContext *dc, TCGv src)
3460 {
3461 #ifdef TARGET_SPARC64
3462     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3463 
3464     gen_load_trap_state_at_tl(r_tsptr);
3465     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3466 #else
3467     qemu_build_not_reached();
3468 #endif
3469 }
3470 
3471 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3472 
3473 static void do_wrtt(DisasContext *dc, TCGv src)
3474 {
3475 #ifdef TARGET_SPARC64
3476     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3477 
3478     gen_load_trap_state_at_tl(r_tsptr);
3479     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3480 #else
3481     qemu_build_not_reached();
3482 #endif
3483 }
3484 
3485 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3486 
3487 static void do_wrtick(DisasContext *dc, TCGv src)
3488 {
3489     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3490 
3491     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3492     translator_io_start(&dc->base);
3493     gen_helper_tick_set_count(r_tickptr, src);
3494     /* End TB to handle timer interrupt */
3495     dc->base.is_jmp = DISAS_EXIT;
3496 }
3497 
3498 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3499 
3500 static void do_wrtba(DisasContext *dc, TCGv src)
3501 {
3502     tcg_gen_mov_tl(cpu_tbr, src);
3503 }
3504 
3505 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3506 
3507 static void do_wrpstate(DisasContext *dc, TCGv src)
3508 {
3509     save_state(dc);
3510     if (translator_io_start(&dc->base)) {
3511         dc->base.is_jmp = DISAS_EXIT;
3512     }
3513     gen_helper_wrpstate(tcg_env, src);
3514     dc->npc = DYNAMIC_PC;
3515 }
3516 
3517 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3518 
3519 static void do_wrtl(DisasContext *dc, TCGv src)
3520 {
3521     save_state(dc);
3522     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3523     dc->npc = DYNAMIC_PC;
3524 }
3525 
3526 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3527 
3528 static void do_wrpil(DisasContext *dc, TCGv src)
3529 {
3530     if (translator_io_start(&dc->base)) {
3531         dc->base.is_jmp = DISAS_EXIT;
3532     }
3533     gen_helper_wrpil(tcg_env, src);
3534 }
3535 
3536 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3537 
3538 static void do_wrcwp(DisasContext *dc, TCGv src)
3539 {
3540     gen_helper_wrcwp(tcg_env, src);
3541 }
3542 
3543 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3544 
3545 static void do_wrcansave(DisasContext *dc, TCGv src)
3546 {
3547     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3548 }
3549 
3550 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3551 
3552 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3553 {
3554     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3555 }
3556 
3557 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3558 
3559 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3560 {
3561     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3562 }
3563 
3564 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3565 
3566 static void do_wrotherwin(DisasContext *dc, TCGv src)
3567 {
3568     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3569 }
3570 
3571 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3572 
3573 static void do_wrwstate(DisasContext *dc, TCGv src)
3574 {
3575     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3576 }
3577 
3578 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3579 
3580 static void do_wrgl(DisasContext *dc, TCGv src)
3581 {
3582     gen_helper_wrgl(tcg_env, src);
3583 }
3584 
3585 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3586 
3587 /* UA2005 strand status */
3588 static void do_wrssr(DisasContext *dc, TCGv src)
3589 {
3590     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3591 }
3592 
3593 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3594 
3595 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3596 
3597 static void do_wrhpstate(DisasContext *dc, TCGv src)
3598 {
3599     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3600     dc->base.is_jmp = DISAS_EXIT;
3601 }
3602 
3603 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3604 
3605 static void do_wrhtstate(DisasContext *dc, TCGv src)
3606 {
3607     TCGv_i32 tl = tcg_temp_new_i32();
3608     TCGv_ptr tp = tcg_temp_new_ptr();
3609 
3610     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3611     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3612     tcg_gen_shli_i32(tl, tl, 3);
3613     tcg_gen_ext_i32_ptr(tp, tl);
3614     tcg_gen_add_ptr(tp, tp, tcg_env);
3615 
3616     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3617 }
3618 
3619 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3620 
3621 static void do_wrhintp(DisasContext *dc, TCGv src)
3622 {
3623     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3624 }
3625 
3626 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3627 
3628 static void do_wrhtba(DisasContext *dc, TCGv src)
3629 {
3630     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3631 }
3632 
3633 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3634 
3635 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3636 {
3637     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3638 
3639     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3640     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3641     translator_io_start(&dc->base);
3642     gen_helper_tick_set_limit(r_tickptr, src);
3643     /* End TB to handle timer interrupt */
3644     dc->base.is_jmp = DISAS_EXIT;
3645 }
3646 
3647 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3648       do_wrhstick_cmpr)
3649 
3650 static bool do_saved_restored(DisasContext *dc, bool saved)
3651 {
3652     if (!supervisor(dc)) {
3653         return raise_priv(dc);
3654     }
3655     if (saved) {
3656         gen_helper_saved(tcg_env);
3657     } else {
3658         gen_helper_restored(tcg_env);
3659     }
3660     return advance_pc(dc);
3661 }
3662 
3663 TRANS(SAVED, 64, do_saved_restored, true)
3664 TRANS(RESTORED, 64, do_saved_restored, false)
3665 
3666 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3667 {
3668     return advance_pc(dc);
3669 }
3670 
3671 /*
3672  * TODO: Need a feature bit for sparcv8.
3673  * In the meantime, treat all 32-bit cpus like sparcv7.
3674  */
3675 TRANS(NOP_v7, 32, trans_NOP, a)
3676 TRANS(NOP_v9, 64, trans_NOP, a)
3677 
3678 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3679                          void (*func)(TCGv, TCGv, TCGv),
3680                          void (*funci)(TCGv, TCGv, target_long),
3681                          bool logic_cc)
3682 {
3683     TCGv dst, src1;
3684 
3685     /* For simplicity, we under-decoded the rs2 form. */
3686     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3687         return false;
3688     }
3689 
3690     if (logic_cc) {
3691         dst = cpu_cc_N;
3692     } else {
3693         dst = gen_dest_gpr(dc, a->rd);
3694     }
3695     src1 = gen_load_gpr(dc, a->rs1);
3696 
3697     if (a->imm || a->rs2_or_imm == 0) {
3698         if (funci) {
3699             funci(dst, src1, a->rs2_or_imm);
3700         } else {
3701             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3702         }
3703     } else {
3704         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3705     }
3706 
3707     if (logic_cc) {
3708         if (TARGET_LONG_BITS == 64) {
3709             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3710             tcg_gen_movi_tl(cpu_icc_C, 0);
3711         }
3712         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3713         tcg_gen_movi_tl(cpu_cc_C, 0);
3714         tcg_gen_movi_tl(cpu_cc_V, 0);
3715     }
3716 
3717     gen_store_gpr(dc, a->rd, dst);
3718     return advance_pc(dc);
3719 }
3720 
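/*
 * For the logic_cc case above, the result is computed directly into
 * cpu_cc_N and the remaining flags are derived from it.  A minimal
 * sketch of what "andcc %o0, %o1, %o2" becomes (sparc32 shown;
 * sparc64 updates the icc_* copies the same way):
 *
 *     tcg_gen_and_tl(cpu_cc_N, o0, o1);    // result is also the N source
 *     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);  // Z tested from the raw value
 *     tcg_gen_movi_tl(cpu_cc_C, 0);        // logical ops clear C ...
 *     tcg_gen_movi_tl(cpu_cc_V, 0);        // ... and V
 *     gen_store_gpr(dc, a->rd, cpu_cc_N);  // then write back to %o2
 */
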
3721 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3722                      void (*func)(TCGv, TCGv, TCGv),
3723                      void (*funci)(TCGv, TCGv, target_long),
3724                      void (*func_cc)(TCGv, TCGv, TCGv))
3725 {
3726     if (a->cc) {
3727         return do_arith_int(dc, a, func_cc, NULL, false);
3728     }
3729     return do_arith_int(dc, a, func, funci, false);
3730 }
3731 
3732 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3733                      void (*func)(TCGv, TCGv, TCGv),
3734                      void (*funci)(TCGv, TCGv, target_long))
3735 {
3736     return do_arith_int(dc, a, func, funci, a->cc);
3737 }
3738 
3739 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3740 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3741 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3742 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3743 
3744 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3745 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3746 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3747 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3748 
3749 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3750 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3751 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3752 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3753 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3754 
3755 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3756 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3757 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3758 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3759 
3760 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3761 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3762 
3763 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3764 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3765 
3766 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3767 {
3768     /* OR with %g0 is the canonical alias for MOV. */
3769     if (!a->cc && a->rs1 == 0) {
3770         if (a->imm || a->rs2_or_imm == 0) {
3771             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3772         } else if (a->rs2_or_imm & ~0x1f) {
3773             /* For simplicity, we under-decoded the rs2 form. */
3774             return false;
3775         } else {
3776             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3777         }
3778         return advance_pc(dc);
3779     }
3780     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3781 }
3782 
3783 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3784 {
3785     TCGv_i64 t1, t2;
3786     TCGv dst;
3787 
3788     if (!avail_DIV(dc)) {
3789         return false;
3790     }
3791     /* For simplicity, we under-decoded the rs2 form. */
3792     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3793         return false;
3794     }
3795 
3796     if (unlikely(a->rs2_or_imm == 0)) {
3797         gen_exception(dc, TT_DIV_ZERO);
3798         return true;
3799     }
3800 
3801     if (a->imm) {
3802         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3803     } else {
3804         TCGLabel *lab;
3805         TCGv_i32 n2;
3806 
3807         finishing_insn(dc);
3808         flush_cond(dc);
3809 
3810         n2 = tcg_temp_new_i32();
3811         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3812 
3813         lab = delay_exception(dc, TT_DIV_ZERO);
3814         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3815 
3816         t2 = tcg_temp_new_i64();
3817 #ifdef TARGET_SPARC64
3818         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3819 #else
3820         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3821 #endif
3822     }
3823 
3824     t1 = tcg_temp_new_i64();
3825     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3826 
3827     tcg_gen_divu_i64(t1, t1, t2);
3828     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3829 
3830     dst = gen_dest_gpr(dc, a->rd);
3831     tcg_gen_trunc_i64_tl(dst, t1);
3832     gen_store_gpr(dc, a->rd, dst);
3833     return advance_pc(dc);
3834 }
3835 
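/*
 * UDIV divides the 64-bit quantity (Y:rs1) by the low 32 bits of the
 * second operand; the umin above models the V8 overflow rule, which
 * saturates an oversized quotient to 0xffffffff rather than trapping.
 * E.g. with Y = 1, rs1 = 0 and rs2 = 2, the dividend is 1 << 32 and
 * the quotient 0x80000000 just fits, so it is delivered unchanged.
 */
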
3836 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3837 {
3838     TCGv dst, src1, src2;
3839 
3840     if (!avail_64(dc)) {
3841         return false;
3842     }
3843     /* For simplicity, we under-decoded the rs2 form. */
3844     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3845         return false;
3846     }
3847 
3848     if (unlikely(a->rs2_or_imm == 0)) {
3849         gen_exception(dc, TT_DIV_ZERO);
3850         return true;
3851     }
3852 
3853     if (a->imm) {
3854         src2 = tcg_constant_tl(a->rs2_or_imm);
3855     } else {
3856         TCGLabel *lab;
3857 
3858         finishing_insn(dc);
3859         flush_cond(dc);
3860 
3861         lab = delay_exception(dc, TT_DIV_ZERO);
3862         src2 = cpu_regs[a->rs2_or_imm];
3863         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3864     }
3865 
3866     dst = gen_dest_gpr(dc, a->rd);
3867     src1 = gen_load_gpr(dc, a->rs1);
3868 
3869     tcg_gen_divu_tl(dst, src1, src2);
3870     gen_store_gpr(dc, a->rd, dst);
3871     return advance_pc(dc);
3872 }
3873 
3874 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3875 {
3876     TCGv dst, src1, src2;
3877 
3878     if (!avail_64(dc)) {
3879         return false;
3880     }
3881     /* For simplicity, we under-decoded the rs2 form. */
3882     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3883         return false;
3884     }
3885 
3886     if (unlikely(a->rs2_or_imm == 0)) {
3887         gen_exception(dc, TT_DIV_ZERO);
3888         return true;
3889     }
3890 
3891     dst = gen_dest_gpr(dc, a->rd);
3892     src1 = gen_load_gpr(dc, a->rs1);
3893 
3894     if (a->imm) {
3895         if (unlikely(a->rs2_or_imm == -1)) {
3896             tcg_gen_neg_tl(dst, src1);
3897             gen_store_gpr(dc, a->rd, dst);
3898             return advance_pc(dc);
3899         }
3900         src2 = tcg_constant_tl(a->rs2_or_imm);
3901     } else {
3902         TCGLabel *lab;
3903         TCGv t1, t2;
3904 
3905         finishing_insn(dc);
3906         flush_cond(dc);
3907 
3908         lab = delay_exception(dc, TT_DIV_ZERO);
3909         src2 = cpu_regs[a->rs2_or_imm];
3910         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3911 
3912         /*
3913          * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3914          * Set SRC2 to 1 as a new divisor, to produce the correct result.
3915          */
3916         t1 = tcg_temp_new();
3917         t2 = tcg_temp_new();
3918         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3919         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3920         tcg_gen_and_tl(t1, t1, t2);
3921         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3922                            tcg_constant_tl(1), src2);
3923         src2 = t1;
3924     }
3925 
3926     tcg_gen_div_tl(dst, src1, src2);
3927     gen_store_gpr(dc, a->rd, dst);
3928     return advance_pc(dc);
3929 }
3930 
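/*
 * Worked example of the movcond guard above: INT64_MIN sdivx -1 must
 * yield INT64_MIN (the negation wraps), but the host divide would
 * trap.  Substituting 1 as the divisor in exactly that case produces
 * the same architectural result:
 *
 *     t1  = (src1 == INT64_MIN && src2 == -1) ? 1 : src2;
 *     dst = src1 / t1;       // INT64_MIN / 1 == INT64_MIN
 */
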
3931 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3932                      int width, bool cc, bool little_endian)
3933 {
3934     TCGv dst, s1, s2, l, r, t, m;
3935     uint64_t amask = address_mask_i(dc, -8);
3936 
3937     dst = gen_dest_gpr(dc, a->rd);
3938     s1 = gen_load_gpr(dc, a->rs1);
3939     s2 = gen_load_gpr(dc, a->rs2);
3940 
3941     if (cc) {
3942         gen_op_subcc(cpu_cc_N, s1, s2);
3943     }
3944 
3945     l = tcg_temp_new();
3946     r = tcg_temp_new();
3947     t = tcg_temp_new();
3948 
3949     switch (width) {
3950     case 8:
3951         tcg_gen_andi_tl(l, s1, 7);
3952         tcg_gen_andi_tl(r, s2, 7);
3953         tcg_gen_xori_tl(r, r, 7);
3954         m = tcg_constant_tl(0xff);
3955         break;
3956     case 16:
3957         tcg_gen_extract_tl(l, s1, 1, 2);
3958         tcg_gen_extract_tl(r, s2, 1, 2);
3959         tcg_gen_xori_tl(r, r, 3);
3960         m = tcg_constant_tl(0xf);
3961         break;
3962     case 32:
3963         tcg_gen_extract_tl(l, s1, 2, 1);
3964         tcg_gen_extract_tl(r, s2, 2, 1);
3965         tcg_gen_xori_tl(r, r, 1);
3966         m = tcg_constant_tl(0x3);
3967         break;
3968     default:
3969         g_assert_not_reached();
3970     }
3971 
3972     /* Compute Left Edge */
3973     if (little_endian) {
3974         tcg_gen_shl_tl(l, m, l);
3975         tcg_gen_and_tl(l, l, m);
3976     } else {
3977         tcg_gen_shr_tl(l, m, l);
3978     }
3979     /* Compute Right Edge */
3980     if (little_endian) {
3981         tcg_gen_shr_tl(r, m, r);
3982     } else {
3983         tcg_gen_shl_tl(r, m, r);
3984         tcg_gen_and_tl(r, r, m);
3985     }
3986 
3987     /* Compute dst = (s1 == s2 under amask ? l & r : l) */
3988     tcg_gen_xor_tl(t, s1, s2);
3989     tcg_gen_and_tl(r, r, l);
3990     tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);
3991 
3992     gen_store_gpr(dc, a->rd, dst);
3993     return advance_pc(dc);
3994 }
3995 
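/*
 * Worked example for big-endian EDGE8 with s1 = 0x1003, s2 = 0x1006
 * (same aligned doubleword, so both masks apply):
 *
 *     l   = 0xff >> (0x1003 & 7)              = 0x1f   // left edge
 *     r   = (0xff << ((0x1006 & 7) ^ 7)) & 0xff
 *         = (0xff << 1) & 0xff                = 0xfe   // right edge
 *     dst = l & r                             = 0x1e   // bytes 3..6
 *
 * Had s2 fallen in a different doubleword, only l would be returned.
 */
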
3996 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3997 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3998 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3999 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
4000 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
4001 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
4002 
4003 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
4004 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
4005 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
4006 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
4007 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
4008 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
4009 
4010 static bool do_rr(DisasContext *dc, arg_r_r *a,
4011                   void (*func)(TCGv, TCGv))
4012 {
4013     TCGv dst = gen_dest_gpr(dc, a->rd);
4014     TCGv src = gen_load_gpr(dc, a->rs);
4015 
4016     func(dst, src);
4017     gen_store_gpr(dc, a->rd, dst);
4018     return advance_pc(dc);
4019 }
4020 
4021 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
4022 
4023 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
4024                    void (*func)(TCGv, TCGv, TCGv))
4025 {
4026     TCGv dst = gen_dest_gpr(dc, a->rd);
4027     TCGv src1 = gen_load_gpr(dc, a->rs1);
4028     TCGv src2 = gen_load_gpr(dc, a->rs2);
4029 
4030     func(dst, src1, src2);
4031     gen_store_gpr(dc, a->rd, dst);
4032     return advance_pc(dc);
4033 }
4034 
4035 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
4036 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
4037 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
4038 
4039 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
4040 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
4041 
4042 TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
4043 TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)
4044 
4045 TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
4046 
4047 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
4048 {
4049 #ifdef TARGET_SPARC64
4050     TCGv tmp = tcg_temp_new();
4051 
4052     tcg_gen_add_tl(tmp, s1, s2);
4053     tcg_gen_andi_tl(dst, tmp, -8);
4054     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4055 #else
4056     g_assert_not_reached();
4057 #endif
4058 }
4059 
4060 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
4061 {
4062 #ifdef TARGET_SPARC64
4063     TCGv tmp = tcg_temp_new();
4064 
4065     tcg_gen_add_tl(tmp, s1, s2);
4066     tcg_gen_andi_tl(dst, tmp, -8);
4067     tcg_gen_neg_tl(tmp, tmp);
4068     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4069 #else
4070     g_assert_not_reached();
4071 #endif
4072 }
4073 
4074 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
4075 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4076 
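/*
 * Worked example: "alignaddr %o0, %o1, %o2" with %o0 + %o1 == 0x10005
 * writes 0x10000 to %o2 and latches the discarded offset 5 in
 * GSR.align, where a subsequent FALIGNDATA picks it up to stitch an
 * unaligned doubleword out of two aligned loads.  ALIGNADDRL latches
 * the two's complement of the offset ((-0x10005) & 7 == 3) instead.
 */
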
4077 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
4078 {
4079 #ifdef TARGET_SPARC64
4080     tcg_gen_add_tl(dst, s1, s2);
4081     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
4082 #else
4083     g_assert_not_reached();
4084 #endif
4085 }
4086 
4087 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4088 
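/*
 * The deposit above places the 32-bit sum in GSR.mask (bits 63:32);
 * BSHUFFLE then consumes it as eight 4-bit selectors, one per output
 * byte, choosing among the 16 bytes of its two source registers.
 */
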
4089 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4090 {
4091     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4092     return true;
4093 }
4094 
4095 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4096 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4097 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4098 
4099 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4100 {
4101     TCGv dst, src1, src2;
4102 
4103     /* Reject 64-bit shifts for sparc32. */
4104     if (avail_32(dc) && a->x) {
4105         return false;
4106     }
4107 
4108     src2 = tcg_temp_new();
4109     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4110     src1 = gen_load_gpr(dc, a->rs1);
4111     dst = gen_dest_gpr(dc, a->rd);
4112 
4113     if (l) {
4114         tcg_gen_shl_tl(dst, src1, src2);
4115         if (!a->x) {
4116             tcg_gen_ext32u_tl(dst, dst);
4117         }
4118     } else if (u) {
4119         if (!a->x) {
4120             tcg_gen_ext32u_tl(dst, src1);
4121             src1 = dst;
4122         }
4123         tcg_gen_shr_tl(dst, src1, src2);
4124     } else {
4125         if (!a->x) {
4126             tcg_gen_ext32s_tl(dst, src1);
4127             src1 = dst;
4128         }
4129         tcg_gen_sar_tl(dst, src1, src2);
4130     }
4131     gen_store_gpr(dc, a->rd, dst);
4132     return advance_pc(dc);
4133 }
4134 
4135 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4136 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4137 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4138 
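/*
 * The andi with 31 or 63 above mirrors the architectural rule that a
 * register shift count is taken modulo the operand width: e.g. on
 * sparc32, "sll %o0, %o1, %o2" with %o1 == 33 shifts left by 1.
 */
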
4139 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4140 {
4141     TCGv dst, src1;
4142 
4143     /* Reject 64-bit shifts for sparc32. */
4144     if (avail_32(dc) && (a->x || a->i >= 32)) {
4145         return false;
4146     }
4147 
4148     src1 = gen_load_gpr(dc, a->rs1);
4149     dst = gen_dest_gpr(dc, a->rd);
4150 
4151     if (avail_32(dc) || a->x) {
4152         if (l) {
4153             tcg_gen_shli_tl(dst, src1, a->i);
4154         } else if (u) {
4155             tcg_gen_shri_tl(dst, src1, a->i);
4156         } else {
4157             tcg_gen_sari_tl(dst, src1, a->i);
4158         }
4159     } else {
4160         if (l) {
4161             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4162         } else if (u) {
4163             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4164         } else {
4165             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4166         }
4167     }
4168     gen_store_gpr(dc, a->rd, dst);
4169     return advance_pc(dc);
4170 }
4171 
4172 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4173 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4174 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4175 
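/*
 * On sparc64, an immediate 32-bit shift folds the shift and the
 * extension into one TCG op.  Sketch for "srl %o0, 3, %o1" (a->i = 3):
 *
 *     tcg_gen_extract_tl(dst, src1, 3, 29);  // bits 31..3, zero-extended
 *
 * which is ((uint32_t)src1 >> 3) in a single operation; SRA uses
 * sextract and SLL uses deposit_z analogously.
 */
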
4176 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4177 {
4178     /* For simplicity, we under-decoded the rs2 form. */
4179     if (!imm && rs2_or_imm & ~0x1f) {
4180         return NULL;
4181     }
4182     if (imm || rs2_or_imm == 0) {
4183         return tcg_constant_tl(rs2_or_imm);
4184     } else {
4185         return cpu_regs[rs2_or_imm];
4186     }
4187 }
4188 
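/*
 * Two decode details are centralized here for the MOVcc/MOVfcc/MOVR
 * forms: the register encoding leaves stray high bits in rs2_or_imm,
 * so values outside 0..31 are rejected, and %g0 as rs2 is folded to
 * tcg_constant_tl(0), since it always reads as zero.
 */
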
4189 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4190 {
4191     TCGv dst = gen_load_gpr(dc, rd);
4192     TCGv c2 = tcg_constant_tl(cmp->c2);
4193 
4194     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4195     gen_store_gpr(dc, rd, dst);
4196     return advance_pc(dc);
4197 }
4198 
4199 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4200 {
4201     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4202     DisasCompare cmp;
4203 
4204     if (src2 == NULL) {
4205         return false;
4206     }
4207     gen_compare(&cmp, a->cc, a->cond, dc);
4208     return do_mov_cond(dc, &cmp, a->rd, src2);
4209 }
4210 
4211 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4212 {
4213     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4214     DisasCompare cmp;
4215 
4216     if (src2 == NULL) {
4217         return false;
4218     }
4219     gen_fcompare(&cmp, a->cc, a->cond);
4220     return do_mov_cond(dc, &cmp, a->rd, src2);
4221 }
4222 
4223 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4224 {
4225     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4226     DisasCompare cmp;
4227 
4228     if (src2 == NULL) {
4229         return false;
4230     }
4231     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4232         return false;
4233     }
4234     return do_mov_cond(dc, &cmp, a->rd, src2);
4235 }
4236 
4237 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4238                            bool (*func)(DisasContext *dc, int rd, TCGv src))
4239 {
4240     TCGv src1, sum;
4241 
4242     /* For simplicity, we under-decoded the rs2 form. */
4243     if (!a->imm && a->rs2_or_imm & ~0x1f) {
4244         return false;
4245     }
4246 
4247     /*
4248      * Always load the sum into a new temporary: this is required to
4249      * capture the value across a window change (e.g. SAVE, RESTORE);
4250      * a direct register reference could observe the new window.
4251      */
4252     sum = tcg_temp_new();
4253     src1 = gen_load_gpr(dc, a->rs1);
4254     if (a->imm || a->rs2_or_imm == 0) {
4255         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4256     } else {
4257         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4258     }
4259     return func(dc, a->rd, sum);
4260 }
4261 
4262 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4263 {
4264     /*
4265      * Preserve pc across advance, so that we can delay
4266      * the writeback to rd until after src is consumed.
4267      */
4268     target_ulong cur_pc = dc->pc;
4269 
4270     gen_check_align(dc, src, 3);
4271 
4272     gen_mov_pc_npc(dc);
4273     tcg_gen_mov_tl(cpu_npc, src);
4274     gen_address_mask(dc, cpu_npc);
4275     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4276 
4277     dc->npc = DYNAMIC_PC_LOOKUP;
4278     return true;
4279 }
4280 
4281 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4282 
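/*
 * Worked example: "jmpl %i7 + 8, %g0" is the conventional "ret"; the
 * sum becomes the new npc while the old npc becomes the pc, preserving
 * delay-slot semantics, and the link write to %g0 is discarded.  For
 * a linking form such as "jmpl %l1, %o7", the value stored in rd is
 * cur_pc, the address of the jmpl itself, latched before
 * gen_mov_pc_npc() advances dc->pc.
 */
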
4283 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4284 {
4285     if (!supervisor(dc)) {
4286         return raise_priv(dc);
4287     }
4288 
4289     gen_check_align(dc, src, 3);
4290 
4291     gen_mov_pc_npc(dc);
4292     tcg_gen_mov_tl(cpu_npc, src);
4293     gen_helper_rett(tcg_env);
4294 
4295     dc->npc = DYNAMIC_PC;
4296     return true;
4297 }
4298 
4299 TRANS(RETT, 32, do_add_special, a, do_rett)
4300 
4301 static bool do_return(DisasContext *dc, int rd, TCGv src)
4302 {
4303     gen_check_align(dc, src, 3);
4304     gen_helper_restore(tcg_env);
4305 
4306     gen_mov_pc_npc(dc);
4307     tcg_gen_mov_tl(cpu_npc, src);
4308     gen_address_mask(dc, cpu_npc);
4309 
4310     dc->npc = DYNAMIC_PC_LOOKUP;
4311     return true;
4312 }
4313 
4314 TRANS(RETURN, 64, do_add_special, a, do_return)
4315 
4316 static bool do_save(DisasContext *dc, int rd, TCGv src)
4317 {
4318     gen_helper_save(tcg_env);
4319     gen_store_gpr(dc, rd, src);
4320     return advance_pc(dc);
4321 }
4322 
4323 TRANS(SAVE, ALL, do_add_special, a, do_save)
4324 
4325 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4326 {
4327     gen_helper_restore(tcg_env);
4328     gen_store_gpr(dc, rd, src);
4329     return advance_pc(dc);
4330 }
4331 
4332 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4333 
4334 static bool do_done_retry(DisasContext *dc, bool done)
4335 {
4336     if (!supervisor(dc)) {
4337         return raise_priv(dc);
4338     }
4339     dc->npc = DYNAMIC_PC;
4340     dc->pc = DYNAMIC_PC;
4341     translator_io_start(&dc->base);
4342     if (done) {
4343         gen_helper_done(tcg_env);
4344     } else {
4345         gen_helper_retry(tcg_env);
4346     }
4347     return true;
4348 }
4349 
4350 TRANS(DONE, 64, do_done_retry, true)
4351 TRANS(RETRY, 64, do_done_retry, false)
4352 
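/*
 * DONE and RETRY differ only in where execution resumes: DONE skips
 * the trapped instruction (pc <- tnpc, npc <- tnpc + 4) while RETRY
 * re-executes it (pc <- tpc, npc <- tnpc).  Both restore state from
 * the trap stack in the helper, so pc/npc are dynamic from here on.
 */
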
4353 /*
4354  * Major opcode 11 -- load and store instructions
4355  */
4356 
4357 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4358 {
4359     TCGv addr, tmp = NULL;
4360 
4361     /* For simplicity, we under-decoded the rs2 form. */
4362     if (!imm && rs2_or_imm & ~0x1f) {
4363         return NULL;
4364     }
4365 
4366     addr = gen_load_gpr(dc, rs1);
4367     if (rs2_or_imm) {
4368         tmp = tcg_temp_new();
4369         if (imm) {
4370             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4371         } else {
4372             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4373         }
4374         addr = tmp;
4375     }
4376     if (AM_CHECK(dc)) {
4377         if (!tmp) {
4378             tmp = tcg_temp_new();
4379         }
4380         tcg_gen_ext32u_tl(tmp, addr);
4381         addr = tmp;
4382     }
4383     return addr;
4384 }
4385 
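/*
 * Address formation sketch for "ldx [%o0 + 0x20], %o1":
 *
 *     addr = %o0 + 0x20;          // rs1 plus simm13 (or plus rs2)
 *     if (AM_CHECK(dc)) {
 *         addr = (uint32_t)addr;  // 32-bit masking, e.g. PSTATE.AM
 *     }
 *
 * Any addition or masking lands in a fresh temporary, so the source
 * gpr itself is never modified.
 */
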
4386 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4387 {
4388     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4389     DisasASI da;
4390 
4391     if (addr == NULL) {
4392         return false;
4393     }
4394     da = resolve_asi(dc, a->asi, mop);
4395 
4396     reg = gen_dest_gpr(dc, a->rd);
4397     gen_ld_asi(dc, &da, reg, addr);
4398     gen_store_gpr(dc, a->rd, reg);
4399     return advance_pc(dc);
4400 }
4401 
4402 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4403 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4404 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4405 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4406 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4407 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4408 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4409 
4410 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4411 {
4412     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4413     DisasASI da;
4414 
4415     if (addr == NULL) {
4416         return false;
4417     }
4418     da = resolve_asi(dc, a->asi, mop);
4419 
4420     reg = gen_load_gpr(dc, a->rd);
4421     gen_st_asi(dc, &da, reg, addr);
4422     return advance_pc(dc);
4423 }
4424 
4425 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4426 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4427 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4428 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4429 
4430 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4431 {
4432     TCGv addr;
4433     DisasASI da;
4434 
4435     if (a->rd & 1) {
4436         return false;
4437     }
4438     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4439     if (addr == NULL) {
4440         return false;
4441     }
4442     da = resolve_asi(dc, a->asi, MO_TEUQ);
4443     gen_ldda_asi(dc, &da, addr, a->rd);
4444     return advance_pc(dc);
4445 }
4446 
4447 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4448 {
4449     TCGv addr;
4450     DisasASI da;
4451 
4452     if (a->rd & 1) {
4453         return false;
4454     }
4455     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4456     if (addr == NULL) {
4457         return false;
4458     }
4459     da = resolve_asi(dc, a->asi, MO_TEUQ);
4460     gen_stda_asi(dc, &da, addr, a->rd);
4461     return advance_pc(dc);
4462 }
4463 
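/*
 * LDD and STD operate on an even/odd register pair, so an odd rd is
 * an illegal encoding; returning false above lets the decoder raise
 * the illegal-instruction trap.
 */
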
4464 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4465 {
4466     TCGv addr, reg;
4467     DisasASI da;
4468 
4469     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4470     if (addr == NULL) {
4471         return false;
4472     }
4473     da = resolve_asi(dc, a->asi, MO_UB);
4474 
4475     reg = gen_dest_gpr(dc, a->rd);
4476     gen_ldstub_asi(dc, &da, reg, addr);
4477     gen_store_gpr(dc, a->rd, reg);
4478     return advance_pc(dc);
4479 }
4480 
4481 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4482 {
4483     TCGv addr, dst, src;
4484     DisasASI da;
4485 
4486     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4487     if (addr == NULL) {
4488         return false;
4489     }
4490     da = resolve_asi(dc, a->asi, MO_TEUL);
4491 
4492     dst = gen_dest_gpr(dc, a->rd);
4493     src = gen_load_gpr(dc, a->rd);
4494     gen_swap_asi(dc, &da, dst, src, addr);
4495     gen_store_gpr(dc, a->rd, dst);
4496     return advance_pc(dc);
4497 }
4498 
4499 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4500 {
4501     TCGv addr, o, n, c;
4502     DisasASI da;
4503 
4504     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4505     if (addr == NULL) {
4506         return false;
4507     }
4508     da = resolve_asi(dc, a->asi, mop);
4509 
4510     o = gen_dest_gpr(dc, a->rd);
4511     n = gen_load_gpr(dc, a->rd);
4512     c = gen_load_gpr(dc, a->rs2_or_imm);
4513     gen_cas_asi(dc, &da, o, n, c, addr);
4514     gen_store_gpr(dc, a->rd, o);
4515     return advance_pc(dc);
4516 }
4517 
4518 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4519 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4520 
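/*
 * Operand roles for CASA/CASXA: rs2 supplies the compare value, rd
 * supplies the swap value and receives the old memory word.  A classic
 * lock-acquire sketch in v9 assembly (illustration only; ASI_P is the
 * primary address space, 0x80):
 *
 * 1:  mov     1, %o1                  ! value to install
 *     casa    [%o0] ASI_P, %g0, %o1   ! if [%o0] == 0, store the 1
 *     brnz,pn %o1, 1b                 ! old value nonzero: lock held
 *      nop
 */
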
4521 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4522 {
4523     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4524     DisasASI da;
4525 
4526     if (addr == NULL) {
4527         return false;
4528     }
4529     if (gen_trap_if_nofpu_fpexception(dc)) {
4530         return true;
4531     }
4532     if (sz == MO_128 && gen_trap_float128(dc)) {
4533         return true;
4534     }
4535     da = resolve_asi(dc, a->asi, MO_TE | sz);
4536     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4537     gen_update_fprs_dirty(dc, a->rd);
4538     return advance_pc(dc);
4539 }
4540 
4541 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4542 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4543 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4544 
4545 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4546 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4547 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4548 
4549 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4550 {
4551     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4552     DisasASI da;
4553 
4554     if (addr == NULL) {
4555         return false;
4556     }
4557     /* Store insns are ok in fp_exception_pending state. */
4558     if (gen_trap_ifnofpu(dc)) {
4559         return true;
4560     }
4561     if (sz == MO_128 && gen_trap_float128(dc)) {
4562         return true;
4563     }
4564     da = resolve_asi(dc, a->asi, MO_TE | sz);
4565     gen_stf_asi(dc, &da, sz, addr, a->rd);
4566     return advance_pc(dc);
4567 }
4568 
4569 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4570 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4571 TRANS(STQF, 64, do_st_fpr, a, MO_128)
4572 
4573 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4574 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4575 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4576 
4577 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4578 {
4579     TCGv addr;
4580 
4581     if (!avail_32(dc)) {
4582         return false;
4583     }
4584     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4585     if (addr == NULL) {
4586         return false;
4587     }
4588     if (!supervisor(dc)) {
4589         return raise_priv(dc);
4590     }
4591 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
4592     if (gen_trap_ifnofpu(dc)) {
4593         return true;
4594     }
4595     if (!dc->fsr_qne) {
4596         gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4597         return true;
4598     }
4599 
4600     /* Store the single element from the queue. */
4601     TCGv_i64 fq = tcg_temp_new_i64();
4602     tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d));
4603     tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4);
4604 
4605     /* Mark the queue empty, transitioning to fp_execute state. */
4606     tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
4607                    offsetof(CPUSPARCState, fsr_qne));
4608     dc->fsr_qne = 0;
4609 
4610     return advance_pc(dc);
4611 #else
4612     qemu_build_not_reached();
4613 #endif
4614 }
4615 
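/*
 * This models a single-entry floating-point deferred-trap queue: the
 * one queued doubleword is stored and FSR.qne is cleared, moving the
 * FPU from fp_exception to fp_execute; issuing STDFQ with the queue
 * already empty is the sequence error raised above.
 */
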
4616 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4617 {
4618     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4619     TCGv_i32 tmp;
4620 
4621     if (addr == NULL) {
4622         return false;
4623     }
4624     if (gen_trap_if_nofpu_fpexception(dc)) {
4625         return true;
4626     }
4627 
4628     tmp = tcg_temp_new_i32();
4629     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4630 
4631     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4632     /* LDFSR does not change FCC[1-3]. */
4633 
4634     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4635     return advance_pc(dc);
4636 }
4637 
4638 static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
4639 {
4640 #ifdef TARGET_SPARC64
4641     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4642     TCGv_i64 t64;
4643     TCGv_i32 lo, hi;
4644 
4645     if (addr == NULL) {
4646         return false;
4647     }
4648     if (gen_trap_if_nofpu_fpexception(dc)) {
4649         return true;
4650     }
4651 
4652     t64 = tcg_temp_new_i64();
4653     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4654 
4655     lo = tcg_temp_new_i32();
4656     hi = cpu_fcc[3];
4657     tcg_gen_extr_i64_i32(lo, hi, t64);
4658     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4659     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4660     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4661     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4662 
4663     if (entire) {
4664         gen_helper_set_fsr_nofcc(tcg_env, lo);
4665     } else {
4666         gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4667     }
4668     return advance_pc(dc);
4669 #else
4670     return false;
4671 #endif
4672 }
4673 
4674 TRANS(LDXFSR, 64, do_ldxfsr, a, false)
4675 TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4676 
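/*
 * Layout behind the extracts above: in the 64-bit FSR image, fcc0
 * lives at bits 11:10 of the low word while fcc1..fcc3 occupy bits
 * 33:32, 35:34 and 37:36, hence the split via extr_i64_i32 and the
 * FSR_FCCn_SHIFT - 32 adjustments.  This is also why 32-bit LDFSR can
 * only ever refresh fcc0.
 */
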
4677 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4678 {
4679     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4680     TCGv fsr;
4681 
4682     if (addr == NULL) {
4683         return false;
4684     }
4685     /* Store insns are ok in fp_exception_pending state. */
4686     if (gen_trap_ifnofpu(dc)) {
4687         return true;
4688     }
4689 
4690     fsr = tcg_temp_new();
4691     gen_helper_get_fsr(fsr, tcg_env);
4692     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4693     return advance_pc(dc);
4694 }
4695 
4696 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4697 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4698 
4699 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4700 {
4701     if (gen_trap_ifnofpu(dc)) {
4702         return true;
4703     }
4704     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4705     return advance_pc(dc);
4706 }
4707 
4708 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4709 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4710 
4711 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4712 {
4713     if (gen_trap_ifnofpu(dc)) {
4714         return true;
4715     }
4716     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4717     return advance_pc(dc);
4718 }
4719 
4720 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4721 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4722 
4723 static bool do_ff(DisasContext *dc, arg_r_r *a,
4724                   void (*func)(TCGv_i32, TCGv_i32))
4725 {
4726     TCGv_i32 tmp;
4727 
4728     if (gen_trap_if_nofpu_fpexception(dc)) {
4729         return true;
4730     }
4731 
4732     tmp = gen_load_fpr_F(dc, a->rs);
4733     func(tmp, tmp);
4734     gen_store_fpr_F(dc, a->rd, tmp);
4735     return advance_pc(dc);
4736 }
4737 
4738 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4739 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4740 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4741 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4742 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4743 
4744 static bool do_fd(DisasContext *dc, arg_r_r *a,
4745                   void (*func)(TCGv_i32, TCGv_i64))
4746 {
4747     TCGv_i32 dst;
4748     TCGv_i64 src;
4749 
4750     if (gen_trap_ifnofpu(dc)) {
4751         return true;
4752     }
4753 
4754     dst = tcg_temp_new_i32();
4755     src = gen_load_fpr_D(dc, a->rs);
4756     func(dst, src);
4757     gen_store_fpr_F(dc, a->rd, dst);
4758     return advance_pc(dc);
4759 }
4760 
4761 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4762 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4763 
4764 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4765                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4766 {
4767     TCGv_i32 tmp;
4768 
4769     if (gen_trap_if_nofpu_fpexception(dc)) {
4770         return true;
4771     }
4772 
4773     tmp = gen_load_fpr_F(dc, a->rs);
4774     func(tmp, tcg_env, tmp);
4775     gen_store_fpr_F(dc, a->rd, tmp);
4776     return advance_pc(dc);
4777 }
4778 
4779 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4780 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4781 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4782 
4783 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4784                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4785 {
4786     TCGv_i32 dst;
4787     TCGv_i64 src;
4788 
4789     if (gen_trap_if_nofpu_fpexception(dc)) {
4790         return true;
4791     }
4792 
4793     dst = tcg_temp_new_i32();
4794     src = gen_load_fpr_D(dc, a->rs);
4795     func(dst, tcg_env, src);
4796     gen_store_fpr_F(dc, a->rd, dst);
4797     return advance_pc(dc);
4798 }
4799 
4800 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4801 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4802 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4803 
4804 static bool do_dd(DisasContext *dc, arg_r_r *a,
4805                   void (*func)(TCGv_i64, TCGv_i64))
4806 {
4807     TCGv_i64 dst, src;
4808 
4809     if (gen_trap_if_nofpu_fpexception(dc)) {
4810         return true;
4811     }
4812 
4813     dst = tcg_temp_new_i64();
4814     src = gen_load_fpr_D(dc, a->rs);
4815     func(dst, src);
4816     gen_store_fpr_D(dc, a->rd, dst);
4817     return advance_pc(dc);
4818 }
4819 
4820 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4821 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4822 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4823 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4824 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4825 
4826 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4827                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4828 {
4829     TCGv_i64 dst, src;
4830 
4831     if (gen_trap_if_nofpu_fpexception(dc)) {
4832         return true;
4833     }
4834 
4835     dst = tcg_temp_new_i64();
4836     src = gen_load_fpr_D(dc, a->rs);
4837     func(dst, tcg_env, src);
4838     gen_store_fpr_D(dc, a->rd, dst);
4839     return advance_pc(dc);
4840 }
4841 
4842 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4843 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4844 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4845 
4846 static bool do_df(DisasContext *dc, arg_r_r *a,
4847                   void (*func)(TCGv_i64, TCGv_i32))
4848 {
4849     TCGv_i64 dst;
4850     TCGv_i32 src;
4851 
4852     if (gen_trap_ifnofpu(dc)) {
4853         return true;
4854     }
4855 
4856     dst = tcg_temp_new_i64();
4857     src = gen_load_fpr_F(dc, a->rs);
4858     func(dst, src);
4859     gen_store_fpr_D(dc, a->rd, dst);
4860     return advance_pc(dc);
4861 }
4862 
4863 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4864 
4865 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4866                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4867 {
4868     TCGv_i64 dst;
4869     TCGv_i32 src;
4870 
4871     if (gen_trap_if_nofpu_fpexception(dc)) {
4872         return true;
4873     }
4874 
4875     dst = tcg_temp_new_i64();
4876     src = gen_load_fpr_F(dc, a->rs);
4877     func(dst, tcg_env, src);
4878     gen_store_fpr_D(dc, a->rd, dst);
4879     return advance_pc(dc);
4880 }
4881 
4882 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4883 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4884 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4885 
4886 static bool do_qq(DisasContext *dc, arg_r_r *a,
4887                   void (*func)(TCGv_i128, TCGv_i128))
4888 {
4889     TCGv_i128 t;
4890 
4891     if (gen_trap_ifnofpu(dc)) {
4892         return true;
4893     }
4894     if (gen_trap_float128(dc)) {
4895         return true;
4896     }
4897 
4898     gen_op_clear_ieee_excp_and_FTT();
4899     t = gen_load_fpr_Q(dc, a->rs);
4900     func(t, t);
4901     gen_store_fpr_Q(dc, a->rd, t);
4902     return advance_pc(dc);
4903 }
4904 
4905 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4906 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4907 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4908 
4909 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4910                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4911 {
4912     TCGv_i128 t;
4913 
4914     if (gen_trap_if_nofpu_fpexception(dc)) {
4915         return true;
4916     }
4917     if (gen_trap_float128(dc)) {
4918         return true;
4919     }
4920 
4921     t = gen_load_fpr_Q(dc, a->rs);
4922     func(t, tcg_env, t);
4923     gen_store_fpr_Q(dc, a->rd, t);
4924     return advance_pc(dc);
4925 }
4926 
4927 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4928 
4929 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4930                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4931 {
4932     TCGv_i128 src;
4933     TCGv_i32 dst;
4934 
4935     if (gen_trap_if_nofpu_fpexception(dc)) {
4936         return true;
4937     }
4938     if (gen_trap_float128(dc)) {
4939         return true;
4940     }
4941 
4942     src = gen_load_fpr_Q(dc, a->rs);
4943     dst = tcg_temp_new_i32();
4944     func(dst, tcg_env, src);
4945     gen_store_fpr_F(dc, a->rd, dst);
4946     return advance_pc(dc);
4947 }
4948 
4949 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4950 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4951 
4952 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4953                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4954 {
4955     TCGv_i128 src;
4956     TCGv_i64 dst;
4957 
4958     if (gen_trap_if_nofpu_fpexception(dc)) {
4959         return true;
4960     }
4961     if (gen_trap_float128(dc)) {
4962         return true;
4963     }
4964 
4965     src = gen_load_fpr_Q(dc, a->rs);
4966     dst = tcg_temp_new_i64();
4967     func(dst, tcg_env, src);
4968     gen_store_fpr_D(dc, a->rd, dst);
4969     return advance_pc(dc);
4970 }
4971 
4972 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4973 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4974 
4975 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4976                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4977 {
4978     TCGv_i32 src;
4979     TCGv_i128 dst;
4980 
4981     if (gen_trap_if_nofpu_fpexception(dc)) {
4982         return true;
4983     }
4984     if (gen_trap_float128(dc)) {
4985         return true;
4986     }
4987 
4988     src = gen_load_fpr_F(dc, a->rs);
4989     dst = tcg_temp_new_i128();
4990     func(dst, tcg_env, src);
4991     gen_store_fpr_Q(dc, a->rd, dst);
4992     return advance_pc(dc);
4993 }
4994 
4995 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4996 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4997 
4998 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4999                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
5000 {
5001     TCGv_i64 src;
5002     TCGv_i128 dst;
5003 
5004     if (gen_trap_if_nofpu_fpexception(dc)) {
5005         return true;
5006     }
5007 
5008     src = gen_load_fpr_D(dc, a->rs);
5009     dst = tcg_temp_new_i128();
5010     func(dst, tcg_env, src);
5011     gen_store_fpr_Q(dc, a->rd, dst);
5012     return advance_pc(dc);
5013 }
5014 
5015 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
5016 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
5017 
5018 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
5019                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
5020 {
5021     TCGv_i32 src1, src2;
5022 
5023     if (gen_trap_ifnofpu(dc)) {
5024         return true;
5025     }
5026 
5027     src1 = gen_load_fpr_F(dc, a->rs1);
5028     src2 = gen_load_fpr_F(dc, a->rs2);
5029     func(src1, src1, src2);
5030     gen_store_fpr_F(dc, a->rd, src1);
5031     return advance_pc(dc);
5032 }
5033 
5034 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
5035 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
5036 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
5037 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
5038 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
5039 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
5040 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
5041 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
5042 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
5043 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
5044 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
5045 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
5046 
5047 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
5048 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
5049 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
5050 
5051 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
5052 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
5053 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
5054 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
5055 
5056 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
5057                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
5058 {
5059     TCGv_i32 src1, src2;
5060 
5061     if (gen_trap_if_nofpu_fpexception(dc)) {
5062         return true;
5063     }
5064 
5065     src1 = gen_load_fpr_F(dc, a->rs1);
5066     src2 = gen_load_fpr_F(dc, a->rs2);
5067     func(src1, tcg_env, src1, src2);
5068     gen_store_fpr_F(dc, a->rd, src1);
5069     return advance_pc(dc);
5070 }
5071 
5072 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
5073 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
5074 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
5075 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
5076 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
5077 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5078 
5079 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
5080                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
5081 {
5082     TCGv_i64 dst;
5083     TCGv_i32 src1, src2;
5084 
5085     if (gen_trap_ifnofpu(dc)) {
5086         return true;
5087     }
5088 
5089     dst = tcg_temp_new_i64();
5090     src1 = gen_load_fpr_F(dc, a->rs1);
5091     src2 = gen_load_fpr_F(dc, a->rs2);
5092     func(dst, src1, src2);
5093     gen_store_fpr_D(dc, a->rd, dst);
5094     return advance_pc(dc);
5095 }
5096 
5097 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
5098 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
5099 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
5100 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
5101 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5102 
5103 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
5104                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
5105 {
5106     TCGv_i64 dst, src2;
5107     TCGv_i32 src1;
5108 
5109     if (gen_trap_ifnofpu(dc)) {
5110         return true;
5111     }
5112 
5113     dst = tcg_temp_new_i64();
5114     src1 = gen_load_fpr_F(dc, a->rs1);
5115     src2 = gen_load_fpr_D(dc, a->rs2);
5116     func(dst, src1, src2);
5117     gen_store_fpr_D(dc, a->rd, dst);
5118     return advance_pc(dc);
5119 }
5120 
5121 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5122 
5123 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
5124                         void (*func)(unsigned, uint32_t, uint32_t,
5125                                      uint32_t, uint32_t, uint32_t))
5126 {
5127     if (gen_trap_ifnofpu(dc)) {
5128         return true;
5129     }
5130 
5131     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
5132          gen_offset_fpr_D(a->rs2), 8, 8);
5133     return advance_pc(dc);
5134 }
5135 
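/*
 * The two trailing 8s are gvec's oprsz and maxsz: each VIS operand is
 * one 64-bit fp register, so every op covers exactly 8 bytes with no
 * tail to clear.  E.g. FPADD16 expands to:
 *
 *     tcg_gen_gvec_add(MO_16, gen_offset_fpr_D(a->rd),
 *                      gen_offset_fpr_D(a->rs1),
 *                      gen_offset_fpr_D(a->rs2), 8, 8);
 */
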
5136 TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
5137 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
5138 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
5139 
5140 TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
5141 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
5142 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
5143 
5144 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
5145 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
5146 
5147 TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
5148 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
5149 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
5150 TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
5151 TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)
5152 
5153 TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
5154 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
5155 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
5156 TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
5157 TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)
5158 
5159 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
5160 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
5161 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
5162 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
5163 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
5164 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5165 
5166 TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
5167 TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
5168 TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
5169 TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
5170 TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
5171 TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)
5172 
5173 TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
5174 TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
5175 TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
5176 TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
5177 TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
5178 TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5179 
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)

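/*
 * Expand a VIS compare: two 64-bit FP sources, with the result mask
 * written to an integer GPR instead of an FP register.
 */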
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)

TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)

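/*
 * As do_ddd, but the helper takes the cpu env and may raise an IEEE
 * exception, so any pending FP exception must be taken first.
 */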
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)

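/*
 * FsMULd: multiply two singles to produce a double.  The insn is a
 * distinct CPU feature; raise an unimplemented-FPop trap when absent.
 */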
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fnsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

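/*
 * Expand an operation on three single-precision sources, as used by
 * the FMAF fused multiply-add instructions.
 */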
static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    src3 = gen_load_fpr_F(dc, a->rs3);
    dst = tcg_temp_new_i32();
    func(dst, src1, src2, src3);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)

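/* The same three-source pattern for 64-bit operands (FMAF, IMA, PDIST). */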
static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_fpr_D(dc, a->rs3);
    func(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)

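/*
 * VIS4 FALIGNDATA variant taking the alignment from a GPR (rs1).
 * Note that rd serves as both first source and destination.
 */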
static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rd);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_gpr(dc, a->rs1);
    gen_op_faligndata_i(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

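/*
 * Expand a 128-bit (quad-precision) operation via an env helper,
 * trapping first if the float128 extension is unavailable.
 */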
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)

static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

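/* FMOVR: move an FP register if integer register rs1 satisfies cond. */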
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)

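/* FMOVcc: move an FP register based on the integer condition codes. */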
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

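/* FMOVfcc: move an FP register based on the FP condition codes. */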
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

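/*
 * FCMP/FCMPE: compare and write %fcc[cc].  32-bit CPUs implement only
 * %fcc0, hence the cc != 0 rejection under avail_32.
 */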
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

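/*
 * VIS3 FLCMP: writes %fcc[cc] directly; unlike FCMP, only the
 * FPU-enabled check is needed, not the pending-exception test.
 */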
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    return advance_pc(dc);
}

static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    return advance_pc(dc);
}

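/*
 * VIS3B moves between the FP and integer register files, implemented
 * as direct loads/stores at the FP register's offset within env.
 */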
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)

static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)

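/*
 * Core translator hooks.  init_disas_context decodes the per-TB state
 * packed into tb->flags: MMU index, FPU-enabled and address-mask bits,
 * privilege level, and (for sparc64) the implicit ASI.
 */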
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
# ifdef TARGET_SPARC64
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
# else
    dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
# endif
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

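/*
 * The low two bits of npc encode the special JUMP_PC and DYNAMIC_PC
 * markers; fold them to a stable value before recording insn_start.
 */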
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

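/*
 * Finish the TB: use direct chaining when both PC and NPC are static,
 * otherwise fall back to a TB lookup or plain exit, then emit the
 * out-of-line code for any delayed conditional exceptions.
 */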
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};

void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

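/*
 * Allocate the TCG globals backing CPUSPARCState fields.  %g0 always
 * reads as zero, so cpu_regs[0] stays NULL; window registers %o0-%i7
 * are accessed indirectly through regwptr.
 */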
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}