xref: /qemu/target/sparc/translate.c (revision bcfee4938f8d4e8bf5f49981d3c8a78cf267cb4e)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/target_page.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/translation-block.h"
32 #include "exec/log.h"
33 #include "fpu/softfloat.h"
34 #include "asi.h"
35 #include "target/sparc/translate.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
41 #ifdef TARGET_SPARC64
42 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
43 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
44 # define gen_helper_rett(E)                     qemu_build_not_reached()
45 # define gen_helper_power_down(E)               qemu_build_not_reached()
46 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
47 #else
48 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
49 # define gen_helper_done(E)                     qemu_build_not_reached()
50 # define gen_helper_flushw(E)                   qemu_build_not_reached()
51 # define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
52 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
53 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
54 # define gen_helper_restored(E)                 qemu_build_not_reached()
55 # define gen_helper_retry(E)                    qemu_build_not_reached()
56 # define gen_helper_saved(E)                    qemu_build_not_reached()
57 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
58 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
59 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
60 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
61 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
62 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
64 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
65 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
66 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
67 # define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmpgt8              ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fcmple8              ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fcmpule16            ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fcmpule32            ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fcmpugt16            ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fcmpugt32            ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
92 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
93 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
94 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
95 # define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
96 # define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
97 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
98 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
99 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
100 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
101 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
102 # define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
103 # define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
104 # define MAXTL_MASK                             0
105 #endif
106 
107 #define DISAS_EXIT  DISAS_TARGET_0
108 
109 /* global register indexes */
110 static TCGv_ptr cpu_regwptr;
111 static TCGv cpu_pc, cpu_npc;
112 static TCGv cpu_regs[32];
113 static TCGv cpu_y;
114 static TCGv cpu_tbr;
115 static TCGv cpu_cond;
116 static TCGv cpu_cc_N;
117 static TCGv cpu_cc_V;
118 static TCGv cpu_icc_Z;
119 static TCGv cpu_icc_C;
120 #ifdef TARGET_SPARC64
121 static TCGv cpu_xcc_Z;
122 static TCGv cpu_xcc_C;
123 static TCGv_i32 cpu_fprs;
124 static TCGv cpu_gsr;
125 #else
126 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
127 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
128 #endif
129 
130 #ifdef TARGET_SPARC64
131 #define cpu_cc_Z  cpu_xcc_Z
132 #define cpu_cc_C  cpu_xcc_C
133 #else
134 #define cpu_cc_Z  cpu_icc_Z
135 #define cpu_cc_C  cpu_icc_C
136 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
137 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
138 #endif
139 
140 /* Floating point comparison registers */
141 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
142 
143 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
144 #ifdef TARGET_SPARC64
145 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
146 # define env64_field_offsetof(X)  env_field_offsetof(X)
147 #else
148 # define env32_field_offsetof(X)  env_field_offsetof(X)
149 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
150 #endif
151 
/* A comparison: condition, register operand, and small immediate operand. */
typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

/*
 * An exception to be raised out-of-line for the current insn;
 * entries are chained on DisasContext.delay_excp_list.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

/* Per-translation-block decoder state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;              /* MMU index for memory accesses */
    bool cpu_cond_live;       /* cpu_cond holds a live value; see finishing_insn */
    bool fpu_enabled;
    bool address_mask_32bit;  /* mask addresses to 32 bits; see AM_CHECK */
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#else
    bool fsr_qne;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;           /* FPRS dirty bits already set within this TB */
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;
196 
/*
 * Instruction-field extraction helpers.  GET_FIELD numbers bits
 * big-endian (bit 0 is the MSB of the 32-bit word); GET_FIELD_SP uses
 * the SPARC manual's numbering (bit 0 is 2^0).  The ...s variants
 * sign-extend the extracted field.
 */
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

/* Significant bits of a trap number: 8 for UA2005 hypervisor, 7 for V8. */
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

/* Bit 13 of an insn word selects the immediate operand form. */
#define IS_IMM (insn & (1<<13))
212 
/*
 * Mark the half of the FP register file containing RD as dirty in the
 * FPRS register: bit 1 for registers below 32, bit 2 for the rest
 * (the FPRS lower/upper dirty bits).  No-op on pre-V9 targets, which
 * have no FPRS.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
225 
226 /* floating point registers moves */
227 
228 static int gen_offset_fpr_F(unsigned int reg)
229 {
230     int ret;
231 
232     tcg_debug_assert(reg < 32);
233     ret= offsetof(CPUSPARCState, fpr[reg / 2]);
234     if (reg & 1) {
235         ret += offsetof(CPU_DoubleU, l.lower);
236     } else {
237         ret += offsetof(CPU_DoubleU, l.upper);
238     }
239     return ret;
240 }
241 
/* Load single-precision FP register SRC into a fresh 32-bit temporary. */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

/* Store V into single-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}
254 
/*
 * Byte offset within CPUSPARCState of double-precision FP register REG.
 * Only even register numbers are valid; each double occupies one
 * fpr[] slot.
 */
static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

/* Load double-precision FP register SRC into a fresh 64-bit temporary. */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

/* Store V into double-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}
274 
/*
 * Load quad FP register SRC as an i128: the even double-register forms
 * the high half, the following double the low half.
 */
static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

/* Store i128 V into quad FP register DST as two double-register stores. */
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}
294 
295 /* moves */
296 #ifdef CONFIG_USER_ONLY
297 #define supervisor(dc) 0
298 #define hypervisor(dc) 0
299 #else
300 #ifdef TARGET_SPARC64
301 #define hypervisor(dc) (dc->hypervisor)
302 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
303 #else
304 #define supervisor(dc) (dc->supervisor)
305 #define hypervisor(dc) 0
306 #endif
307 #endif
308 
309 #if !defined(TARGET_SPARC64)
310 # define AM_CHECK(dc)  false
311 #elif defined(TARGET_ABI32)
312 # define AM_CHECK(dc)  true
313 #elif defined(CONFIG_USER_ONLY)
314 # define AM_CHECK(dc)  false
315 #else
316 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
317 #endif
318 
/* Truncate ADDR in place to 32 bits when address masking applies
   (see AM_CHECK above). */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

/* Compile-time-constant counterpart of gen_address_mask. */
static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}
330 
331 static TCGv gen_load_gpr(DisasContext *dc, int reg)
332 {
333     if (reg > 0) {
334         assert(reg < 32);
335         return cpu_regs[reg];
336     } else {
337         TCGv t = tcg_temp_new();
338         tcg_gen_movi_tl(t, 0);
339         return t;
340     }
341 }
342 
/* Store V into GPR REG; writes to %g0 are silently discarded. */
static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}
350 
351 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
352 {
353     if (reg > 0) {
354         assert(reg < 32);
355         return cpu_regs[reg];
356     } else {
357         return tcg_temp_new();
358     }
359 }
360 
/* Direct TB chaining is usable only when the translator allows it for
   both the PC and the NPC targets. */
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

/*
 * Emit the end-of-TB transfer to PC/NPC: direct chaining (goto_tb)
 * when permitted, otherwise an indirect lookup-and-jump.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
383 
/*
 * Return a TCGv holding the 32-bit (icc) carry as a 0/1 value.
 * On a 64-bit target the icc carry lives in bit 32 of cpu_icc_C
 * (see gen_op_addcc_int/gen_op_subcc_int), so extract it; on a
 * 32-bit target cpu_icc_C already holds it directly.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
393 
/*
 * Emit DST = SRC1 + SRC2 (+ CIN if non-NULL) and update the CC state.
 * On exit: cc_N and cc_Z hold the result value, cc_C the carry-out as
 * 0/1, and cc_V the signed overflow (result ^ src2) & ~(src1 ^ src2).
 * On 64-bit targets, icc_C additionally holds a value whose bit 32 is
 * the carry out of the low 32 bits, and icc_Z holds the result.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        /* Two double-word adds, so the carry-out accumulates in cc_C. */
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* cc_Z temporarily holds src1 ^ src2 for the overflow computation. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
418 
/* Add and update condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

/* Tagged add: any set tag bits (bits 0-1 of either operand) force icc.V. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* Add with the 32-bit carry (gen_carry32); flags untouched. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

/* Add with the 32-bit carry and update condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

/* Add with the full-width carry in cpu_cc_C; flags untouched. */
static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

/* Add with the full-width carry and update condition codes. */
static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}
461 
/*
 * Emit DST = SRC1 - SRC2 (- CIN if non-NULL) and update the CC state.
 * On exit: cc_N and cc_Z hold the result value, cc_C the borrow as
 * 0/1, and cc_V the signed overflow (result ^ src1) & (src1 ^ src2).
 * On 64-bit targets, icc_C additionally holds a value whose bit 32 is
 * the borrow out of the low 32 bits, and icc_Z holds the result.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* The double-word subtract left 0/-1 in cc_C; turn it into 0/+1. */
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    /* cc_Z temporarily holds src1 ^ src2 for the overflow computation. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    /* Borrow into bit 32 is result ^ src1 ^ src2, as for addition. */
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
483 
/* Subtract and update condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

/* Tagged subtract: any set tag bits (bits 0-1 of either operand)
   force icc.V. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* Subtract with the 32-bit borrow (gen_carry32); flags untouched. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

/* Subtract with the 32-bit borrow and update condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

/* Subtract with the full-width borrow in cpu_cc_C; flags untouched. */
static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

/* Subtract with the full-width borrow and update condition codes. */
static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}
526 
/*
 * MULScc: one step of a 32-bit multiply.  Shift Y right by one,
 * inserting bit 0 of src1; choose the step addend (src2 if the old
 * Y bit 0 was set, else 0); shift src1 right by one, inserting
 * icc.N ^ icc.V at bit 31; then add with condition codes.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
563 
/*
 * 32x32 -> 64-bit multiply for UMUL/SMUL: DST receives the product
 * (the full 64 bits on a 64-bit target, the low 32 otherwise) and
 * Y receives the high 32 bits.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    /* Extend the truncated 32-bit operands before multiplying. */
    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
588 
/* Unsigned 32x32 multiply; high half goes to Y. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

/* Signed 32x32 multiply; high half goes to Y. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

/* DST = high 64 bits of the unsigned 64x64 product; low half discarded. */
static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}
606 
/* DST = low 64 bits of SRC1 * SRC2 + SRC3. */
static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

/*
 * DST = high 64 bits of the 128-bit value SRC1 * SRC2 + SRC3
 * (unsigned multiply; the carry out of the low half is propagated
 * into the high half by the double-word add).
 */
static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}
626 
/*
 * Signed 32-bit divide via helper (it receives tcg_env, so it may
 * raise a guest exception); the 32-bit quotient is sign-extended or
 * truncated into DST as appropriate for the target width.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
638 
/*
 * UDIVcc: unsigned divide and set condition codes.  The unpacking
 * below assumes the helper returns the 32-bit quotient in the low
 * half of its 64-bit result and the V indication in the high half
 * (TODO confirm against helper implementation): those go to cc_N and
 * cc_V, the carries are cleared, and the result mirrored into cc_Z
 * and DST.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* On 64-bit targets cc_V is itself 64-bit; use it as scratch. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
663 
/*
 * SDIVcc: signed divide and set condition codes.  Same result-packing
 * convention as gen_op_udivcc, except the quotient is sign-extended
 * into cc_N.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* On 64-bit targets cc_V is itself 64-bit; use it as scratch. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
688 
/* Tagged add, trap variant: implemented entirely in the helper,
   which receives tcg_env and so may raise a guest exception. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

/* Tagged subtract, trap variant; see gen_op_taddcctv. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

/* Population count of SRC2; SRC1 is unused by the operation. */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

/* Count leading zeros; an all-zero SRC yields TARGET_LONG_BITS. */
static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}
708 
#ifndef TARGET_SPARC64
/* Stub so the array16/array32 wrappers below compile on 32-bit
   targets; must never be reached there. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

/* array8 address computation scaled for 2-byte elements. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

/* array8 address computation scaled for 4-byte elements. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
727 
/* VIS pixel-pack wrappers: pass the GSR through to the helpers.
   Unreachable on pre-V9 targets, which have no GSR. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
754 
/*
 * Per-halfword saturating add: each signed 16-bit lane of the 32-bit
 * inputs is added and clamped to [INT16_MIN, INT16_MAX], then the two
 * lanes are repacked.
 */
static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        /* Clamp the 17-bit sum into signed 16-bit range. */
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

/* Per-halfword saturating subtract; mirror of gen_op_fpadds16s. */
static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}
790 
/*
 * Saturating signed 32-bit add.  V holds the overflow predicate
 * (r ^ src2) & ~(src1 ^ src2).  T holds the saturated value, computed
 * branchlessly: (r >= 0) + INT32_MAX, i.e. INT32_MIN when the wrapped
 * result is non-negative (so the true result overflowed negatively),
 * INT32_MAX otherwise.  Select T on overflow, else the plain sum R.
 */
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

/*
 * Saturating signed 32-bit subtract; as gen_op_fpadds32s, with the
 * subtraction overflow predicate (r ^ src1) & (src1 ^ src2).
 */
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
826 
/*
 * FALIGNDATA core: view S1:S2 as a 16-byte value and extract the
 * 8 bytes starting at the byte offset in the low 3 bits of GSR:
 * DST = (S1 << 8*align) | (S2 >> (64 - 8*align)).
 */
static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Bit shift = 8 * GSR.align. */
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);  /* i.e. 63 - shift */
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
854 
/* FALIGNDATA using the global GSR. */
static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}

/* Byte shuffle steered by the GSR; sparc64 only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

/* Pixel distance with a zero accumulator (PDISTN); sparc64 only. */
static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

/*
 * FMUL8x16AL/AU: feed the fmul8x16a helper the low (AL) or high (AU)
 * signed 16-bit half of SRC2.  NOTE(review): both clobber SRC2 in
 * place — callers are assumed to pass a dead temporary; confirm at
 * the call sites.
 */
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}
889 
/*
 * FMULD8ULX16: multiply the unsigned low byte of each 16-bit half of
 * SRC1 by the corresponding signed 16-bit half of SRC2, producing two
 * 32-bit products packed into the 64-bit DST (low halves' product in
 * the low word).
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /* Low halves: unsigned byte * signed 16-bit. */
    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* High halves likewise. */
    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

/*
 * FMULD8SUX16: multiply the signed high byte (effectively scaled by
 * 256) of each 16-bit half of SRC1 by the corresponding signed 16-bit
 * half of SRC2; packing as in gen_op_fmuld8ulx16.
 */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
931 
932 #ifdef TARGET_SPARC64
933 static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
934                              TCGv_vec src1, TCGv_vec src2)
935 {
936     TCGv_vec a = tcg_temp_new_vec_matching(dst);
937     TCGv_vec c = tcg_temp_new_vec_matching(dst);
938 
939     tcg_gen_add_vec(vece, a, src1, src2);
940     tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
941     /* Vector cmp produces -1 for true, so subtract to add carry. */
942     tcg_gen_sub_vec(vece, dst, a, c);
943 }
944 
945 static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
946                             uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
947 {
948     static const TCGOpcode vecop_list[] = {
949         INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
950     };
951     static const GVecGen3 op = {
952         .fni8 = gen_helper_fchksm16,
953         .fniv = gen_vec_fchksm16,
954         .opt_opc = vecop_list,
955         .vece = MO_16,
956     };
957     tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
958 }
959 
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    /*
     * Average with rounding: (s1 >> 1) + (s2 >> 1) plus a rounding
     * term of 1 when either input has its low bit set.
     * Note that src1/src2 are clobbered as scratch here.
     */
    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
972 
/*
 * Expand FMEAN16 as a gvec operation: vector expansion via
 * gen_vec_fmean16 when add/sari vector ops are available,
 * otherwise fall back to the 64-bit helper.
 */
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
987 #else
988 #define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
989 #define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
990 #endif
991 
/* Mark the point in an insn past which no unwinding exception can occur. */
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}
1004 
1005 static void gen_generic_branch(DisasContext *dc)
1006 {
1007     TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
1008     TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
1009     TCGv c2 = tcg_constant_tl(dc->jump.c2);
1010 
1011     tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
1012 }
1013 
/* Call this function before using the condition register as it may
   have been set for a jump.  Resolves a pending JUMP_PC into a
   concrete cpu_npc value via movcond. */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
1023 
/*
 * Ensure cpu_npc holds the architectural next-pc.  dc->npc values with
 * the low bits set are the special markers JUMP_PC / DYNAMIC_PC /
 * DYNAMIC_PC_LOOKUP; otherwise it is a known constant address.
 */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending branch into cpu_npc.  */
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the runtime value.  */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1042 
/* Write both pc and npc back to the CPU state, e.g. before a helper
   call that may raise an exception. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1048 
/* Raise exception WHICH at the current insn and end the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1056 
1057 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1058 {
1059     DisasDelayException *e = g_new0(DisasDelayException, 1);
1060 
1061     e->next = dc->delay_excp_list;
1062     dc->delay_excp_list = e;
1063 
1064     e->lab = gen_new_label();
1065     e->excp = excp;
1066     e->pc = dc->pc;
1067     /* Caller must have used flush_cond before branch. */
1068     assert(e->npc != JUMP_PC);
1069     e->npc = dc->npc;
1070 
1071     return e->lab;
1072 }
1073 
/* Convenience wrapper for delay_exceptionv with a constant exception. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1078 
1079 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1080 {
1081     TCGv t = tcg_temp_new();
1082     TCGLabel *lab;
1083 
1084     tcg_gen_andi_tl(t, addr, mask);
1085 
1086     flush_cond(dc);
1087     lab = delay_exception(dc, TT_UNALIGNED);
1088     tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1089 }
1090 
/*
 * Advance pc to npc, both in the DisasContext tracking and, when npc is
 * one of the dynamic markers, in the generated code as well.
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending branch first, then copy it to pc.  */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both values are compile-time constants.  */
        dc->pc = dc->npc;
    }
}
1114 
/*
 * Compute into *CMP the TCG comparison equivalent to integer condition
 * code COND (4-bit encoding: bit 3 inverts the sense of bits 2:0),
 * evaluated against the current cc_N/cc_V/cc_Z/cc_C state.
 * XCC selects the 64-bit condition codes; otherwise the 32-bit icc
 * view is used (on sparc64 that means the icc_* aliases / extractions).
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* For 32-bit icc, the carry is kept in bit 32.  */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1213 
/*
 * Compute into *CMP the TCG comparison for floating-point condition
 * code COND against FCC register CC.  Bit 3 of COND inverts the sense.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* Both encodings have bit 0 set.  */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1273 
1274 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1275 {
1276     static const TCGCond cond_reg[4] = {
1277         TCG_COND_NEVER,  /* reserved */
1278         TCG_COND_EQ,
1279         TCG_COND_LE,
1280         TCG_COND_LT,
1281     };
1282     TCGCond tcond;
1283 
1284     if ((cond & 3) == 0) {
1285         return false;
1286     }
1287     tcond = cond_reg[cond & 3];
1288     if (cond & 4) {
1289         tcond = tcg_invert_cond(tcond);
1290     }
1291 
1292     cmp->cond = tcond;
1293     cmp->c1 = tcg_temp_new();
1294     cmp->c2 = 0;
1295     tcg_gen_mov_tl(cmp->c1, r_src);
1296     return true;
1297 }
1298 
/* Reset FSR.CEXC and FSR.FTT, done at the start of non-trapping FPops. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1304 
/* FMOVs: single-precision register copy; clears CEXC/FTT like any FPop. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1310 
/* FNEGs: flip the sign bit (bit 31) of a single-precision value. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}
1316 
/* FABSs: clear the sign bit (bit 31) of a single-precision value. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
1322 
/* FMOVd: double-precision register copy; clears CEXC/FTT like any FPop. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1328 
/* FNEGd: flip the sign bit (bit 63) of a double-precision value. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}
1334 
/* FABSd: clear the sign bit (bit 63) of a double-precision value. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1340 
1341 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1342 {
1343     TCGv_i64 l = tcg_temp_new_i64();
1344     TCGv_i64 h = tcg_temp_new_i64();
1345 
1346     tcg_gen_extr_i128_i64(l, h, src);
1347     tcg_gen_xori_i64(h, h, 1ull << 63);
1348     tcg_gen_concat_i64_i128(dst, l, h);
1349 }
1350 
1351 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1352 {
1353     TCGv_i64 l = tcg_temp_new_i64();
1354     TCGv_i64 h = tcg_temp_new_i64();
1355 
1356     tcg_gen_extr_i128_i64(l, h, src);
1357     tcg_gen_andi_i64(h, h, ~(1ull << 63));
1358     tcg_gen_concat_i64_i128(dst, l, h);
1359 }
1360 
1361 static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1362 {
1363     TCGv_i32 z = tcg_constant_i32(0);
1364     gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
1365 }
1366 
1367 static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1368 {
1369     TCGv_i32 z = tcg_constant_i32(0);
1370     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
1371 }
1372 
1373 static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1374 {
1375     TCGv_i32 z = tcg_constant_i32(0);
1376     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
1377     gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
1378 }
1379 
1380 static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1381 {
1382     TCGv_i32 z = tcg_constant_i32(0);
1383     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
1384     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
1385 }
1386 
1387 static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1388 {
1389     TCGv_i32 z = tcg_constant_i32(0);
1390     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
1391                                    float_muladd_negate_result);
1392     gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
1393 }
1394 
1395 static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1396 {
1397     TCGv_i32 z = tcg_constant_i32(0);
1398     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
1399                                    float_muladd_negate_result);
1400     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
1401 }
1402 
1403 static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1404 {
1405     TCGv_i32 z = tcg_constant_i32(0);
1406     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
1407     gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
1408 }
1409 
1410 static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1411 {
1412     TCGv_i32 z = tcg_constant_i32(0);
1413     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
1414     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
1415 }
1416 
1417 /* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
1418 static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1419 {
1420     TCGv_i32 fone = tcg_constant_i32(float32_one);
1421     TCGv_i32 mone = tcg_constant_i32(-1);
1422     TCGv_i32 op = tcg_constant_i32(0);
1423     gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
1424 }
1425 
1426 static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1427 {
1428     TCGv_i64 fone = tcg_constant_i64(float64_one);
1429     TCGv_i32 mone = tcg_constant_i32(-1);
1430     TCGv_i32 op = tcg_constant_i32(0);
1431     gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
1432 }
1433 
1434 /* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
1435 static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1436 {
1437     TCGv_i32 fone = tcg_constant_i32(float32_one);
1438     TCGv_i32 mone = tcg_constant_i32(-1);
1439     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
1440     gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
1441 }
1442 
1443 static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1444 {
1445     TCGv_i64 fone = tcg_constant_i64(float64_one);
1446     TCGv_i32 mone = tcg_constant_i32(-1);
1447     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
1448     gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
1449 }
1450 
1451 /* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
1452 static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1453 {
1454     TCGv_i32 fone = tcg_constant_i32(float32_one);
1455     TCGv_i32 mone = tcg_constant_i32(-1);
1456     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
1457     gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
1458 }
1459 
1460 static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1461 {
1462     TCGv_i64 fone = tcg_constant_i64(float64_one);
1463     TCGv_i32 mone = tcg_constant_i32(-1);
1464     TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
1465     gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
1466 }
1467 
/* Raise a TT_FP_EXCP trap with FSR.FTT set to FTT. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1479 
/*
 * Raise TT_NFPU_INSN if the FPU is disabled; returns true if the
 * trap was generated.  User-only builds always have the FPU enabled.
 */
static bool gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return true;
    }
#endif
    return false;
}
1490 
/*
 * Raise a sequence_error fp exception if the sparc32 FQ is non-empty;
 * returns true if the trap was generated.
 */
static bool gen_trap_iffpexception(DisasContext *dc)
{
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    /*
     * There are 3 states for the sparc32 fpu:
     * Normally the fpu is in fp_execute, and all insns are allowed.
     * When an exception is signaled, it moves to fp_exception_pending state.
     * Upon seeing the next FPop, the fpu moves to fp_exception state,
     * populates the FQ, and generates an fp_exception trap.
     * The fpu remains in fp_exception state until FQ becomes empty
     * after execution of a STDFQ instruction.  While the fpu is in
     * fp_exception state, any FPop, fp load or fp branch insn will
     * return to fp_exception_pending state, set FSR.FTT to sequence_error,
     * and the insn will not be entered into the FQ.
     *
     * In QEMU, we do not model the fp_exception_pending state and
     * instead populate FQ and raise the exception immediately.
     * But we can still honor fp_exception state by noticing when
     * the FQ is not empty.
     */
    if (dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }
#endif
    return false;
}
1518 
/* Combined check: FPU disabled, or sparc32 FQ non-empty. */
static bool gen_trap_if_nofpu_fpexception(DisasContext *dc)
{
    return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc);
}
1523 
1524 /* asi moves */
typedef enum {
    GET_ASI_HELPER,   /* fall back to the out-of-line ld/st_asi helpers */
    GET_ASI_EXCP,     /* an exception has already been generated */
    GET_ASI_DIRECT,   /* plain load/store with a resolved mmu index */
    GET_ASI_DTWINX,   /* twinx/quad-ldd ASIs (reserved for ldda/stda) */
    GET_ASI_CODE,     /* sparc32 instruction-space access */
    GET_ASI_BLOCK,    /* block load/store ASIs */
    GET_ASI_SHORT,    /* 8/16-bit "short" fp access ASIs */
    GET_ASI_BCOPY,    /* sparc32 block copy (sta) */
    GET_ASI_BFILL,    /* sparc32 block fill (stda) */
} ASIType;

typedef struct {
    ASIType type;
    int asi;      /* the resolved asi number */
    int mem_idx;  /* mmu index to use for the access */
    MemOp memop;  /* size/sign/endianness of the access */
} DisasASI;
1543 
1544 /*
1545  * Build DisasASI.
1546  * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1548  */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        /* Use the %asi register for the non-immediate form.  */
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: choose the mmu index implied by the asi.  */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_MON_AIUP:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_MON_AIUS:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_MON_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
        case ASI_MON_P:
            break;
        }
        /* Second switch: classify how the access is performed.  */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
        case ASI_MON_P:
        case ASI_MON_S:
        case ASI_MON_AIUP:
        case ASI_MON_AIUS:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1783 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Link-time stubs for the sparc32 user-only build; these paths are
 * asserted unreachable at runtime.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1797 
/* Generate a load through ASI descriptor DA from ADDR into DST. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* Fall back to the out-of-line helper; may raise an exception,
           so the pc/npc state must be saved first.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1843 
/* Generate a store through ASI descriptor DA of SRC to ADDR. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to a 32-byte boundary.  */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            /* Two 16-byte transfers make up the 32-byte block.  */
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1922 
1923 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1924                          TCGv dst, TCGv src, TCGv addr)
1925 {
1926     switch (da->type) {
1927     case GET_ASI_EXCP:
1928         break;
1929     case GET_ASI_DIRECT:
1930         tcg_gen_atomic_xchg_tl(dst, addr, src,
1931                                da->mem_idx, da->memop | MO_ALIGN);
1932         break;
1933     default:
1934         /* ??? Should be DAE_invalid_asi.  */
1935         gen_exception(dc, TT_DATA_ACCESS);
1936         break;
1937     }
1938 }
1939 
1940 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1941                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1942 {
1943     switch (da->type) {
1944     case GET_ASI_EXCP:
1945         return;
1946     case GET_ASI_DIRECT:
1947         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1948                                   da->mem_idx, da->memop | MO_ALIGN);
1949         break;
1950     default:
1951         /* ??? Should be DAE_invalid_asi.  */
1952         gen_exception(dc, TT_DATA_ACCESS);
1953         break;
1954     }
1955 }
1956 
/*
 * LDSTUB(A): atomically load the byte at ADDR into DST and store 0xff
 * back, for the ASI described by DA.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; emit nothing. */
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* The ld/st helper pair below is not atomic; force a
               serialized retranslation instead.  */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* The helpers may fault; synchronize pc/npc first. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1991 
/*
 * Generate code for a floating-point load with an explicit ASI
 * (ldfa/lddfa/ldqfa).  ORIG_SIZE is the architectural access size;
 * RD is the destination %f register number.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* Split a quad access into two 8-byte accesses below. */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; emit nothing. */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Quad: load both halves, then write back both registers. */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* 64-byte block load: 8 consecutive doubles; only the
               first access carries the 64-byte alignment check.  */
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            /* The helper may fault; synchronize pc/npc first. */
            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2112 
2113 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2114                         TCGv addr, int rd)
2115 {
2116     MemOp memop = da->memop;
2117     MemOp size = memop & MO_SIZE;
2118     TCGv_i32 d32;
2119     TCGv_i64 d64;
2120     TCGv addr_tmp;
2121 
2122     /* TODO: Use 128-bit load/store below. */
2123     if (size == MO_128) {
2124         memop = (memop & ~MO_SIZE) | MO_64;
2125     }
2126 
2127     switch (da->type) {
2128     case GET_ASI_EXCP:
2129         break;
2130 
2131     case GET_ASI_DIRECT:
2132         memop |= MO_ALIGN_4;
2133         switch (size) {
2134         case MO_32:
2135             d32 = gen_load_fpr_F(dc, rd);
2136             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2137             break;
2138         case MO_64:
2139             d64 = gen_load_fpr_D(dc, rd);
2140             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2141             break;
2142         case MO_128:
2143             /* Only 4-byte alignment required.  However, it is legal for the
2144                cpu to signal the alignment fault, and the OS trap handler is
2145                required to fix it up.  Requiring 16-byte alignment here avoids
2146                having to probe the second page before performing the first
2147                write.  */
2148             d64 = gen_load_fpr_D(dc, rd);
2149             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2150             addr_tmp = tcg_temp_new();
2151             tcg_gen_addi_tl(addr_tmp, addr, 8);
2152             d64 = gen_load_fpr_D(dc, rd + 2);
2153             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2154             break;
2155         default:
2156             g_assert_not_reached();
2157         }
2158         break;
2159 
2160     case GET_ASI_BLOCK:
2161         /* Valid for stdfa on aligned registers only.  */
2162         if (orig_size == MO_64 && (rd & 7) == 0) {
2163             /* The first operation checks required alignment.  */
2164             addr_tmp = tcg_temp_new();
2165             for (int i = 0; ; ++i) {
2166                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2167                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2168                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2169                 if (i == 7) {
2170                     break;
2171                 }
2172                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2173                 addr = addr_tmp;
2174             }
2175         } else {
2176             gen_exception(dc, TT_ILL_INSN);
2177         }
2178         break;
2179 
2180     case GET_ASI_SHORT:
2181         /* Valid for stdfa only.  */
2182         if (orig_size == MO_64) {
2183             d64 = gen_load_fpr_D(dc, rd);
2184             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2185         } else {
2186             gen_exception(dc, TT_ILL_INSN);
2187         }
2188         break;
2189 
2190     default:
2191         /* According to the table in the UA2011 manual, the only
2192            other asis that are valid for ldfa/lddfa/ldqfa are
2193            the PST* asis, which aren't currently handled.  */
2194         gen_exception(dc, TT_ILL_INSN);
2195         break;
2196     }
2197 }
2198 
/*
 * LDDA: load a doubleword (or, for the DTWINX asis, a 128-bit twin)
 * into the register pair rd / rd+1 for the ASI described by DA.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; emit nothing. */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        /* sparc32 instruction-space access: go through the code-load
           helper rather than the data TLB.  */
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* The helper may fault; synchronize pc/npc first. */
            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2294 
/*
 * STDA: store the register pair rd / rd+1 as one doubleword (or, for
 * DTWINX, a 128-bit twin; for BFILL, a 32-byte fill) at ADDR for the
 * ASI described by DA.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; emit nothing. */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the 8-byte pair into a 16-byte value, then
               store it twice at the 32-byte-aligned address.  */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            /* The helper may fault; synchronize pc/npc first. */
            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2385 
2386 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2387 {
2388 #ifdef TARGET_SPARC64
2389     TCGv_i32 c32, zero, dst, s1, s2;
2390     TCGv_i64 c64 = tcg_temp_new_i64();
2391 
2392     /* We have two choices here: extend the 32 bit data and use movcond_i64,
2393        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2394        the later.  */
2395     c32 = tcg_temp_new_i32();
2396     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2397     tcg_gen_extrl_i64_i32(c32, c64);
2398 
2399     s1 = gen_load_fpr_F(dc, rs);
2400     s2 = gen_load_fpr_F(dc, rd);
2401     dst = tcg_temp_new_i32();
2402     zero = tcg_constant_i32(0);
2403 
2404     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2405 
2406     gen_store_fpr_F(dc, rd, dst);
2407 #else
2408     qemu_build_not_reached();
2409 #endif
2410 }
2411 
2412 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2413 {
2414 #ifdef TARGET_SPARC64
2415     TCGv_i64 dst = tcg_temp_new_i64();
2416     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2417                         gen_load_fpr_D(dc, rs),
2418                         gen_load_fpr_D(dc, rd));
2419     gen_store_fpr_D(dc, rd, dst);
2420 #else
2421     qemu_build_not_reached();
2422 #endif
2423 }
2424 
/*
 * FMOVQcc/FMOVQr: conditionally move quad-precision register rs into
 * rd, selecting each 64-bit half independently with the same condition.
 */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv c2 = tcg_constant_tl(cmp->c2);
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs + 2),
                        gen_load_fpr_D(dc, rd + 2));
    gen_store_fpr_D(dc, rd, h);
    gen_store_fpr_D(dc, rd + 2, l);
#else
    qemu_build_not_reached();
#endif
}
2444 
2445 #ifdef TARGET_SPARC64
/*
 * Compute a pointer to the trap_state entry for the current trap
 * level: r_tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
2467 #endif
2468 
2469 static int extract_dfpreg(DisasContext *dc, int x)
2470 {
2471     int r = x & 0x1e;
2472 #ifdef TARGET_SPARC64
2473     r |= (x & 1) << 5;
2474 #endif
2475     return r;
2476 }
2477 
2478 static int extract_qfpreg(DisasContext *dc, int x)
2479 {
2480     int r = x & 0x1c;
2481 #ifdef TARGET_SPARC64
2482     r |= (x & 1) << 5;
2483 #endif
2484     return r;
2485 }
2486 
2487 /* Include the auto-generated decoder.  */
2488 #include "decode-insns.c.inc"
2489 
/*
 * Expand a decodetree trans_<NAME> entry point: evaluate the AVAIL
 * feature predicate, then forward to FUNC with the given arguments.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Feature availability predicates used by TRANS above.  Most fold to
 * compile-time constants depending on TARGET_SPARC64; the rest test
 * the cpu model's feature bits.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
# define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
# define avail_VIS3B(C)   avail_VIS3(C)
# define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_FMAF(C)    false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_IMA(C)     false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
# define avail_VIS3(C)    false
# define avail_VIS3B(C)   false
# define avail_VIS4(C)    false
#endif
2530 
/*
 * Default case for non jump instructions: advance pc/npc by one insn.
 * Handles the case where npc is one of the DYNAMIC_PC/JUMP_PC markers
 * left behind by a preceding delayed branch.
 */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime: copy it into pc and bump. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are statically known. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2571 
2572 /*
2573  * Major opcodes 00 and 01 -- branches, call, and sethi
2574  */
2575 
/*
 * Advance pc/npc for a conditional branch with annul bit ANNUL and
 * word displacement DISP.  The ALWAYS/NEVER conditions are resolved
 * statically.  Otherwise, the annulling form ends the TB with two
 * goto_tb exits, while the non-annulling form records the condition
 * (dc->jump, cpu_cond) so the delay-slot insn can resolve it.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Branch-always with annul: the delay slot is skipped. */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        /* Branch never taken: fall through, skipping the delay slot
           if annulled.  */
        npc = dc->npc;
        if (npc & 3) {
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Annulling conditional branch: the delay slot executes only
           when the branch is taken, so both outcomes exit the TB.  */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* npc only known at runtime: select the branch target
                   with a movcond instead of a static jump pair.  */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2655 
/* Raise a privileged-instruction trap; always "handles" the insn. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2661 
/* Raise an fp exception with FTT = unimplemented_FPop. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2667 
2668 static bool gen_trap_float128(DisasContext *dc)
2669 {
2670     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2671         return false;
2672     }
2673     return raise_unimpfpop(dc);
2674 }
2675 
/* Integer condition-code branches: Bicc (v8) and BPcc (v9). */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2686 
/* Floating-point condition-code branches: FBfcc (v8) and FBPfcc (v9). */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* FPU disabled or pending fp exception takes precedence. */
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2700 
2701 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2702 {
2703     DisasCompare cmp;
2704 
2705     if (!avail_64(dc)) {
2706         return false;
2707     }
2708     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2709         return false;
2710     }
2711     return advance_jump_cond(dc, &cmp, a->a, a->i);
2712 }
2713 
/* CALL: save the return address in %o7 (r15) and delay-branch. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2723 
/* Coprocessor instructions (no coprocessor is implemented). */
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2737 
2738 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2739 {
2740     /* Special-case %g0 because that's the canonical nop.  */
2741     if (a->rd) {
2742         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2743     }
2744     return advance_pc(dc);
2745 }
2746 
2747 /*
2748  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2749  */
2750 
/*
 * Tcc: conditional trap.  COND is the 4-bit condition (0 = never,
 * 8 = always); CC selects the condition codes; the trap number is
 * rs1 plus either an immediate or rs2, masked and offset by TT_TRAP.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* With hypervisor support, a supervisor trap uses the wider mask. */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* trap = ((rs1 + rs2_or_imm) & mask) + TT_TRAP, at runtime. */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2803 
2804 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2805 {
2806     if (avail_32(dc) && a->cc) {
2807         return false;
2808     }
2809     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2810 }
2811 
2812 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2813 {
2814     if (avail_64(dc)) {
2815         return false;
2816     }
2817     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2818 }
2819 
2820 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2821 {
2822     if (avail_32(dc)) {
2823         return false;
2824     }
2825     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2826 }
2827 
/* STBAR: v8 store barrier, modeled as a store-store fence. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2833 
/* MEMBAR: v9 memory barrier with mmask (ordering) and cmask bits. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2849 
/*
 * Common helper for reading a special register into %rd.
 * @priv: pre-computed privilege predicate; raise a privilege trap if false.
 * @func: emits code producing the value.  It may fill in and return the
 *        TCGv passed to it, or return a different TCGv (e.g. a global).
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2859 
/* %y lives in a dedicated TCG global; the passed dst is unused. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2877 
/* Leon3 %asr17 configuration register, computed by a helper. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

/* Performance instrumentation counter: not modeled, reads as zero. */
static TCGv do_rdpic(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(0);
}

TRANS(RDPIC, HYPV, do_rd_special, supervisor(dc), a->rd, do_rdpic)
2892 
2893 
/* Assemble the v9 %ccr value from the deconstructed flags. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* The current %asi is known at translation time; emit it as a constant. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2912 
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* Reading the timer counts as I/O under icount; end the TB if needed. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* The PC of this insn is a translation-time constant. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2935 
/* %fprs is kept as a 32-bit global; extend to target width. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* %gsr is FPU state: trap if the FPU is disabled. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2951 
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* Like do_rdtick, but reading the system tick (%stick) timer. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    /* Reading the timer counts as I/O under icount; end the TB if needed. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2993 
2994 /*
2995  * UltraSPARC-T1 Strand status.
2996  * HYPV check maybe not enough, UA2005 & UA2007 describe
2997  * this ASR as impl. dep
2998  */
2999 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
3000 {
3001     return tcg_constant_tl(1);
3002 }
3003 
3004 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3005 
3006 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3007 {
3008     gen_helper_rdpsr(dst, tcg_env);
3009     return dst;
3010 }
3011 
3012 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3013 
/* Hyperprivileged register reads (UA2005).  All require hypervisor(dc). */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Index htstate[] by the current trap level: 8 bytes per entry. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
3071 
/* sparc32 window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

/* Read %tpc from the trap state entry for the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3094 
/* Read %tnpc from the trap state entry for the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* Read %tstate from the trap state entry for the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* Read %tt (a 32-bit field) from the current trap state entry. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3140 
/* %tbr (v7/v8) and %tba (v9) share the cpu_tbr global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* psrpil lives in the part of env shared between sparc32 and sparc64. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3172 
/* The current window pointer requires a helper (window bookkeeping). */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* The remaining window-management registers are plain env loads. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3229 
/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* %ver is fixed per cpu model; stored in env at init. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3246 
3247 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3248 {
3249     if (avail_64(dc)) {
3250         gen_helper_flushw(tcg_env);
3251         return advance_pc(dc);
3252     }
3253     return false;
3254 }
3255 
/*
 * Common helper for writing a special register.
 * The architectural source value for WR insns is rs1 ^ rs2 (or
 * rs1 ^ simm13) -- the xor below is the actual semantics, not an
 * optimization.
 * @priv: pre-computed privilege predicate; raise a privilege trap if false.
 * @func: emits code consuming the computed source value.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* With %g0 as rs1, the xor degenerates to the second operand. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3287 
/* %y has only 32 significant bits; keep the global zero-extended. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* Scatter the %ccr value into the deconstructed flag globals. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3301 
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    /* %asi is 8 bits wide. */
    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    /* Cached fprs-dirty state is stale; end TB so it is re-evaluated. */
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3326 
3327 static bool do_priv_nop(DisasContext *dc, bool priv)
3328 {
3329     if (!priv) {
3330         return raise_priv(dc);
3331     }
3332     return advance_pc(dc);
3333 }
3334 
3335 TRANS(WRPCR, HYPV, do_priv_nop, supervisor(dc))
3336 TRANS(WRPIC, HYPV, do_priv_nop, supervisor(dc))
3337 
/* %gsr is FPU state: trap if the FPU is disabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* SOFTINT set/clear/write all go through helpers (interrupt checks). */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3366 
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    /* Store the architectural value, then reprogram the timer limit. */
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3380 
3381 static void do_wrstick(DisasContext *dc, TCGv src)
3382 {
3383 #ifdef TARGET_SPARC64
3384     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3385 
3386     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3387     translator_io_start(&dc->base);
3388     gen_helper_tick_set_count(r_tickptr, src);
3389     /* End TB to handle timer interrupt */
3390     dc->base.is_jmp = DISAS_EXIT;
3391 #else
3392     qemu_build_not_reached();
3393 #endif
3394 }
3395 
3396 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3397 
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    /* Store the architectural value, then reprogram the timer limit. */
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3411 
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    /* The helper halts the cpu; make pc/npc state exact first. */
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

static void do_wrmwait(DisasContext *dc, TCGv src)
{
    /*
     * TODO: This is a stub version of mwait, which merely recognizes
     * interrupts immediately and does not wait.
     */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3431 
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    /* %psr affects cpu mode bits; end the TB to pick them up. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

static void do_wrwim(DisasContext *dc, TCGv src)
{
    /* Only bits for implemented windows are writable. */
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3450 
/* Write %tpc into the trap state entry for the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* Write %tnpc into the trap state entry for the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* Write %tstate into the trap state entry for the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* Write %tt (a 32-bit field) into the current trap state entry. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3506 
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* %tba shares the cpu_tbr global with the sparc32 %tbr. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3526 
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    /* pstate can change translation mode; npc must be looked up anew. */
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    /* Changing %tl changes which trap state is current; force re-lookup. */
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

static void do_wrpil(DisasContext *dc, TCGv src)
{
    /* Lowering %pil may unmask a pending interrupt. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3557 
/* The current window pointer requires a helper (window bookkeeping). */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* The remaining window-management registers are plain env stores. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3606 
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate))
    /* hpstate affects cpu mode bits; end the TB to pick them up. */;
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3624 
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Index htstate[] by the current trap level: 8 bytes per entry. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3640 
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    /* Store the architectural value, then reprogram the timer limit. */
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3669 
/* SAVED/RESTORED adjust the window-management counters via helpers. */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3685 
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3697 
/*
 * Common helper for two-operand arithmetic/logic insns.
 * @func:  generator for the register/constant operand form.
 * @funci: generator for the immediate form, or NULL to fall back to @func
 *         with a constant operand.
 * @logic_cc: if true, compute the result directly into cpu_cc_N and set
 *         the flags as for a logical operation: N and Z are represented
 *         by the result value itself, C and V are cleared.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        /* Mirror the result into the 32-bit flag set on sparc64. */
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3740 
3741 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3742                      void (*func)(TCGv, TCGv, TCGv),
3743                      void (*funci)(TCGv, TCGv, target_long),
3744                      void (*func_cc)(TCGv, TCGv, TCGv))
3745 {
3746     if (a->cc) {
3747         return do_arith_int(dc, a, func_cc, NULL, false);
3748     }
3749     return do_arith_int(dc, a, func, funci, false);
3750 }
3751 
/* Logical insns: same generator either way; cc selects the logic_cc path. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}
3758 
/* Arithmetic/logic dispatch table: (plain, immediate, flag-setting) triples. */
TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3785 
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov simm13 (or mov %g0): store the constant directly. */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            /* mov %rs2: plain register copy. */
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3802 
/*
 * V8 UDIV: divide the 64-bit value (%y << 32) | rs1 by the 32-bit rs2,
 * saturating the quotient to 32 bits on overflow.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A constant zero divisor traps immediately at translation time. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        /* A register divisor needs a runtime zero check (delayed trap). */
        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Dividend: rs1 in the low half, %y in the high half. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate an overflowing quotient to UINT32_MAX. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3855 
/* UDIVX: full 64-bit unsigned divide (V9 only); no overflow possible. */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A zero immediate (or %g0) divisor traps unconditionally. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        /* Register divisor: test for zero at runtime, delayed trap. */
        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3893 
/* SDIVX: full 64-bit signed divide (V9 only). */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A zero immediate (or %g0) divisor traps unconditionally. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        /*
         * x / -1 == -x; use negation so INT64_MIN / -1 cannot
         * trap on the host (see the register case below).
         */
        if (unlikely(a->rs2_or_imm == -1)) {
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        /* Register divisor: test for zero at runtime, delayed trap. */
        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3950 
/*
 * VIS EDGE instructions: compute the mask of valid bytes/halfwords/words
 * for a partial store spanning the edges of an aligned 8-byte block,
 * from the two addresses in rs1 (left) and rs2 (right).  The 'cc' forms
 * additionally set the condition codes from rs1 - rs2.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        /* Set the condition codes from s1 - s2 (result discarded). */
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /*
     * l = element index of s1 within the 8-byte block,
     * r = inverted element index of s2, m = all-elements mask.
     */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l & r : l) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

/* VIS 1: EDGE insns that also set the condition codes. */
TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

/* VIS 2: "no-cc" EDGE variants. */
TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
4029 
4030 static bool do_rr(DisasContext *dc, arg_r_r *a,
4031                   void (*func)(TCGv, TCGv))
4032 {
4033     TCGv dst = gen_dest_gpr(dc, a->rd);
4034     TCGv src = gen_load_gpr(dc, a->rs);
4035 
4036     func(dst, src);
4037     gen_store_gpr(dc, a->rd, dst);
4038     return advance_pc(dc);
4039 }
4040 
4041 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
4042 
4043 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
4044                    void (*func)(TCGv, TCGv, TCGv))
4045 {
4046     TCGv dst = gen_dest_gpr(dc, a->rd);
4047     TCGv src1 = gen_load_gpr(dc, a->rs1);
4048     TCGv src2 = gen_load_gpr(dc, a->rs2);
4049 
4050     func(dst, src1, src2);
4051     gen_store_gpr(dc, a->rd, dst);
4052     return advance_pc(dc);
4053 }
4054 
4055 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
4056 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
4057 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
4058 
4059 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
4060 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
4061 
4062 TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
4063 TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)
4064 
4065 TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
4066 
/*
 * ALIGNADDRESS: rd = (s1 + s2) & ~7; the low 3 bits of the sum are
 * deposited into GSR.align for use by subsequent FALIGNDATA.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
4079 
/*
 * ALIGNADDRESS_LITTLE: as gen_op_alignaddr, but the negated low bits
 * of the sum go into GSR.align.
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4096 
/* BMASK: rd = s1 + s2; the low 32 bits also go into GSR.mask. */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4108 
4109 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4110 {
4111     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4112     return true;
4113 }
4114 
4115 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4116 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4117 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4118 
/*
 * Shift by register: l selects left shift, u selects logical (vs
 * arithmetic) right shift; a->x selects the 64-bit form.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* Mask the shift count to 6 or 5 bits per the X field. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        /* 32-bit form: zero-extend the result. */
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        /* 32-bit form: zero-extend the input before the logical shift. */
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        /* 32-bit form: sign-extend the input before the arithmetic shift. */
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4158 
/*
 * Shift by immediate: l selects left shift, u selects logical (vs
 * arithmetic) right shift; a->x selects the 64-bit form.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Plain target-width shift. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /*
         * 32-bit shift on sparc64: implement shift + extension in one
         * deposit/extract of the 32 - i bits that survive the shift.
         */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4195 
4196 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4197 {
4198     /* For simplicity, we under-decoded the rs2 form. */
4199     if (!imm && rs2_or_imm & ~0x1f) {
4200         return NULL;
4201     }
4202     if (imm || rs2_or_imm == 0) {
4203         return tcg_constant_tl(rs2_or_imm);
4204     } else {
4205         return cpu_regs[rs2_or_imm];
4206     }
4207 }
4208 
/*
 * Conditional move: rd = (cmp holds ? src2 : rd).  The current value
 * of rd is loaded first so the movcond can preserve it when the
 * condition is false.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4218 
4219 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4220 {
4221     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4222     DisasCompare cmp;
4223 
4224     if (src2 == NULL) {
4225         return false;
4226     }
4227     gen_compare(&cmp, a->cc, a->cond, dc);
4228     return do_mov_cond(dc, &cmp, a->rd, src2);
4229 }
4230 
4231 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4232 {
4233     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4234     DisasCompare cmp;
4235 
4236     if (src2 == NULL) {
4237         return false;
4238     }
4239     gen_fcompare(&cmp, a->cc, a->cond);
4240     return do_mov_cond(dc, &cmp, a->rd, src2);
4241 }
4242 
/* MOVR: conditional move on a register comparison against zero. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    /* gen_compare_reg rejects reserved rcond encodings. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4256 
/*
 * Compute rs1 + (simm13 | rs2) and hand the sum to FUNC, which
 * consumes it on behalf of instructions like JMPL, SAVE and RESTORE.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4281 
/*
 * JMPL: jump to SRC, writing the address of this insn into rd.
 * Traps if the target is not 4-byte aligned.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    /* Target is only known at runtime; end the TB with a lookup. */
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4302 
/*
 * RETT (sparc32): privileged return from trap.  The helper performs
 * the privileged state changes; here we only set the new npc.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4320 
/*
 * RETURN (sparc64): restore the register window, then jump to SRC.
 * SRC was computed in a fresh temp (do_add_special), so it remains
 * valid across the window change performed by the restore helper.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4335 
/*
 * SAVE: advance the register window, then write the pre-computed sum
 * into rd of the *new* window (hence the helper call comes first).
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4344 
/*
 * RESTORE: pop the register window, then write the pre-computed sum
 * into rd of the *new* window (hence the helper call comes first).
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4353 
/*
 * DONE/RETRY (sparc64, privileged): return from a trap handler via
 * the corresponding helper.  pc and npc become dynamic because the
 * helper rewrites them from the trap state registers.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* NOTE(review): helpers may touch timers/IO state, so mark I/O. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4372 
4373 /*
4374  * Major opcode 11 -- load and store instructions
4375  */
4376 
/*
 * Compute the effective address rs1 + (simm13 | rs2) for a load or
 * store, applying the 32-bit address mask when AM_CHECK says so.
 * Returns NULL for an under-decoded rs2 encoding.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    /* Skip the add when the offset is zero (imm 0 or %g0). */
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Truncate the effective address to 32 bits. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4405 
/* Integer load into a GPR, via the resolved ASI, with memop MOP. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4429 
/* Integer store from a GPR, via the resolved ASI, with memop MOP. */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4449 
/* LDD: load doubleword into an even/odd register pair; rd must be even. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4466 
/* STD: store doubleword from an even/odd register pair; rd must be even. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4483 
/* LDSTUB: atomic load of a byte into rd, storing 0xff to memory. */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4500 
/* SWAP: atomically exchange the 32-bit value in rd with memory. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4518 
/*
 * CASA/CASXA: compare-and-swap at [rs1] (no offset), comparing with
 * r[rs2]; rd supplies the new value and receives the old memory value.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    /* The address is rs1 alone: imm form with zero offset. */
    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4540 
/* Floating-point load of size SZ (MO_32/MO_64/MO_128) via an ASI. */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* FP loads trap while an fp exception is pending. */
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4568 
/* Floating-point store of size SZ (MO_32/MO_64/MO_128) via an ASI. */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* Store insns are ok in fp_exception_pending state. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, 64, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4596 
/*
 * STDFQ (sparc32, privileged): store the front of the floating-point
 * deferred-trap queue and mark the queue empty.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    TCGv addr;

    if (!avail_32(dc)) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* STDFQ with an empty queue is a sequence error. */
    if (!dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }

    /* Store the single element from the queue. */
    TCGv_i64 fq = tcg_temp_new_i64();
    tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d));
    tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4);

    /* Mark the queue empty, transitioning to fp_execute state. */
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_qne));
    dc->fsr_qne = 0;

    return advance_pc(dc);
#else
    qemu_build_not_reached();
#endif
}
4635 
/* LDFSR: load the 32-bit FSR; only fcc0 exists in this format. */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    /* The helper updates the remaining (non-fcc, non-ftt) FSR fields. */
    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4657 
/*
 * LDXFSR/LDXEFSR: load the 64-bit FSR, distributing fcc0-fcc3 into
 * their dedicated globals.  ENTIRE selects whether ftt is also loaded.
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    lo = tcg_temp_new_i32();
    /* 'hi' aliases cpu_fcc[3]; extract fcc3 last, as it clobbers hi. */
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4696 
/* STFSR/STXFSR: assemble the FSR via helper and store it to memory. */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    /* Store insns are ok in fp_exception_pending state. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4718 
/* Write the 32-bit constant C to FP register rd (FZEROs/FONEs). */
static bool do_fc(DisasContext *dc, int rd, int32_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4730 
/* Write the 64-bit constant C to FP register rd (FZEROd/FONEd). */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4742 
/* Unary 32-bit FP operation without env (no IEEE exceptions raised). */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4763 
/* 64-bit FP source to 32-bit FP destination, without env (VIS pack). */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4783 
/* Unary 32-bit FP operation taking env (may raise IEEE exceptions). */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4802 
/* 64-bit FP source to 32-bit FP destination, taking env. */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4823 
/* Unary 64-bit FP operation without env. */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4845 
/* Unary 64-bit FP operation taking env (may raise IEEE exceptions). */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4865 
/* 32-bit FP source to 64-bit FP destination, without env. */
static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4884 
/* 32-bit FP source to 64-bit FP destination, taking env. */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4905 
/* Unary 128-bit FP operation without env (moves/sign manipulation). */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    /* These ops cannot raise IEEE exceptions; clear cexc/ftt here. */
    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4928 
4929 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4930                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4931 {
4932     TCGv_i128 t;
4933 
4934     if (gen_trap_if_nofpu_fpexception(dc)) {
4935         return true;
4936     }
4937     if (gen_trap_float128(dc)) {
4938         return true;
4939     }
4940 
4941     t = gen_load_fpr_Q(dc, a->rs);
4942     func(t, tcg_env, t);
4943     gen_store_fpr_Q(dc, a->rd, t);
4944     return advance_pc(dc);
4945 }
4946 
4947 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4948 
4949 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4950                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4951 {
4952     TCGv_i128 src;
4953     TCGv_i32 dst;
4954 
4955     if (gen_trap_if_nofpu_fpexception(dc)) {
4956         return true;
4957     }
4958     if (gen_trap_float128(dc)) {
4959         return true;
4960     }
4961 
4962     src = gen_load_fpr_Q(dc, a->rs);
4963     dst = tcg_temp_new_i32();
4964     func(dst, tcg_env, src);
4965     gen_store_fpr_F(dc, a->rd, dst);
4966     return advance_pc(dc);
4967 }
4968 
4969 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4970 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4971 
4972 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4973                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4974 {
4975     TCGv_i128 src;
4976     TCGv_i64 dst;
4977 
4978     if (gen_trap_if_nofpu_fpexception(dc)) {
4979         return true;
4980     }
4981     if (gen_trap_float128(dc)) {
4982         return true;
4983     }
4984 
4985     src = gen_load_fpr_Q(dc, a->rs);
4986     dst = tcg_temp_new_i64();
4987     func(dst, tcg_env, src);
4988     gen_store_fpr_D(dc, a->rd, dst);
4989     return advance_pc(dc);
4990 }
4991 
4992 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4993 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4994 
4995 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4996                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4997 {
4998     TCGv_i32 src;
4999     TCGv_i128 dst;
5000 
5001     if (gen_trap_if_nofpu_fpexception(dc)) {
5002         return true;
5003     }
5004     if (gen_trap_float128(dc)) {
5005         return true;
5006     }
5007 
5008     src = gen_load_fpr_F(dc, a->rs);
5009     dst = tcg_temp_new_i128();
5010     func(dst, tcg_env, src);
5011     gen_store_fpr_Q(dc, a->rd, dst);
5012     return advance_pc(dc);
5013 }
5014 
5015 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
5016 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
5017 
5018 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
5019                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
5020 {
5021     TCGv_i64 src;
5022     TCGv_i128 dst;
5023 
5024     if (gen_trap_if_nofpu_fpexception(dc)) {
5025         return true;
5026     }
5027 
5028     src = gen_load_fpr_D(dc, a->rs);
5029     dst = tcg_temp_new_i128();
5030     func(dst, tcg_env, src);
5031     gen_store_fpr_Q(dc, a->rd, dst);
5032     return advance_pc(dc);
5033 }
5034 
5035 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
5036 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
5037 
5038 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
5039                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
5040 {
5041     TCGv_i32 src1, src2;
5042 
5043     if (gen_trap_ifnofpu(dc)) {
5044         return true;
5045     }
5046 
5047     src1 = gen_load_fpr_F(dc, a->rs1);
5048     src2 = gen_load_fpr_F(dc, a->rs2);
5049     func(src1, src1, src2);
5050     gen_store_fpr_F(dc, a->rd, src1);
5051     return advance_pc(dc);
5052 }
5053 
5054 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
5055 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
5056 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
5057 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
5058 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
5059 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
5060 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
5061 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
5062 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
5063 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
5064 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
5065 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
5066 
5067 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
5068 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
5069 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
5070 
5071 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
5072 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
5073 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
5074 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
5075 
5076 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
5077                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
5078 {
5079     TCGv_i32 src1, src2;
5080 
5081     if (gen_trap_if_nofpu_fpexception(dc)) {
5082         return true;
5083     }
5084 
5085     src1 = gen_load_fpr_F(dc, a->rs1);
5086     src2 = gen_load_fpr_F(dc, a->rs2);
5087     func(src1, tcg_env, src1, src2);
5088     gen_store_fpr_F(dc, a->rd, src1);
5089     return advance_pc(dc);
5090 }
5091 
5092 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
5093 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
5094 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
5095 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
5096 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
5097 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5098 
5099 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
5100                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
5101 {
5102     TCGv_i64 dst;
5103     TCGv_i32 src1, src2;
5104 
5105     if (gen_trap_ifnofpu(dc)) {
5106         return true;
5107     }
5108 
5109     dst = tcg_temp_new_i64();
5110     src1 = gen_load_fpr_F(dc, a->rs1);
5111     src2 = gen_load_fpr_F(dc, a->rs2);
5112     func(dst, src1, src2);
5113     gen_store_fpr_D(dc, a->rd, dst);
5114     return advance_pc(dc);
5115 }
5116 
5117 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
5118 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
5119 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
5120 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
5121 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5122 
5123 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
5124                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
5125 {
5126     TCGv_i64 dst, src2;
5127     TCGv_i32 src1;
5128 
5129     if (gen_trap_ifnofpu(dc)) {
5130         return true;
5131     }
5132 
5133     dst = tcg_temp_new_i64();
5134     src1 = gen_load_fpr_F(dc, a->rs1);
5135     src2 = gen_load_fpr_D(dc, a->rs2);
5136     func(dst, src1, src2);
5137     gen_store_fpr_D(dc, a->rd, dst);
5138     return advance_pc(dc);
5139 }
5140 
5141 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5142 
/*
 * Expand a VIS op on 64-bit FP registers via the generic vector
 * (gvec) expander: frd(D) = func(frs1(D), frs2(D)) element-wise,
 * with element size given by vece.  The gvec expander takes env
 * offsets rather than TCG values; the trailing (8, 8) arguments
 * are oprsz/maxsz, i.e. a single 8-byte vector.
 */
static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)

TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)

TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)

TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)

TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)

TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)

TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5199 
5200 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5201                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5202 {
5203     TCGv_i64 dst, src1, src2;
5204 
5205     if (gen_trap_ifnofpu(dc)) {
5206         return true;
5207     }
5208 
5209     dst = tcg_temp_new_i64();
5210     src1 = gen_load_fpr_D(dc, a->rs1);
5211     src2 = gen_load_fpr_D(dc, a->rs2);
5212     func(dst, src1, src2);
5213     gen_store_fpr_D(dc, a->rd, dst);
5214     return advance_pc(dc);
5215 }
5216 
5217 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5218 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5219 
5220 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5221 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5222 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5223 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5224 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5225 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5226 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5227 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5228 
5229 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5230 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
5231 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5232 
5233 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5234 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5235 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5236 
5237 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5238 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5239 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5240 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5241 
5242 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5243                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5244 {
5245     TCGv_i64 src1, src2;
5246     TCGv dst;
5247 
5248     if (gen_trap_ifnofpu(dc)) {
5249         return true;
5250     }
5251 
5252     dst = gen_dest_gpr(dc, a->rd);
5253     src1 = gen_load_fpr_D(dc, a->rs1);
5254     src2 = gen_load_fpr_D(dc, a->rs2);
5255     func(dst, src1, src2);
5256     gen_store_gpr(dc, a->rd, dst);
5257     return advance_pc(dc);
5258 }
5259 
5260 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5261 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5262 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5263 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5264 TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
5265 TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)
5266 
5267 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5268 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5269 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5270 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5271 TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
5272 TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)
5273 
5274 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5275 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5276 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5277 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5278 TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
5279 TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)
5280 
5281 TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
5282 TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
5283 TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5284 
5285 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5286                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5287 {
5288     TCGv_i64 dst, src1, src2;
5289 
5290     if (gen_trap_if_nofpu_fpexception(dc)) {
5291         return true;
5292     }
5293 
5294     dst = tcg_temp_new_i64();
5295     src1 = gen_load_fpr_D(dc, a->rs1);
5296     src2 = gen_load_fpr_D(dc, a->rs2);
5297     func(dst, tcg_env, src1, src2);
5298     gen_store_fpr_D(dc, a->rd, dst);
5299     return advance_pc(dc);
5300 }
5301 
5302 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5303 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5304 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5305 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5306 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5307 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5308 
/*
 * FsMULd: multiply two single-precision registers into a double result.
 * The instruction is optional: CPUs without CPU_FEATURE_FSMULD raise an
 * unimplemented-FPop trap.  The FPU-disabled / pending-FP-exception
 * check is generated first, so those traps take precedence over the
 * feature check.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5328 
5329 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5330 {
5331     TCGv_i64 dst;
5332     TCGv_i32 src1, src2;
5333 
5334     if (!avail_VIS3(dc)) {
5335         return false;
5336     }
5337     if (gen_trap_ifnofpu(dc)) {
5338         return true;
5339     }
5340     dst = tcg_temp_new_i64();
5341     src1 = gen_load_fpr_F(dc, a->rs1);
5342     src2 = gen_load_fpr_F(dc, a->rs2);
5343     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5344     gen_store_fpr_D(dc, a->rd, dst);
5345     return advance_pc(dc);
5346 }
5347 
5348 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5349                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5350 {
5351     TCGv_i32 dst, src1, src2, src3;
5352 
5353     if (gen_trap_ifnofpu(dc)) {
5354         return true;
5355     }
5356 
5357     src1 = gen_load_fpr_F(dc, a->rs1);
5358     src2 = gen_load_fpr_F(dc, a->rs2);
5359     src3 = gen_load_fpr_F(dc, a->rs3);
5360     dst = tcg_temp_new_i32();
5361     func(dst, src1, src2, src3);
5362     gen_store_fpr_F(dc, a->rd, dst);
5363     return advance_pc(dc);
5364 }
5365 
5366 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5367 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5368 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5369 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5370 
5371 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5372                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5373 {
5374     TCGv_i64 dst, src1, src2, src3;
5375 
5376     if (gen_trap_ifnofpu(dc)) {
5377         return true;
5378     }
5379 
5380     dst  = tcg_temp_new_i64();
5381     src1 = gen_load_fpr_D(dc, a->rs1);
5382     src2 = gen_load_fpr_D(dc, a->rs2);
5383     src3 = gen_load_fpr_D(dc, a->rs3);
5384     func(dst, src1, src2, src3);
5385     gen_store_fpr_D(dc, a->rd, dst);
5386     return advance_pc(dc);
5387 }
5388 
5389 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5390 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5391 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5392 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5393 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5394 TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
5395 TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5396 
/*
 * FALIGNDATAi (VIS4): like FALIGNDATAg but the alignment control comes
 * from integer register rs1 instead of GSR.align.  Note that frd is
 * both a source and the destination: the first FP operand is loaded
 * from a->rd, not a->rs1.
 */
static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rd);       /* read-modify-write of frd */
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_gpr(dc, a->rs1);        /* integer alignment operand */
    gen_op_faligndata_i(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5417 
5418 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5419                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5420 {
5421     TCGv_i128 src1, src2;
5422 
5423     if (gen_trap_if_nofpu_fpexception(dc)) {
5424         return true;
5425     }
5426     if (gen_trap_float128(dc)) {
5427         return true;
5428     }
5429 
5430     src1 = gen_load_fpr_Q(dc, a->rs1);
5431     src2 = gen_load_fpr_Q(dc, a->rs2);
5432     func(src1, tcg_env, src1, src2);
5433     gen_store_fpr_Q(dc, a->rd, src1);
5434     return advance_pc(dc);
5435 }
5436 
5437 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5438 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5439 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5440 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5441 
5442 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5443 {
5444     TCGv_i64 src1, src2;
5445     TCGv_i128 dst;
5446 
5447     if (gen_trap_if_nofpu_fpexception(dc)) {
5448         return true;
5449     }
5450     if (gen_trap_float128(dc)) {
5451         return true;
5452     }
5453 
5454     src1 = gen_load_fpr_D(dc, a->rs1);
5455     src2 = gen_load_fpr_D(dc, a->rs2);
5456     dst = tcg_temp_new_i128();
5457     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5458     gen_store_fpr_Q(dc, a->rd, dst);
5459     return advance_pc(dc);
5460 }
5461 
/*
 * FMOVR: conditional FP register move on an integer register condition.
 * gen_compare_reg returns false for an invalid cond encoding, which
 * makes the decoder treat the insn as illegal.  func is one of
 * gen_fmovs/gen_fmovd/gen_fmovq; is_128 adds the float128 trap check
 * for the quad variant.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    /* A move never faults; discard any stale IEEE status/FTT.  */
    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5485 
/*
 * FMOVcc: conditional FP register move on an integer condition code
 * (a->cc selects icc/xcc, a->cond the condition).
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    /* A move never faults; discard any stale IEEE status/FTT.  */
    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5507 
/*
 * FMOVfcc: conditional FP register move on a floating-point condition
 * code (a->cc selects the %fcc field, a->cond the condition).
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    /* A move never faults; discard any stale IEEE status/FTT.  */
    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5529 
/*
 * FCMPs/FCMPEs: compare two single-precision registers into %fcc[cc].
 * 'e' selects the signaling form (fcmpes).  On 32-bit CPUs only
 * %fcc0 exists, so a non-zero cc field is rejected as illegal.
 */
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
5553 
/*
 * FCMPd/FCMPEd: compare two double-precision registers into %fcc[cc].
 * 'e' selects the signaling form (fcmped).  On 32-bit CPUs only
 * %fcc0 exists, so a non-zero cc field is rejected as illegal.
 */
static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5577 
/*
 * FCMPq/FCMPEq: compare two float128 registers into %fcc[cc].
 * 'e' selects the signaling form (fcmpeq).  Also traps if the CPU
 * does not implement the quad FPU.
 */
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5604 
/*
 * FLCMPs (VIS3): lexicographic compare of two single-precision
 * registers into %fcc[cc], via helper.
 */
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    return advance_pc(dc);
}
5621 
/*
 * FLCMPd (VIS3): lexicographic compare of two double-precision
 * registers into %fcc[cc], via helper.
 */
static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    return advance_pc(dc);
}
5638 
/*
 * MOVsTOsw/MOVsTOuw/MOVdTOx (VIS3B): copy an FP register to an integer
 * register by loading directly from the FP register's slot in
 * CPUSPARCState.  'offset' maps the register number to its env offset;
 * 'load' selects the width/extension (ld32s/ld32u/ld).
 */
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5657 
/*
 * MOVwTOs/MOVxTOd (VIS3B): copy an integer register to an FP register
 * by storing directly into the FP register's slot in CPUSPARCState.
 */
static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5674 
/* Per-TB translator setup: unpack TB flags into DisasContext state.  */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The next-PC travels in cs_base (SPARC is a delay-slot ISA).  */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
# ifdef TARGET_SPARC64
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
# else
    dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
# endif
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    /* Number of 4-byte insns from pc_first to the end of its page.  */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5705 
/* No per-TB prologue needed for SPARC.  */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5709 
/*
 * Record (pc, npc) for this insn.  Non-aligned npc values (low bits
 * set) are the symbolic markers JUMP_PC / DYNAMIC_PC /
 * DYNAMIC_PC_LOOKUP; they are canonicalized before being recorded.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* jump_pc[1] is always the fall-through, pc + 4.  */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5731 
/*
 * Fetch and decode one 4-byte insn; an undecodable insn raises
 * TT_ILL_INSN.  Stop the TB when dc->pc no longer tracks the linear
 * pc_next (i.e. a branch changed control flow).
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5751 
/*
 * Emit the TB epilogue: dispatch on how the TB ended, materialize
 * pc/npc as needed, and then emit the out-of-line code for any
 * delayed exceptions queued during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is dynamic (low bits set).  Store
         * whatever is static, expand the dynamic parts, and decide
         * whether a TB lookup (goto_ptr) is still permitted.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional branch into npc.  */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Out-of-line landing pads for exceptions raised in delay slots.  */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* Dynamic npc values have low bits set; only store a static npc.  */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5833 
/* Hooks consumed by the generic translator_loop().  */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5841 
/* Target entry point: translate one TB using the SPARC hooks above.  */
void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5849 
5850 void sparc_tcg_init(void)
5851 {
5852     static const char gregnames[32][4] = {
5853         "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5854         "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5855         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5856         "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5857     };
5858 
5859     static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5860 #ifdef TARGET_SPARC64
5861         { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5862         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
5863         { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
5864         { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
5865         { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
5866 #else
5867         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
5868 #endif
5869     };
5870 
5871     static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5872 #ifdef TARGET_SPARC64
5873         { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5874         { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5875         { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5876 #endif
5877         { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5878         { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5879         { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5880         { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5881         { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5882         { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5883         { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5884         { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5885         { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5886     };
5887 
5888     unsigned int i;
5889 
5890     cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5891                                          offsetof(CPUSPARCState, regwptr),
5892                                          "regwptr");
5893 
5894     for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5895         *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5896     }
5897 
5898     for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5899         *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5900     }
5901 
5902     cpu_regs[0] = NULL;
5903     for (i = 1; i < 8; ++i) {
5904         cpu_regs[i] = tcg_global_mem_new(tcg_env,
5905                                          offsetof(CPUSPARCState, gregs[i]),
5906                                          gregnames[i]);
5907     }
5908 
5909     for (i = 8; i < 32; ++i) {
5910         cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5911                                          (i - 8) * sizeof(target_ulong),
5912                                          gregnames[i]);
5913     }
5914 }
5915