xref: /qemu/target/sparc/translate.c (revision 7cef6d686309e2792186504ae17cf4f3eb57ef68)
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/target_page.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/translation-block.h"
31 #include "exec/log.h"
32 #include "fpu/softfloat.h"
33 #include "asi.h"
34 #include "target/sparc/translate.h"
35 
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
38 #undef  HELPER_H
39 
/*
 * Compile-time stubs for helpers that exist only on one of the two
 * SPARC variants.  Any use from the other variant must be dead code,
 * which qemu_build_not_reached() enforces at build time.  The
 * expression-shaped stubs (statement expressions yielding NULL) are
 * for helpers whose address is taken rather than called directly.
 */
#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif
105 
106 #define DISAS_EXIT  DISAS_TARGET_0
107 
108 /* global register indexes */
109 static TCGv_ptr cpu_regwptr;
110 static TCGv cpu_pc, cpu_npc;
111 static TCGv cpu_regs[32];
112 static TCGv cpu_y;
113 static TCGv cpu_tbr;
114 static TCGv cpu_cond;
115 static TCGv cpu_cc_N;
116 static TCGv cpu_cc_V;
117 static TCGv cpu_icc_Z;
118 static TCGv cpu_icc_C;
119 #ifdef TARGET_SPARC64
120 static TCGv cpu_xcc_Z;
121 static TCGv cpu_xcc_C;
122 static TCGv_i32 cpu_fprs;
123 static TCGv cpu_gsr;
124 #else
125 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
126 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
127 #endif
128 
129 #ifdef TARGET_SPARC64
130 #define cpu_cc_Z  cpu_xcc_Z
131 #define cpu_cc_C  cpu_xcc_C
132 #else
133 #define cpu_cc_Z  cpu_icc_Z
134 #define cpu_cc_C  cpu_icc_C
135 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
136 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
137 #endif
138 
139 /* Floating point comparison registers */
140 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
141 
142 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
143 #ifdef TARGET_SPARC64
144 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
145 # define env64_field_offsetof(X)  env_field_offsetof(X)
146 #else
147 # define env32_field_offsetof(X)  env_field_offsetof(X)
148 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
149 #endif
150 
151 typedef struct DisasCompare {
152     TCGCond cond;
153     TCGv c1;
154     int c2;
155 } DisasCompare;
156 
157 typedef struct DisasDelayException {
158     struct DisasDelayException *next;
159     TCGLabel *lab;
160     TCGv_i32 excp;
161     /* Saved state at parent insn. */
162     target_ulong pc;
163     target_ulong npc;
164 } DisasDelayException;
165 
166 typedef struct DisasContext {
167     DisasContextBase base;
168     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
169     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
170 
171     /* Used when JUMP_PC value is used. */
172     DisasCompare jump;
173     target_ulong jump_pc[2];
174 
175     int mem_idx;
176     bool cpu_cond_live;
177     bool fpu_enabled;
178     bool address_mask_32bit;
179 #ifndef CONFIG_USER_ONLY
180     bool supervisor;
181 #ifdef TARGET_SPARC64
182     bool hypervisor;
183 #else
184     bool fsr_qne;
185 #endif
186 #endif
187 
188     sparc_def_t *def;
189 #ifdef TARGET_SPARC64
190     int fprs_dirty;
191     int asi;
192 #endif
193     DisasDelayException *delay_excp_list;
194 } DisasContext;
195 
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

/* Sign-extending variants of the field extractors above. */
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

/* Bit 13 of an insn word selects the immediate form. */
#define IS_IMM (insn & (1<<13))
211 
/*
 * Mark the half of the FP register file containing %f<rd> as dirty
 * in FPRS (sparc64 only; a no-op on 32-bit builds).
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;    /* lower vs upper half of the file */
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
224 
225 /* floating point registers moves */
226 
gen_offset_fpr_F(unsigned int reg)227 static int gen_offset_fpr_F(unsigned int reg)
228 {
229     int ret;
230 
231     tcg_debug_assert(reg < 32);
232     ret= offsetof(CPUSPARCState, fpr[reg / 2]);
233     if (reg & 1) {
234         ret += offsetof(CPU_DoubleU, l.lower);
235     } else {
236         ret += offsetof(CPU_DoubleU, l.upper);
237     }
238     return ret;
239 }
240 
gen_load_fpr_F(DisasContext * dc,unsigned int src)241 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
242 {
243     TCGv_i32 ret = tcg_temp_new_i32();
244     tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
245     return ret;
246 }
247 
gen_store_fpr_F(DisasContext * dc,unsigned int dst,TCGv_i32 v)248 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
249 {
250     tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
251     gen_update_fprs_dirty(dc, dst);
252 }
253 
gen_offset_fpr_D(unsigned int reg)254 static int gen_offset_fpr_D(unsigned int reg)
255 {
256     tcg_debug_assert(reg < 64);
257     tcg_debug_assert(reg % 2 == 0);
258     return offsetof(CPUSPARCState, fpr[reg / 2]);
259 }
260 
gen_load_fpr_D(DisasContext * dc,unsigned int src)261 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
262 {
263     TCGv_i64 ret = tcg_temp_new_i64();
264     tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
265     return ret;
266 }
267 
gen_store_fpr_D(DisasContext * dc,unsigned int dst,TCGv_i64 v)268 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
269 {
270     tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
271     gen_update_fprs_dirty(dc, dst);
272 }
273 
gen_load_fpr_Q(DisasContext * dc,unsigned int src)274 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
275 {
276     TCGv_i128 ret = tcg_temp_new_i128();
277     TCGv_i64 h = gen_load_fpr_D(dc, src);
278     TCGv_i64 l = gen_load_fpr_D(dc, src + 2);
279 
280     tcg_gen_concat_i64_i128(ret, l, h);
281     return ret;
282 }
283 
gen_store_fpr_Q(DisasContext * dc,unsigned int dst,TCGv_i128 v)284 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
285 {
286     TCGv_i64 h = tcg_temp_new_i64();
287     TCGv_i64 l = tcg_temp_new_i64();
288 
289     tcg_gen_extr_i128_i64(l, h, v);
290     gen_store_fpr_D(dc, dst, h);
291     gen_store_fpr_D(dc, dst + 2, l);
292 }
293 
294 /* moves */
295 #ifdef CONFIG_USER_ONLY
296 #define supervisor(dc) 0
297 #define hypervisor(dc) 0
298 #else
299 #ifdef TARGET_SPARC64
300 #define hypervisor(dc) (dc->hypervisor)
301 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
302 #else
303 #define supervisor(dc) (dc->supervisor)
304 #define hypervisor(dc) 0
305 #endif
306 #endif
307 
308 #if !defined(TARGET_SPARC64)
309 # define AM_CHECK(dc)  false
310 #elif defined(TARGET_ABI32)
311 # define AM_CHECK(dc)  true
312 #elif defined(CONFIG_USER_ONLY)
313 # define AM_CHECK(dc)  false
314 #else
315 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
316 #endif
317 
gen_address_mask(DisasContext * dc,TCGv addr)318 static void gen_address_mask(DisasContext *dc, TCGv addr)
319 {
320     if (AM_CHECK(dc)) {
321         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
322     }
323 }
324 
address_mask_i(DisasContext * dc,target_ulong addr)325 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
326 {
327     return AM_CHECK(dc) ? (uint32_t)addr : addr;
328 }
329 
gen_load_gpr(DisasContext * dc,int reg)330 static TCGv gen_load_gpr(DisasContext *dc, int reg)
331 {
332     if (reg > 0) {
333         assert(reg < 32);
334         return cpu_regs[reg];
335     } else {
336         TCGv t = tcg_temp_new();
337         tcg_gen_movi_tl(t, 0);
338         return t;
339     }
340 }
341 
gen_store_gpr(DisasContext * dc,int reg,TCGv v)342 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
343 {
344     if (reg > 0) {
345         assert(reg < 32);
346         tcg_gen_mov_tl(cpu_regs[reg], v);
347     }
348 }
349 
gen_dest_gpr(DisasContext * dc,int reg)350 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
351 {
352     if (reg > 0) {
353         assert(reg < 32);
354         return cpu_regs[reg];
355     } else {
356         return tcg_temp_new();
357     }
358 }
359 
use_goto_tb(DisasContext * s,target_ulong pc,target_ulong npc)360 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
361 {
362     return translator_use_goto_tb(&s->base, pc) &&
363            translator_use_goto_tb(&s->base, npc);
364 }
365 
gen_goto_tb(DisasContext * s,int tb_num,target_ulong pc,target_ulong npc)366 static void gen_goto_tb(DisasContext *s, int tb_num,
367                         target_ulong pc, target_ulong npc)
368 {
369     if (use_goto_tb(s, pc, npc))  {
370         /* jump to same page: we can use a direct jump */
371         tcg_gen_goto_tb(tb_num);
372         tcg_gen_movi_tl(cpu_pc, pc);
373         tcg_gen_movi_tl(cpu_npc, npc);
374         tcg_gen_exit_tb(s->base.tb, tb_num);
375     } else {
376         /* jump to another page: we can use an indirect jump */
377         tcg_gen_movi_tl(cpu_pc, pc);
378         tcg_gen_movi_tl(cpu_npc, npc);
379         tcg_gen_lookup_and_goto_ptr();
380     }
381 }
382 
gen_carry32(void)383 static TCGv gen_carry32(void)
384 {
385     if (TARGET_LONG_BITS == 64) {
386         TCGv t = tcg_temp_new();
387         tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
388         return t;
389     }
390     return cpu_icc_C;
391 }
392 
/*
 * dst = src1 + src2 (+ cin), updating all condition codes.
 * N and Z take the result; C comes from the wide add; V is computed
 * as (result ^ src2) & ~(src1 ^ src2).  On 64-bit builds the 32-bit
 * icc copies are derived as well: icc.C lands in bit 32 of cpu_icc_C.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_addcio_tl(cpu_cc_N, cpu_cc_C, src1, src2, cin);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

/* ADDcc: add, set condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

/* TADDcc: tagged add; tag bits (0-1) of either operand force icc.V. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* ADDC: add with 32-bit carry in, no cc update. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

/* ADDCcc: add with 32-bit carry in, set condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

/* ADDXC: add with xcc carry in, no cc update (sparc64). */
static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

/* ADDXCcc: add with xcc carry in, set condition codes (sparc64). */
static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}
459 
/*
 * dst = src1 - src2 (- cin), updating all condition codes.
 * The wide subtract produces a -1/0 borrow which is negated into C;
 * V is (result ^ src1) & (src1 ^ src2).  On sparc64 the 32-bit icc
 * copies are also derived, with icc.C placed in bit 32 of cpu_icc_C.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    /* Carry-in to bit 32 is result ^ src1 ^ src2; src xor is in Z. */
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

/* SUBcc: subtract, set condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

/* TSUBcc: tagged subtract; tag bits of either operand force icc.V. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* SUBC: subtract with 32-bit borrow in, no cc update. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

/* SUBCcc: subtract with 32-bit borrow in, set condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

/* SUBXC: subtract with xcc borrow in, no cc update (sparc64). */
static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

/* SUBXCcc: subtract with xcc borrow in, set condition codes (sparc64). */
static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}
524 
/*
 * MULScc: one step of the V8 multiply-step algorithm.
 * Shifts (N^V):src1 right into the partial product, shifts src1's
 * low bit into %y, conditionally adds src2 depending on %y bit 0,
 * and sets the condition codes via gen_op_addcc.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
561 
gen_op_multiply(TCGv dst,TCGv src1,TCGv src2,int sign_ext)562 static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
563 {
564 #if TARGET_LONG_BITS == 32
565     if (sign_ext) {
566         tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
567     } else {
568         tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
569     }
570 #else
571     TCGv t0 = tcg_temp_new_i64();
572     TCGv t1 = tcg_temp_new_i64();
573 
574     if (sign_ext) {
575         tcg_gen_ext32s_i64(t0, src1);
576         tcg_gen_ext32s_i64(t1, src2);
577     } else {
578         tcg_gen_ext32u_i64(t0, src1);
579         tcg_gen_ext32u_i64(t1, src2);
580     }
581 
582     tcg_gen_mul_i64(dst, t0, t1);
583     tcg_gen_shri_i64(cpu_y, dst, 32);
584 #endif
585 }
586 
gen_op_umul(TCGv dst,TCGv src1,TCGv src2)587 static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
588 {
589     /* zero-extend truncated operands before multiplication */
590     gen_op_multiply(dst, src1, src2, 0);
591 }
592 
gen_op_smul(TCGv dst,TCGv src1,TCGv src2)593 static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
594 {
595     /* sign-extend truncated operands before multiplication */
596     gen_op_multiply(dst, src1, src2, 1);
597 }
598 
gen_op_umulxhi(TCGv dst,TCGv src1,TCGv src2)599 static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
600 {
601     TCGv discard = tcg_temp_new();
602     tcg_gen_mulu2_tl(discard, dst, src1, src2);
603 }
604 
gen_op_fpmaddx(TCGv_i64 dst,TCGv_i64 src1,TCGv_i64 src2,TCGv_i64 src3)605 static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
606                            TCGv_i64 src2, TCGv_i64 src3)
607 {
608     TCGv_i64 t = tcg_temp_new_i64();
609 
610     tcg_gen_mul_i64(t, src1, src2);
611     tcg_gen_add_i64(dst, src3, t);
612 }
613 
gen_op_fpmaddxhi(TCGv_i64 dst,TCGv_i64 src1,TCGv_i64 src2,TCGv_i64 src3)614 static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
615                              TCGv_i64 src2, TCGv_i64 src3)
616 {
617     TCGv_i64 l = tcg_temp_new_i64();
618     TCGv_i64 h = tcg_temp_new_i64();
619     TCGv_i64 z = tcg_constant_i64(0);
620 
621     tcg_gen_mulu2_i64(l, h, src1, src2);
622     tcg_gen_add2_i64(l, dst, l, h, src3, z);
623 }
624 
/*
 * SDIV: signed 64/32 divide via helper; the helper's 64-bit return
 * carries the quotient in the low half (the high half holds the
 * overflow indication, unused here).
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

/*
 * UDIVcc: unsigned divide, setting condition codes.  The helper
 * returns quotient in bits 0-31 and the V flag in bit 32; Z mirrors
 * the quotient and C is cleared.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;     /* write the helper result straight into V's temp */
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

    /* Set N to the quotient, V to the overflow bit. */
#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

/* SDIVcc: signed counterpart of gen_op_udivcc. */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

/* TADDccTV: tagged add that traps on tag overflow (helper raises it). */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

/* TSUBccTV: tagged subtract that traps on tag overflow. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
696 
/* POPC: population count of src2 (src1 is architecturally %g0). */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

/* LZCNT: count leading zeros; a zero input yields TARGET_LONG_BITS. */
static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
/* VIS ARRAY8 exists only on sparc64; never reachable on 32-bit. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

/* ARRAY16: ARRAY8 address scaled for 16-bit elements. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

/* ARRAY32: ARRAY8 address scaled for 32-bit elements. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
725 
/* VIS FPACK16 (sparc64 only; scale factor comes from %gsr). */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* VIS FPACKFIX (sparc64 only). */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* VIS FPACK32 (sparc64 only). */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
752 
/* Two-lane 16-bit signed saturating add (single FP register form). */
static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        /* Extract each 16-bit lane, add, then clamp to int16 range. */
        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

/* Two-lane 16-bit signed saturating subtract. */
static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

/*
 * 32-bit signed saturating add.  Overflow v is computed as for addcc;
 * on overflow the result is replaced by INT32_MAX or INT32_MIN
 * according to the sign of the wrapped result.
 */
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    /* t = (r >= 0 ? INT32_MAX + 1 : INT32_MAX) == saturated value */
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

/* 32-bit signed saturating subtract; mirrors gen_op_fpadds32s. */
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
824 
/*
 * VIS FALIGNDATA: concatenate s1:s2 and extract 8 bytes starting at
 * the byte offset held in bits 0-2 of gsr (sparc64 only).
 */
static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);   /* byte offset -> bit count */
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

/* FALIGNDATA using the live %gsr. */
static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}

/* VIS BSHUFFLE (sparc64 only; byte selector comes from %gsr). */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

/* PDISTN: pixel distance with a zero accumulator (sparc64 only). */
static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}
875 
gen_op_fmul8x16al(TCGv_i64 dst,TCGv_i32 src1,TCGv_i32 src2)876 static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
877 {
878     tcg_gen_ext16s_i32(src2, src2);
879     gen_helper_fmul8x16a(dst, src1, src2);
880 }
881 
/*
 * FMUL8x16AU: multiply by the (arithmetically shifted) high 16 bits of src2.
 * Note: src2 is clobbered in place before calling the helper.
 */
static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}
887 
/*
 * FMULD8ULX16: two independent 8u x 16s products, packed into a 64-bit
 * result (low product in the low half, high product in the high half).
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 lo_prod = tcg_temp_new_i32();
    TCGv_i32 hi_prod = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Low lane: unsigned low byte of src1 times signed low half of src2. */
    tcg_gen_ext8u_i32(lo_prod, src1);
    tcg_gen_ext16s_i32(tmp, src2);
    tcg_gen_mul_i32(lo_prod, lo_prod, tmp);

    /* High lane: unsigned byte at bits [23:16] times signed high half. */
    tcg_gen_extract_i32(hi_prod, src1, 16, 8);
    tcg_gen_sextract_i32(tmp, src2, 16, 16);
    tcg_gen_mul_i32(hi_prod, hi_prod, tmp);

    tcg_gen_concat_i32_i64(dst, lo_prod, hi_prod);
}
904 
/*
 * FMULD8SUX16: two independent products of the signed upper byte of each
 * 16-bit lane of src1 with the corresponding signed 16-bit lane of src2.
 */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 lo_prod = tcg_temp_new_i32();
    TCGv_i32 hi_prod = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(lo_prod, src1);
    tcg_gen_andi_i32(lo_prod, lo_prod, ~0xff);
    tcg_gen_ext16s_i32(tmp, src2);
    tcg_gen_mul_i32(lo_prod, lo_prod, tmp);

    tcg_gen_sextract_i32(hi_prod, src1, 16, 16);
    tcg_gen_andi_i32(hi_prod, hi_prod, ~0xff);
    tcg_gen_sextract_i32(tmp, src2, 16, 16);
    tcg_gen_mul_i32(hi_prod, hi_prod, tmp);

    tcg_gen_concat_i32_i64(dst, lo_prod, hi_prod);
}
929 
930 #ifdef TARGET_SPARC64
/*
 * Vector expansion for FCHKSM16: per-lane end-around-carry addition
 * (add, then add back the carry-out of each lane).
 */
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    /* Unsigned overflow occurred iff the lane sum is below an addend. */
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}
942 
/*
 * gvec expansion for FCHKSM16: vector path via gen_vec_fchksm16,
 * scalar 64-bit fallback via the out-of-line helper.
 */
static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
957 
/*
 * Vector expansion for FMEAN16: per-lane rounded signed average,
 * (s1 + s2 + 1) >> 1, computed as (s1>>1) + (s2>>1) + ((s1|s2) & 1)
 * to avoid intermediate overflow.
 * Note: src1 and src2 are clobbered in place.
 */
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    /* Rounding term: 1 when either input lane is odd. */
    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
970 
/*
 * gvec expansion for FMEAN16: vector path via gen_vec_fmean16,
 * scalar 64-bit fallback via the out-of-line helper.
 */
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
985 #else
986 #define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
987 #define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
988 #endif
989 
/* Mark the point past which the current insn can no longer unwind. */
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}
1002 
/*
 * Materialize a pending conditional branch: select the next npc from
 * the two candidate targets according to the saved comparison.
 */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv pc_if_true = tcg_constant_tl(dc->jump_pc[0]);
    TCGv pc_if_false = tcg_constant_tl(dc->jump_pc[1]);
    TCGv cmp2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, cmp2,
                       pc_if_true, pc_if_false);
}
1011 
/*
 * Call this function before using the condition register, as it may
 * have been set up for a pending conditional branch (npc == JUMP_PC).
 */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
1021 
/* Flush the translation-time npc value into the cpu_npc TCG global. */
static void save_npc(DisasContext *dc)
{
    /* A 4-byte-aligned npc is a real target address: store it directly. */
    if ((dc->npc & 3) == 0) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
        return;
    }

    switch (dc->npc) {
    case JUMP_PC:
        /* Pending conditional branch: resolve it into cpu_npc now. */
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
        break;
    case DYNAMIC_PC:
    case DYNAMIC_PC_LOOKUP:
        /* cpu_npc already holds the runtime value. */
        break;
    default:
        g_assert_not_reached();
    }
}
1040 
/* Flush both pc and npc into the TCG globals before a possible exception. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1046 
/* Raise exception WHICH immediately; ends the translation block. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1054 
delay_exceptionv(DisasContext * dc,TCGv_i32 excp)1055 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1056 {
1057     DisasDelayException *e = g_new0(DisasDelayException, 1);
1058 
1059     e->next = dc->delay_excp_list;
1060     dc->delay_excp_list = e;
1061 
1062     e->lab = gen_new_label();
1063     e->excp = excp;
1064     e->pc = dc->pc;
1065     /* Caller must have used flush_cond before branch. */
1066     assert(e->npc != JUMP_PC);
1067     e->npc = dc->npc;
1068 
1069     return e->lab;
1070 }
1071 
/* As delay_exceptionv, for a compile-time-constant exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1076 
/*
 * Emit a runtime alignment check: branch to a delayed TT_UNALIGNED
 * exception if any of the MASK bits of ADDR are set.
 */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGLabel *excp_lab;
    TCGv low_bits = tcg_temp_new();

    tcg_gen_andi_tl(low_bits, addr, mask);

    /* Resolve any pending conditional branch before taking a side exit. */
    flush_cond(dc);
    excp_lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, low_bits, 0, excp_lab);
}
1088 
/*
 * Advance pc to npc (the delayed-branch step), handling the symbolic
 * npc states (JUMP_PC, DYNAMIC_PC*) as well as a concrete address.
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending branch, then copy it into pc. */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both values are known at translation time. */
        dc->pc = dc->npc;
    }
}
1112 
/*
 * Build a DisasCompare for integer condition code COND (SPARC Bicc
 * encoding).  XCC selects the 64-bit condition codes; otherwise the
 * 32-bit icc views are used.  Bit 3 of COND inverts the condition.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* icc carry lives in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1211 
/*
 * Build a DisasCompare for floating-point condition COND against
 * condition-code field %fccCC.  Bit 3 of COND inverts the condition.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* Both encodings with bit 0 set. */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1271 
gen_compare_reg(DisasCompare * cmp,int cond,TCGv r_src)1272 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1273 {
1274     static const TCGCond cond_reg[4] = {
1275         TCG_COND_NEVER,  /* reserved */
1276         TCG_COND_EQ,
1277         TCG_COND_LE,
1278         TCG_COND_LT,
1279     };
1280     TCGCond tcond;
1281 
1282     if ((cond & 3) == 0) {
1283         return false;
1284     }
1285     tcond = cond_reg[cond & 3];
1286     if (cond & 4) {
1287         tcond = tcg_invert_cond(tcond);
1288     }
1289 
1290     cmp->cond = tcond;
1291     cmp->c1 = tcg_temp_new();
1292     cmp->c2 = 0;
1293     tcg_gen_mov_tl(cmp->c1, r_src);
1294     return true;
1295 }
1296 
/* Clear FSR.cexc and FSR.ftt, as done at the start of every FPop. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1302 
/* FMOVs: single-precision register move (non-arithmetic FPop). */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1308 
/* FNEGs: flip the sign bit; never raises IEEE exceptions. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}
1314 
/* FABSs: clear the sign bit; never raises IEEE exceptions. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
1320 
/* FMOVd: double-precision register move (non-arithmetic FPop). */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1326 
/* FNEGd: flip the sign bit; never raises IEEE exceptions. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}
1332 
/* FABSd: clear the sign bit; never raises IEEE exceptions. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1338 
/* FNEGq: flip the sign bit of a 128-bit value (bit 63 of the high word). */
static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(lo, hi, src);
    tcg_gen_xori_i64(hi, hi, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, lo, hi);
}
1348 
/* FABSq: clear the sign bit of a 128-bit value (bit 63 of the high word). */
static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(lo, hi, src);
    tcg_gen_andi_i64(hi, hi, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, lo, hi);
}
1358 
/* FMADDs: d = s1 * s2 + s3 (fused; zero scale, no negate flags). */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
}
1364 
/* FMADDd: d = s1 * s2 + s3 (fused; zero scale, no negate flags). */
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
}
1370 
/* FMSUBs: d = s1 * s2 - s3, via the negate-addend muladd flag. */
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
1377 
/* FMSUBd: d = s1 * s2 - s3, via the negate-addend muladd flag. */
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
1384 
/* FNMSUBs: d = -(s1 * s2 - s3), negating both addend and result. */
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
                                   float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
1392 
/* FNMSUBd: d = -(s1 * s2 - s3), negating both addend and result. */
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
                                   float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
1400 
/* FNMADDs: d = -(s1 * s2 + s3), via the negate-result muladd flag. */
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}
1407 
/* FNMADDd: d = -(s1 * s2 + s3), via the negate-result muladd flag. */
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
1414 
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);  /* scale exponent by 2^-1 */
    TCGv_i32 op = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
1423 
/* Double-precision variant of gen_op_fhadds: (1 * s1) + s2 / 2. */
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);  /* scale exponent by 2^-1 */
    TCGv_i32 op = tcg_constant_i32(0);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
1431 
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);  /* scale exponent by 2^-1 */
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
1440 
/* Double-precision variant of gen_op_fhsubs: (1 * s1) - s2 / 2. */
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);  /* scale exponent by 2^-1 */
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
1448 
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);  /* scale exponent by 2^-1 */
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}
1457 
/* Double-precision variant of gen_op_fnhadds: -((1 * s1) + s2 / 2). */
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);  /* scale exponent by 2^-1 */
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
1465 
/* Raise TT_FP_EXCP with FSR.ftt set to FTT (a compile-time trap type). */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1477 
/*
 * Raise TT_NFPU_INSN if the FPU is disabled (system emulation only).
 * Returns true if the exception was raised and the insn must be abandoned.
 */
static bool gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return true;
    }
#endif
    return false;
}
1488 
/*
 * Model sparc32 fp_exception state: raise a sequence_error fp exception
 * if the FQ is non-empty.  Returns true if the insn must be abandoned.
 */
static bool gen_trap_iffpexception(DisasContext *dc)
{
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    /*
     * There are 3 states for the sparc32 fpu:
     * Normally the fpu is in fp_execute, and all insns are allowed.
     * When an exception is signaled, it moves to fp_exception_pending state.
     * Upon seeing the next FPop, the fpu moves to fp_exception state,
     * populates the FQ, and generates an fp_exception trap.
     * The fpu remains in fp_exception state until FQ becomes empty
     * after execution of a STDFQ instruction.  While the fpu is in
     * fp_exception state, and FPop, fp load or fp branch insn will
     * return to fp_exception_pending state, set FSR.FTT to sequence_error,
     * and the insn will not be entered into the FQ.
     *
     * In QEMU, we do not model the fp_exception_pending state and
     * instead populate FQ and raise the exception immediately.
     * But we can still honor fp_exception state by noticing when
     * the FQ is not empty.
     */
    if (dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }
#endif
    return false;
}
1516 
/* Combined FPU-availability check; true if an exception was raised. */
static bool gen_trap_if_nofpu_fpexception(DisasContext *dc)
{
    return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc);
}
1521 
1522 /* asi moves */
/* Dispatch category for an ASI access, chosen by resolve_asi(). */
typedef enum {
    GET_ASI_HELPER,   /* default: go through the out-of-line asi helper */
    GET_ASI_EXCP,     /* an exception was already raised; emit nothing */
    GET_ASI_DIRECT,   /* plain qemu_ld/st with the resolved mem_idx */
    GET_ASI_DTWINX,   /* twinx / quad-ldd 128-bit accesses */
    GET_ASI_CODE,     /* instruction-space access (sparc32 USERTXT/KERNELTXT) */
    GET_ASI_BLOCK,    /* ASI_BLK_* block transfers */
    GET_ASI_SHORT,    /* FL8/FL16 "short" fp accesses (memop overridden) */
    GET_ASI_BCOPY,    /* sparc32 ASI_M_BCOPY block copy */
    GET_ASI_BFILL,    /* sparc32 ASI_M_BFILL block fill */
} ASIType;

/* A fully resolved ASI access descriptor. */
typedef struct {
    ASIType type;
    int asi;       /* asi number (with %asi substituted on sparc64) */
    int mem_idx;   /* MMU index to use for the access */
    MemOp memop;   /* size/endianness; FL8/FL16 and LE asis adjust this */
} DisasASI;
1541 
1542 /*
1543  * Build DisasASI.
1544  * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1546  */
/*
 * Resolve ASI (or -1 for non-asi, negative for %asi on v9) into a
 * DisasASI: a dispatch type, the MMU index to use, and the final memop.
 * Privilege violations raise the appropriate exception here and return
 * type GET_ASI_EXCP.
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: select the MMU index. */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_MON_AIUP:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_MON_AIUS:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_MON_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
        case ASI_MON_P:
            break;
        }
        /* Second switch: select the dispatch type and memop overrides. */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
        case ASI_MON_P:
        case ASI_MON_S:
        case ASI_MON_AIUP:
        case ASI_MON_AIUS:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1781 
1782 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Stub for sparc32 user-only builds: the out-of-line ASI load helper
 * path should never be reached there (asserts if it is), but a
 * definition is needed so the shared code below compiles.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1788 
/*
 * Stub for sparc32 user-only builds: the out-of-line ASI store helper
 * path should never be reached there (asserts if it is).
 */
static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1794 #endif
1795 
/*
 * Generate an integer load through ASI @da from @addr into @dst.
 * GET_ASI_EXCP means get_asi() already raised an exception: emit nothing.
 */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
        /* Instruction-space load; only built for 32-bit system emulation. */
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* Unusual ASIs go through the out-of-line helper. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception: make pc/npc current. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper always returns 64 bits; narrow for sparc32. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1841 
/*
 * Generate an integer store of @src through ASI @da to @addr.
 */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to the 32-byte cache line. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            /* Copy the line as two 16-byte halves. */
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* Unusual ASIs go through the out-of-line helper. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception: make pc/npc current. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes a 64-bit value; widen for sparc32. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1920 
/*
 * Generate SWAP through ASI @da: atomically exchange @src with the word
 * at @addr, placing the old memory value in @dst.
 */
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    if (da->type == GET_ASI_EXCP) {
        /* get_asi() already raised an exception. */
        return;
    }
    if (da->type == GET_ASI_DIRECT) {
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
    } else {
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
    }
}
1937 
/*
 * Generate CASA/CASXA through ASI @da: compare the value at @addr with
 * @cmpv and, if equal, store @newv; the old memory value lands in @oldv.
 */
static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    if (da->type == GET_ASI_EXCP) {
        /* get_asi() already raised an exception. */
        return;
    }
    if (da->type == GET_ASI_DIRECT) {
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
    } else {
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
    }
}
1954 
/*
 * Generate LDSTUB through ASI @da: atomically load the byte at @addr
 * into @dst and store 0xff in its place.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* The non-atomic load+store sequence below is unsafe when
               other cpus run in parallel: retry under exclusive mode. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* The helpers may raise exceptions: make pc/npc current. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1989 
/*
 * Generate a floating-point load through ASI @da into fpr @rd.
 * @orig_size is the operand size encoded in the insn (MO_32/MO_64/MO_128),
 * kept separately because MO_128 is narrowed to paired MO_64 accesses below.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Two 64-bit loads fill the quad register pair rd/rd+2. */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            /* Load 64 bytes into eight consecutive double registers. */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            /* The helper may raise an exception: make pc/npc current. */
            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2110 
/*
 * Generate a floating-point store of fpr @rd through ASI @da to @addr.
 * @orig_size is the operand size encoded in the insn (MO_32/MO_64/MO_128),
 * kept separately because MO_128 is narrowed to paired MO_64 accesses below.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            d64 = gen_load_fpr_D(dc, rd + 2);
            tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            /* Store eight consecutive double registers (64 bytes). */
            for (int i = 0; ; ++i) {
                d64 = gen_load_fpr_D(dc, rd + 2 * i);
                tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2196 
/*
 * Generate LDDA through ASI @da from @addr into the even/odd register
 * pair rd/rd+1 (sparc32) or a 128-bit twinx load (sparc64).
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
        /* Instruction-space load; only built for 32-bit system emulation. */
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* The helper may raise an exception: make pc/npc current. */
            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2292 
/*
 * Generate STDA through ASI @da: store the even/odd register pair
 * rd/rd+1 to @addr (one 128-bit twinx store on sparc64).
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_BCOPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the 8-byte pair to a 16-byte fill pattern. */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            /* Truncate to the 32-byte cache line, fill as two halves. */
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            /* The helper may raise an exception: make pc/npc current. */
            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2383 
/*
 * Conditional move of a single-precision fpr: fpr[rd] = cond ? fpr[rs]
 * : fpr[rd].  sparc64 only.
 */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 cond64 = tcg_temp_new_i64();
    TCGv_i32 cond32, src_val, old_val, result;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    tcg_gen_setcondi_i64(cmp->cond, cond64, cmp->c1, cmp->c2);
    cond32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cond32, cond64);

    src_val = gen_load_fpr_F(dc, rs);
    old_val = gen_load_fpr_F(dc, rd);
    result = tcg_temp_new_i32();

    tcg_gen_movcond_i32(TCG_COND_NE, result, cond32, tcg_constant_i32(0),
                        src_val, old_val);

    gen_store_fpr_F(dc, rd, result);
#else
    qemu_build_not_reached();
#endif
}
2409 
/*
 * Conditional move of a double-precision fpr: fpr[rd] = cond ? fpr[rs]
 * : fpr[rd].  sparc64 only.
 */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 src_val = gen_load_fpr_D(dc, rs);
    TCGv_i64 old_val = gen_load_fpr_D(dc, rd);
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_movcond_i64(cmp->cond, result, cmp->c1, tcg_constant_tl(cmp->c2),
                        src_val, old_val);
    gen_store_fpr_D(dc, rd, result);
#else
    qemu_build_not_reached();
#endif
}
2422 
/*
 * Conditional move of a quad-precision fpr pair: each 64-bit half moves
 * under the same comparison.  sparc64 only.
 */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv c2 = tcg_constant_tl(cmp->c2);
    TCGv_i64 hi_res = tcg_temp_new_i64();
    TCGv_i64 lo_res = tcg_temp_new_i64();
    TCGv_i64 src_hi, src_lo, old_hi, old_lo;

    src_hi = gen_load_fpr_D(dc, rs);
    old_hi = gen_load_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, hi_res, cmp->c1, c2, src_hi, old_hi);

    src_lo = gen_load_fpr_D(dc, rs + 2);
    old_lo = gen_load_fpr_D(dc, rd + 2);
    tcg_gen_movcond_i64(cmp->cond, lo_res, cmp->c1, c2, src_lo, old_lo);

    gen_store_fpr_D(dc, rd, hi_res);
    gen_store_fpr_D(dc, rd + 2, lo_res);
#else
    qemu_build_not_reached();
#endif
}
2442 
2443 #ifdef TARGET_SPARC64
/* Compute r_tsptr = &env->ts[env->tl & MAXTL_MASK], the trap state for
   the current trap level.  */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 tl32 = tcg_temp_new_i32();
    TCGv_ptr byte_off = tcg_temp_new_ptr();

    /* Load env->tl and clamp it to [0 ... MAXTL_MASK]; MAXTL_MASK must
       be a power of 2 minus 1 for the mask to be valid.  */
    tcg_gen_ld_i32(tl32, tcg_env, offsetof(CPUSPARCState, tl));
    tcg_gen_andi_i32(tl32, tl32, MAXTL_MASK);

    /* Scale to a byte offset within the env->ts array, reusing tl32.  */
    tcg_gen_muli_i32(tl32, tl32, sizeof(trap_state));

    /* Add the offset to the base of env->ts.  */
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
    tcg_gen_ext_i32_ptr(byte_off, tl32);
    tcg_gen_add_ptr(r_tsptr, r_tsptr, byte_off);
}
2465 #endif
2466 
/* Decode a double-precision fpr number from insn field @x: sparc64 folds
   the low bit into bit 5 to address all 32 double registers.  */
static int extract_dfpreg(DisasContext *dc, int x)
{
#ifdef TARGET_SPARC64
    return (x & 0x1e) | ((x & 1) << 5);
#else
    return x & 0x1e;
#endif
}
2475 
/* Decode a quad-precision fpr number from insn field @x: sparc64 folds
   the low bit into bit 5, as for doubles, but quads align to 4.  */
static int extract_qfpreg(DisasContext *dc, int x)
{
#ifdef TARGET_SPARC64
    return (x & 0x1c) | ((x & 1) << 5);
#else
    return x & 0x1c;
#endif
}
2484 
2485 /* Include the auto-generated decoder.  */
2486 #include "decode-insns.c.inc"
2487 
/*
 * Expand a decodetree trans_* entry point: the insn decodes successfully
 * only when the AVAIL predicate holds for this cpu, then dispatches to
 * FUNC with any extra arguments.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Availability predicates used by TRANS, resolved per target width:
 * constant true/false where the answer is fixed by TARGET_SPARC64,
 * otherwise a per-cpu feature-bit test.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
# define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
# define avail_VIS3B(C)   avail_VIS3(C)
# define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_FMAF(C)    false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_IMA(C)     false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
# define avail_VIS3(C)    false
# define avail_VIS3B(C)   false
# define avail_VIS4(C)    false
#endif
2528 
2529 /* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        /* npc holds one of the special out-of-line marker values. */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at run time: advance pc/npc in tcg. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are known statically: advance by one insn. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2569 
2570 /*
2571  * Major opcodes 00 and 01 -- branches, call, and sethi
2572  */
2573 
/*
 * Update pc/npc for a conditional branch to pc + @disp insns, with
 * comparison @cmp and the annul bit @annul.  Always returns true.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        /* Branch always: annul skips the delay slot entirely. */
        if (annul) {
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        /* Branch never: fall through, optionally annulling the slot. */
        npc = dc->npc;
        if (npc & 3) {
            /* npc is dynamic: advance it in tcg. */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Annulled conditional: the delay slot executes only if taken,
           so the TB must end here with a two-way branch. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Dynamic npc: select the branch target with movcond. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the branch decision past the delay slot insn. */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2653 
/* Raise a privileged-instruction trap; always returns true. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2659 
/* Raise fp_exception with FTT = unimplemented FPop; always returns true. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2665 
gen_trap_float128(DisasContext * dc)2666 static bool gen_trap_float128(DisasContext *dc)
2667 {
2668     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2669         return false;
2670     }
2671     return raise_unimpfpop(dc);
2672 }
2673 
/* Bicc / BPcc: branch on integer condition codes. */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2684 
/* FBfcc / FBPfcc: branch on floating-point condition codes. */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* Trap first if the FPU is disabled or an FP exception is pending. */
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2698 
/* BPr (sparc64 only): branch on the contents of register rs1. */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* gen_compare_reg rejects reserved rcond encodings. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}
2711 
/* CALL: save the current pc into r15 (%o7) and jump to pc + disp30*4. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2721 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    /* Returning false lets the decoder fall through to illegal-insn. */
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2735 
trans_SETHI(DisasContext * dc,arg_SETHI * a)2736 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2737 {
2738     /* Special-case %g0 because that's the canonical nop.  */
2739     if (a->rd) {
2740         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2741     }
2742     return advance_pc(dc);
2743 }
2744 
2745 /*
2746  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2747  */
2748 
/*
 * Tcc: trap on condition, with trap number rs1 + rs2/imm.
 * @cond: 4-bit condition field (0 = never, 8 = always)
 * @cc:   which condition-code set to test
 * The trap number is masked to 7 bits (V8) or 8 bits when the
 * hypervisor feature is present and we are in supervisor mode.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* Compute (rs1 + rs2/imm) & mask + TT_TRAP at runtime. */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2801 
/* Tcc with register operand; cc != 0 is a v9-only encoding. */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}
2809 
/* Tcc with v7/v8 immediate encoding (no cc field); 32-bit only. */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}
2817 
/* Tcc with v9 immediate encoding (includes cc field); 64-bit only. */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2825 
/* STBAR: store-store barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2831 
/* MEMBAR (sparc64 only): memory barrier with mmask/cmask fields. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2847 
/*
 * Common helper for RD%asr/RD%pr style insns: check privilege, then
 * store func()'s result into rd.  func may fill the supplied dest
 * temp or return a pre-existing global.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2857 
/* RDY: %y is kept in a global; ignore the dest temp. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}
2862 
static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        /* On v9 only rs1 == 0 encodes RDY. */
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2875 
/* Leon3 %asr17: configuration register, read via helper. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2883 
/* %pic is not modeled; reads always return 0. */
static TCGv do_rdpic(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(0);
}

TRANS(RDPIC, HYPV, do_rd_special, supervisor(dc), a->rd, do_rdpic)
2890 
2891 
/* RDCCR: assemble %ccr from env state via helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2899 
/* RDASI: the current %asi is known at translation time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2910 
/* Read the %tick counter; this touches icount, so start I/O. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2926 
/* RDPC: the pc of the current insn is known at translation time. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2933 
/* RDFPRS: %fprs lives in a 32-bit global; sign-extend to target width. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2941 
/* RDGSR: requires the FPU to be enabled. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2949 
/* Read %softint (32-bit field, sign-extended). */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2957 
/* Read %tick_cmpr directly from env. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2966 
/* Read the %stick counter; like do_rdtick but for the system tick. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2982 
/* Read %stick_cmpr directly from env. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2991 
/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    /* Always report "running" (constant 1). */
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3003 
/* RDPSR (sparc32 only): assemble %psr via helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3011 
/* Read hyperprivileged %hpstate from env. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3019 
/* Read %htstate for the current trap level: env->htstate[env->tl]. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Index the htstate array by (tl & MAXTL_MASK) * sizeof(uint64_t). */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3036 
/* Read %hintp from env. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3044 
/* Read %htba (hypervisor trap base address) from env. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3052 
/* Read %hver from env. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3060 
/* Read %hstick_cmpr from env. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
3069 
/* RDWIM (sparc32 only): read the window-invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3077 
/* Read %tpc from the trap state for the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3092 
/* Read %tnpc from the trap state for the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3107 
/* Read %tstate from the trap state for the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3122 
/* Read %tt (32-bit field, sign-extended) from the current trap state. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3138 
/* %tbr/%tba is kept in a global; ignore the dest temp. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3146 
/* Read %pstate (32-bit field, sign-extended). */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3154 
/* Read %tl (32-bit field, sign-extended). */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3162 
/* Read %pil; psrpil exists on both 32- and 64-bit env layouts. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3170 
/* Read %cwp via helper (handles window-pointer representation). */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3178 
/* Read %cansave (32-bit field, sign-extended). */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3186 
/* Read %canrestore (32-bit field, sign-extended). */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)
3195 
/* Read %cleanwin (32-bit field, sign-extended). */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3203 
/* Read %otherwin (32-bit field, sign-extended). */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3211 
/* Read %wstate (32-bit field, sign-extended). */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3219 
/* Read %gl (32-bit field, sign-extended); UA2005 global-level register. */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3227 
/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3236 
/* Read %ver from env. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3244 
trans_FLUSHW(DisasContext * dc,arg_FLUSHW * a)3245 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3246 {
3247     if (avail_64(dc)) {
3248         gen_helper_flushw(tcg_env);
3249         return advance_pc(dc);
3250     }
3251     return false;
3252 }
3253 
/*
 * Common helper for WR%asr/WR%pr style insns: check privilege, compute
 * the architectural write value rs1 XOR rs2_or_imm, and hand it to func.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* With rs1 == %g0, the xor degenerates to the constant operand. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with 0: pass rs1 through unchanged. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3285 
/* WRY: %y holds only 32 significant bits; zero-extend on write. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3292 
/* WRCCR: scatter %ccr into env state via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3299 
/* WRASI: only the low 8 bits are kept. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3311 
/* WRFPRS: invalidate the cached dirty state and end the TB. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3324 
do_priv_nop(DisasContext * dc,bool priv)3325 static bool do_priv_nop(DisasContext *dc, bool priv)
3326 {
3327     if (!priv) {
3328         return raise_priv(dc);
3329     }
3330     return advance_pc(dc);
3331 }
3332 
TRANS(WRPCR,HYPV,do_priv_nop,supervisor (dc))3333 TRANS(WRPCR, HYPV, do_priv_nop, supervisor(dc))
3334 TRANS(WRPIC, HYPV, do_priv_nop, supervisor(dc))
3335 
/* WRGSR: requires the FPU to be enabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3343 
/* WR %softint_set: set bits in %softint via helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3350 
/* WR %softint_clr: clear bits in %softint via helper. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3357 
/* WR %softint: replace %softint via helper. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3364 
/* WR %tick_cmpr: store the value and reprogram the tick timer limit. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3378 
do_wrstick(DisasContext * dc,TCGv src)3379 static void do_wrstick(DisasContext *dc, TCGv src)
3380 {
3381 #ifdef TARGET_SPARC64
3382     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3383 
3384     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3385     translator_io_start(&dc->base);
3386     gen_helper_tick_set_count(r_tickptr, src);
3387     /* End TB to handle timer interrupt */
3388     dc->base.is_jmp = DISAS_EXIT;
3389 #else
3390     qemu_build_not_reached();
3391 #endif
3392 }
3393 
3394 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3395 
/* WR %stick_cmpr: store the value and reprogram the stick timer limit. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3409 
/* WR power-down register: state must be saved before the helper sleeps. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3418 
static void do_wrmwait(DisasContext *dc, TCGv src)
{
    /*
     * TODO: This is a stub version of mwait, which merely recognizes
     * interrupts immediately and does not wait.
     */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3429 
/* WRPSR (sparc32 only): helper scatters %psr; end the TB afterwards. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3437 
do_wrwim(DisasContext * dc,TCGv src)3438 static void do_wrwim(DisasContext *dc, TCGv src)
3439 {
3440     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3441     TCGv tmp = tcg_temp_new();
3442 
3443     tcg_gen_andi_tl(tmp, src, mask);
3444     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3445 }
3446 
3447 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3448 
/* Write %tpc into the trap state for the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3462 
/* Write %tnpc into the trap state for the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3476 
/* Write %tstate into the trap state for the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3490 
/* Write %tt (32-bit field) into the trap state for the current trap level. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3504 
/* WR %tick: set the tick counter value via helper. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3517 
/* WR %tba: the trap base address lives in the cpu_tbr global. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3524 
/* WR %pstate: helper may change execution mode; npc becomes dynamic. */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3536 
/* WR %tl: changing trap level invalidates trap-state caching; npc dynamic. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3545 
/* WR %pil: may unmask interrupts, hence the io_start/EXIT dance. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3555 
/* WR %cwp via helper (handles window rotation). */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3562 
/* Write the %cansave window-management register. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3569 
/* Write the %canrestore window-management register. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3576 
/* Write the %cleanwin window-management register. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3583 
/* Write the %otherwin window-management register. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3590 
/* Write the %wstate window-management register. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3597 
/* Write the global-level register %gl; helper handles global-set switch. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3604 
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* Sparc32 WRTBR writes the same register as sparc64 WRPR %tba. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3614 
/* Write %hpstate; hypervisor state changed, so end the TB. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3622 
/* Write %htstate[TL]: store src into the htstate slot for the current TL. */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Compute tcg_env + 8 * (env->tl & MAXTL_MASK); entries are 8 bytes. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    /* The htstate offset then indexes the selected array element. */
    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3638 
/* Write the hypervisor interrupt pending register %hintp. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3645 
/* Write the hypervisor trap base address %htba. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3652 
/* Write %hstick_cmpr and program the hstick timer with the new limit. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    /* Keep the architectural copy in env in sync with the timer. */
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3667 
/*
 * Translate SAVED/RESTORED: privileged window-management hints.
 * SAVED is selected when 'saved' is true, RESTORED otherwise.
 */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3683 
/* Translate NOP: just step past the instruction. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3695 
/*
 * Common translation for two-source arithmetic/logic instructions with a
 * register-or-immediate second operand.  FUNC generates the reg-reg form,
 * FUNCI (optional) the reg-imm form.  With LOGIC_CC set, the result is
 * computed directly into cpu_cc_N and the remaining flags are set to the
 * simple values used by logical operations (Z = result, C = V = 0).
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* The result doubles as the N flag for the CC update below. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* On sparc64, the 32-bit icc copies get the same values. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3738 
do_arith(DisasContext * dc,arg_r_r_ri_cc * a,void (* func)(TCGv,TCGv,TCGv),void (* funci)(TCGv,TCGv,target_long),void (* func_cc)(TCGv,TCGv,TCGv))3739 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3740                      void (*func)(TCGv, TCGv, TCGv),
3741                      void (*funci)(TCGv, TCGv, target_long),
3742                      void (*func_cc)(TCGv, TCGv, TCGv))
3743 {
3744     if (a->cc) {
3745         return do_arith_int(dc, a, func_cc, NULL, false);
3746     }
3747     return do_arith_int(dc, a, func, funci, false);
3748 }
3749 
/*
 * Dispatch a logical instruction: same generators for cc and non-cc
 * forms; a->cc merely enables the simple flag update in do_arith_int.
 */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3783 
/* Translate OR, special-casing the MOV alias for a direct register copy. */
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3800 
/*
 * Translate UDIV: unsigned divide of the 64-bit value {Y, rs1} by the
 * 32-bit rs2/imm, with the quotient clamped to UINT32_MAX.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A zero immediate divisor traps unconditionally at translation time. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime check: trap if the register divisor's low 32 bits are 0. */
        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        /* Zero-extend the 32-bit divisor into a 64-bit value. */
        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Build the 64-bit dividend with %y in the high half. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate an over-wide quotient to the 32-bit maximum. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3853 
/* Translate UDIVX: full-width unsigned divide (sparc64 only). */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A zero immediate divisor traps unconditionally at translation time. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime check: trap if the register divisor is zero. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3891 
/* Translate SDIVX: full-width signed divide (sparc64 only). */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A zero immediate divisor traps unconditionally at translation time. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        /* x / -1 == -x; this also sidesteps the INT64_MIN / -1 case. */
        if (unlikely(a->rs2_or_imm == -1)) {
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime check: trap if the register divisor is zero. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3948 
/*
 * Translate the VIS EDGE* instructions: compute a partial-store edge
 * mask for WIDTH-bit elements from the byte addresses in rs1/rs2.
 * With CC set, also update the condition codes as for subcc(rs1, rs2).
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /*
     * Extract the element index within an 8-byte word into l (from s1)
     * and r (from s2, bit-flipped); m is the all-elements mask.
     */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l : l & r) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
4027 
/* Common translation for unary register-to-register operations. */
static bool do_rr(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src = gen_load_gpr(dc, a->rs);

    func(dst, src);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
4040 
/* Common translation for three-register operations. */
static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)

TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)

TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
4064 
/*
 * VIS ALIGNADDRESS: dst = (s1 + s2) & ~7; the low 3 bits of the sum
 * are recorded in GSR.align for a subsequent FALIGNDATA.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
4077 
/*
 * VIS ALIGNADDRESS_LITTLE: as gen_op_alignaddr, but the negated low
 * bits of the sum go into GSR.align.
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4094 
/* VIS2 BMASK: dst = s1 + s2, with the sum also stored in GSR.mask. */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4106 
/* Common translation for VIS3 CMASK*: update GSR from rs2 via helper. */
static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
{
    func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
    return true;
}

TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4116 
/*
 * Translate register-count shifts.  L selects shift-left; otherwise U
 * selects logical (vs arithmetic) shift-right.  a->x selects the 64-bit
 * form; the count is masked to 63 or 31 accordingly.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit shift result is zero-extended. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend the input so high bits shift in as zero. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend the input for a 32-bit arithmetic shift. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4156 
/*
 * Translate immediate-count shifts.  L selects shift-left; otherwise U
 * selects logical (vs arithmetic) shift-right; a->x is the 64-bit form.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /* 32-bit shift on a 64-bit cpu: one deposit/extract does it all. */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4193 
4194 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4195 {
4196     /* For simplicity, we under-decoded the rs2 form. */
4197     if (!imm && rs2_or_imm & ~0x1f) {
4198         return NULL;
4199     }
4200     if (imm || rs2_or_imm == 0) {
4201         return tcg_constant_tl(rs2_or_imm);
4202     } else {
4203         return cpu_regs[rs2_or_imm];
4204     }
4205 }
4206 
/* Conditionally move src2 into rd when CMP holds; otherwise keep rd. */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4216 
/* Translate MOVcc: conditional move on integer condition codes. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4228 
/* Translate MOVfcc: conditional move on floating-point condition codes. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4240 
/* Translate MOVR: conditional move on a register comparison with zero. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    /* gen_compare_reg rejects the reserved register-condition encodings. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4254 
/*
 * Common translation for instructions whose operand is rs1 plus an
 * rs2-or-immediate addend (JMPL, RETT, RETURN, SAVE, RESTORE): compute
 * the sum into a fresh temporary and hand it to FUNC.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4279 
/* Translate JMPL: jump to src, saving the current pc into rd. */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    /* Misaligned targets raise a trap via the delayed-exception path. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4300 
/* Translate RETT (sparc32): privileged return from trap to src. */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    /* The helper restores the pre-trap processor state. */
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4318 
/* Translate RETURN (sparc64): restore the register window and jump to src. */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4333 
/* Translate SAVE: open a new register window, then write the sum to rd. */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4342 
/* Translate RESTORE: pop the register window, then write the sum to rd. */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4351 
/* Translate DONE/RETRY: privileged return from a trap handler. */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    /* The helper installs new pc/npc from the trap state. */
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4370 
4371 /*
4372  * Major opcode 11 -- load and store instructions
4373  */
4374 
/*
 * Compute the effective address rs1 + rs2/imm for a load or store,
 * applying the 32-bit address mask when required.  Returns NULL for
 * an invalid (over-wide) rs2 field.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Truncate to 32 bits; reuse tmp if the addition made one. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4403 
/* Common translation for integer loads (with optional ASI) of size MOP. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4427 
/* Common translation for integer stores (with optional ASI) of size MOP. */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4447 
/* Translate LDD: load a doubleword into an even/odd register pair. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    /* rd must name the even register of the pair. */
    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4464 
/* Translate STD: store a doubleword from an even/odd register pair. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    /* rd must name the even register of the pair. */
    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4481 
/* Translate LDSTUB: atomic load-store of 0xff into a byte. */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4498 
/*
 * SWAP: atomically exchange GPR rd with the 32-bit word at the
 * effective address.  rd supplies the value stored and receives the
 * value loaded.
 */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4516 
/*
 * CASA/CASXA: compare-and-swap.  The effective address is rs1 alone
 * (gen_ldst_addr is called with imm=true, offset 0); rs2 holds the
 * comparison value, rd holds the new value and receives the old one.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);     /* old value read from memory */
    n = gen_load_gpr(dc, a->rd);     /* new value to store on match */
    c = gen_load_gpr(dc, a->rs2_or_imm); /* comparison value */
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4538 
/*
 * Common helper for FP register loads of size SZ (MO_32/MO_64/MO_128)
 * from [rs1 + rs2/simm] with ASI resolution.  Traps are checked in
 * order: FPU availability / pending fp exception, then (for 128-bit
 * only) float128 support; either check may consume the insn by
 * emitting a trap, reported by returning true.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4566 
/*
 * Common helper for FP register stores of size SZ to [rs1 + rs2/simm]
 * with ASI resolution.  Unlike the load path this uses the plain
 * no-FPU check: stores remain legal while an fp exception is pending.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* Store insns are ok in fp_exception_pending state. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, 64, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4594 
/*
 * STDFQ: store double from the floating-point deferred-trap queue
 * (sparc32 only; rejected outright on sparc64 via !avail_32).
 * Privileged.  If the queue-not-empty flag is clear, raise an
 * fp exception with FTT=sequence_error instead of storing.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    TCGv addr;

    if (!avail_32(dc)) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }

    /* Store the single element from the queue. */
    TCGv_i64 fq = tcg_temp_new_i64();
    tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d));
    tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4);

    /* Mark the queue empty, transitioning to fp_execute state. */
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_qne));
    dc->fsr_qne = 0;

    return advance_pc(dc);
#else
    qemu_build_not_reached();
#endif
}
4633 
/*
 * LDFSR: load the 32-bit FSR from memory.  The fcc0 field is unpacked
 * into cpu_fcc[0]; the remaining non-fcc fields are installed via the
 * set_fsr_nofcc_noftt helper (FTT is not writable by LDFSR).
 */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4655 
/*
 * LDXFSR/LDXEFSR: load the 64-bit FSR (sparc64 only).  All four fcc
 * fields are unpacked into cpu_fcc[0..3].  With ENTIRE set (LDXEFSR)
 * the FTT field is also installed (set_fsr_nofcc); otherwise FTT is
 * preserved (set_fsr_nofcc_noftt), matching LDFSR semantics.
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /* Split the loaded value; fcc1-3 live in the high word. */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4694 
/*
 * STFSR/STXFSR: assemble the current FSR via helper and store it as a
 * 32- or 64-bit value.  As a store, this is legal even with an fp
 * exception pending, hence the plain no-FPU check.
 */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    /* Store insns are ok in fp_exception_pending state. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4716 
4717 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4718 {
4719     if (gen_trap_ifnofpu(dc)) {
4720         return true;
4721     }
4722     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4723     return advance_pc(dc);
4724 }
4725 
4726 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4727 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4728 
do_dc(DisasContext * dc,int rd,int64_t c)4729 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4730 {
4731     if (gen_trap_ifnofpu(dc)) {
4732         return true;
4733     }
4734     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4735     return advance_pc(dc);
4736 }
4737 
4738 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4739 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4740 
do_ff(DisasContext * dc,arg_r_r * a,void (* func)(TCGv_i32,TCGv_i32))4741 static bool do_ff(DisasContext *dc, arg_r_r *a,
4742                   void (*func)(TCGv_i32, TCGv_i32))
4743 {
4744     TCGv_i32 tmp;
4745 
4746     if (gen_trap_if_nofpu_fpexception(dc)) {
4747         return true;
4748     }
4749 
4750     tmp = gen_load_fpr_F(dc, a->rs);
4751     func(tmp, tmp);
4752     gen_store_fpr_F(dc, a->rd, tmp);
4753     return advance_pc(dc);
4754 }
4755 
TRANS(FMOVs,ALL,do_ff,a,gen_op_fmovs)4756 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4757 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4758 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4759 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4760 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4761 
4762 static bool do_fd(DisasContext *dc, arg_r_r *a,
4763                   void (*func)(TCGv_i32, TCGv_i64))
4764 {
4765     TCGv_i32 dst;
4766     TCGv_i64 src;
4767 
4768     if (gen_trap_ifnofpu(dc)) {
4769         return true;
4770     }
4771 
4772     dst = tcg_temp_new_i32();
4773     src = gen_load_fpr_D(dc, a->rs);
4774     func(dst, src);
4775     gen_store_fpr_F(dc, a->rd, dst);
4776     return advance_pc(dc);
4777 }
4778 
TRANS(FPACK16,VIS1,do_fd,a,gen_op_fpack16)4779 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4780 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4781 
/*
 * Unary single-precision operation via an env helper (may raise fp
 * exceptions): frd = func(env, frs).
 */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4800 
/*
 * Narrowing operation via an env helper: 32-bit frd = func(env,
 * 64-bit frs).  Covers the double->single and double->int conversions.
 */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4821 
4822 static bool do_dd(DisasContext *dc, arg_r_r *a,
4823                   void (*func)(TCGv_i64, TCGv_i64))
4824 {
4825     TCGv_i64 dst, src;
4826 
4827     if (gen_trap_if_nofpu_fpexception(dc)) {
4828         return true;
4829     }
4830 
4831     dst = tcg_temp_new_i64();
4832     src = gen_load_fpr_D(dc, a->rs);
4833     func(dst, src);
4834     gen_store_fpr_D(dc, a->rd, dst);
4835     return advance_pc(dc);
4836 }
4837 
4838 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4839 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4840 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd,VIS1,do_dd,a,tcg_gen_mov_i64)4841 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4842 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4843 
/*
 * Unary double-precision operation via an env helper:
 * frd = func(env, frs).
 */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4863 
4864 static bool do_df(DisasContext *dc, arg_r_r *a,
4865                   void (*func)(TCGv_i64, TCGv_i32))
4866 {
4867     TCGv_i64 dst;
4868     TCGv_i32 src;
4869 
4870     if (gen_trap_ifnofpu(dc)) {
4871         return true;
4872     }
4873 
4874     dst = tcg_temp_new_i64();
4875     src = gen_load_fpr_F(dc, a->rs);
4876     func(dst, src);
4877     gen_store_fpr_D(dc, a->rd, dst);
4878     return advance_pc(dc);
4879 }
4880 
TRANS(FEXPAND,VIS1,do_df,a,gen_helper_fexpand)4881 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4882 
/*
 * Widening operation via an env helper: 64-bit frd = func(env,
 * 32-bit frs).  Covers int->double and single->double conversions.
 */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4903 
/*
 * Unary quad-precision operation with no env access: frd = func(frs).
 * These are pure move/sign ops, so pending IEEE exception state and
 * FTT are cleared explicitly rather than via a helper.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4926 
/*
 * Unary quad-precision operation via an env helper:
 * frd = func(env, frs).
 */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4946 
/*
 * Narrowing operation via an env helper: 32-bit frd = func(env,
 * 128-bit frs).  Covers quad->single and quad->int conversions.
 */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4969 
/*
 * Narrowing operation via an env helper: 64-bit frd = func(env,
 * 128-bit frs).  Covers quad->double and quad->long conversions.
 */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4992 
/*
 * Widening operation via an env helper: 128-bit frd = func(env,
 * 32-bit frs).  Covers int->quad and single->quad conversions.
 */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
5015 
/*
 * Widening operation via an env helper: 128-bit frd = func(env,
 * 64-bit frs).  Note: unlike the other quad helpers, no float128
 * availability check is performed here.
 */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
5035 
5036 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
5037                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
5038 {
5039     TCGv_i32 src1, src2;
5040 
5041     if (gen_trap_ifnofpu(dc)) {
5042         return true;
5043     }
5044 
5045     src1 = gen_load_fpr_F(dc, a->rs1);
5046     src2 = gen_load_fpr_F(dc, a->rs2);
5047     func(src1, src1, src2);
5048     gen_store_fpr_F(dc, a->rd, src1);
5049     return advance_pc(dc);
5050 }
5051 
TRANS(FPADD16s,VIS1,do_fff,a,tcg_gen_vec_add16_i32)5052 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
5053 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
5054 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
5055 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
5056 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
5057 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
5058 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
5059 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
5060 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
5061 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
5062 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
5063 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
5064 
5065 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
5066 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
5067 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
5068 
5069 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
5070 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
5071 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
5072 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
5073 
/*
 * Binary single-precision operation via an env helper:
 * frd = func(env, frs1, frs2).
 */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5096 
5097 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
5098                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
5099 {
5100     TCGv_i64 dst;
5101     TCGv_i32 src1, src2;
5102 
5103     if (gen_trap_ifnofpu(dc)) {
5104         return true;
5105     }
5106 
5107     dst = tcg_temp_new_i64();
5108     src1 = gen_load_fpr_F(dc, a->rs1);
5109     src2 = gen_load_fpr_F(dc, a->rs2);
5110     func(dst, src1, src2);
5111     gen_store_fpr_D(dc, a->rd, dst);
5112     return advance_pc(dc);
5113 }
5114 
TRANS(FMUL8x16AU,VIS1,do_dff,a,gen_op_fmul8x16au)5115 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
5116 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
5117 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
5118 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
5119 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5120 
5121 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
5122                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
5123 {
5124     TCGv_i64 dst, src2;
5125     TCGv_i32 src1;
5126 
5127     if (gen_trap_ifnofpu(dc)) {
5128         return true;
5129     }
5130 
5131     dst = tcg_temp_new_i64();
5132     src1 = gen_load_fpr_F(dc, a->rs1);
5133     src2 = gen_load_fpr_D(dc, a->rs2);
5134     func(dst, src1, src2);
5135     gen_store_fpr_D(dc, a->rd, dst);
5136     return advance_pc(dc);
5137 }
5138 
TRANS(FMUL8x16,VIS1,do_dfd,a,gen_helper_fmul8x16)5139 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5140 
/*
 * Binary 64-bit operation expressed as a generic-vector expansion over
 * the double-precision registers: frd = func(frs1, frs2), with element
 * size VECE and fixed oprsz/maxsz of 8 bytes (one D register).
 */
static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)

TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)

TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)

TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)

TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)

TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)

TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5197 
/*
 * Binary double-precision operation with no env access:
 * frd = func(frs1, frs2).
 */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5239 
/*
 * Binary operation on two double-precision FP sources producing an
 * integer GPR result: rd = func(frs1, frs2).  Used by the VIS
 * partitioned compare instructions.
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)

TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5282 
/*
 * Binary double-precision operation via an env helper:
 * frd = func(env, frs1, frs2).
 */
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5306 
/*
 * FsMULd: multiply two singles, producing a double.  The FSMULD
 * feature check is made after the FPU/trap check so that a disabled
 * FPU still takes precedence; without the feature this raises an
 * unimplemented-FPop exception.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5326 
/*
 * FNsMULd (VIS3): multiply two singles producing a negated double,
 * via the fnsmuld helper.
 */
static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fnsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5345 
do_ffff(DisasContext * dc,arg_r_r_r_r * a,void (* func)(TCGv_i32,TCGv_i32,TCGv_i32,TCGv_i32))5346 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5347                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5348 {
5349     TCGv_i32 dst, src1, src2, src3;
5350 
5351     if (gen_trap_ifnofpu(dc)) {
5352         return true;
5353     }
5354 
5355     src1 = gen_load_fpr_F(dc, a->rs1);
5356     src2 = gen_load_fpr_F(dc, a->rs2);
5357     src3 = gen_load_fpr_F(dc, a->rs3);
5358     dst = tcg_temp_new_i32();
5359     func(dst, src1, src2, src3);
5360     gen_store_fpr_F(dc, a->rd, dst);
5361     return advance_pc(dc);
5362 }
5363 
TRANS(FMADDs,FMAF,do_ffff,a,gen_op_fmadds)5364 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5365 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5366 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5367 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5368 
5369 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5370                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5371 {
5372     TCGv_i64 dst, src1, src2, src3;
5373 
5374     if (gen_trap_ifnofpu(dc)) {
5375         return true;
5376     }
5377 
5378     dst  = tcg_temp_new_i64();
5379     src1 = gen_load_fpr_D(dc, a->rs1);
5380     src2 = gen_load_fpr_D(dc, a->rs2);
5381     src3 = gen_load_fpr_D(dc, a->rs3);
5382     func(dst, src1, src2, src3);
5383     gen_store_fpr_D(dc, a->rd, dst);
5384     return advance_pc(dc);
5385 }
5386 
TRANS(PDIST,VIS1,do_dddd,a,gen_helper_pdist)5387 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5388 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5389 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5390 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5391 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5392 TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
5393 TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5394 
/*
 * FALIGNDATAi (VIS4): align frd:frs2 by the shift amount taken from
 * GPR rs1.  Note rd is both a source (first FP operand) and the
 * destination.
 */
static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rd);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_gpr(dc, a->rs1);
    gen_op_faligndata_i(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5415 
do_env_qqq(DisasContext * dc,arg_r_r_r * a,void (* func)(TCGv_i128,TCGv_env,TCGv_i128,TCGv_i128))5416 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5417                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5418 {
5419     TCGv_i128 src1, src2;
5420 
5421     if (gen_trap_if_nofpu_fpexception(dc)) {
5422         return true;
5423     }
5424     if (gen_trap_float128(dc)) {
5425         return true;
5426     }
5427 
5428     src1 = gen_load_fpr_Q(dc, a->rs1);
5429     src2 = gen_load_fpr_Q(dc, a->rs2);
5430     func(src1, tcg_env, src1, src2);
5431     gen_store_fpr_Q(dc, a->rd, src1);
5432     return advance_pc(dc);
5433 }
5434 
/* Quad-precision arithmetic, available on all CPUs with float128. */
TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5439 
5440 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5441 {
5442     TCGv_i64 src1, src2;
5443     TCGv_i128 dst;
5444 
5445     if (gen_trap_if_nofpu_fpexception(dc)) {
5446         return true;
5447     }
5448     if (gen_trap_float128(dc)) {
5449         return true;
5450     }
5451 
5452     src1 = gen_load_fpr_D(dc, a->rs1);
5453     src2 = gen_load_fpr_D(dc, a->rs2);
5454     dst = tcg_temp_new_i128();
5455     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5456     gen_store_fpr_Q(dc, a->rd, dst);
5457     return advance_pc(dc);
5458 }
5459 
/*
 * Common translator for FMOVR{s,d,q}: conditional FP move on the value
 * of an integer register.  gen_compare_reg() must run first: it rejects
 * reserved condition encodings by returning false (illegal insn).
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cond;

    if (!gen_compare_reg(&cond, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cond, a->rd, a->rs2);
    return advance_pc(dc);
}
5479 
/* Register-conditional FP moves (SPARC V9 / 64-bit only). */
TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5483 
/*
 * Common translator for FMOV{s,d,q}cc: conditional FP move on the
 * integer condition codes.
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cond;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cond, a->cc, a->cond, dc);
    func(dc, &cond, a->rd, a->rs2);
    return advance_pc(dc);
}
5501 
/* Integer-cc-conditional FP moves (SPARC V9 / 64-bit only). */
TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5505 
/*
 * Common translator for FMOV{s,d,q}fcc: conditional FP move on one of
 * the floating-point condition-code fields.
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cond;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cond, a->cc, a->cond);
    func(dc, &cond, a->rd, a->rs2);
    return advance_pc(dc);
}
5523 
/* FP-cc-conditional FP moves (SPARC V9 / 64-bit only). */
TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5527 
/*
 * Single-precision FP compare into fcc[a->cc].  @e selects the
 * "exception" variant (FCMPEs), which signals on quiet NaNs.
 * Only %fcc0 exists on 32-bit CPUs; other cc fields are illegal there.
 */
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 t1, t2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    t1 = gen_load_fpr_F(dc, a->rs1);
    t2 = gen_load_fpr_F(dc, a->rs2);
    if (!e) {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, t1, t2);
    } else {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, t1, t2);
    }
    return advance_pc(dc);
}
5548 
/* Single-precision compares; FCMPEs additionally traps on quiet NaNs. */
TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
5551 
5552 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5553 {
5554     TCGv_i64 src1, src2;
5555 
5556     if (avail_32(dc) && a->cc != 0) {
5557         return false;
5558     }
5559     if (gen_trap_if_nofpu_fpexception(dc)) {
5560         return true;
5561     }
5562 
5563     src1 = gen_load_fpr_D(dc, a->rs1);
5564     src2 = gen_load_fpr_D(dc, a->rs2);
5565     if (e) {
5566         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5567     } else {
5568         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5569     }
5570     return advance_pc(dc);
5571 }
5572 
/* Double-precision compares; FCMPEd additionally traps on quiet NaNs. */
TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5575 
5576 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5577 {
5578     TCGv_i128 src1, src2;
5579 
5580     if (avail_32(dc) && a->cc != 0) {
5581         return false;
5582     }
5583     if (gen_trap_if_nofpu_fpexception(dc)) {
5584         return true;
5585     }
5586     if (gen_trap_float128(dc)) {
5587         return true;
5588     }
5589 
5590     src1 = gen_load_fpr_Q(dc, a->rs1);
5591     src2 = gen_load_fpr_Q(dc, a->rs2);
5592     if (e) {
5593         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5594     } else {
5595         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5596     }
5597     return advance_pc(dc);
5598 }
5599 
/* Quad-precision compares; FCMPEq additionally traps on quiet NaNs. */
TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5602 
5603 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5604 {
5605     TCGv_i32 src1, src2;
5606 
5607     if (!avail_VIS3(dc)) {
5608         return false;
5609     }
5610     if (gen_trap_ifnofpu(dc)) {
5611         return true;
5612     }
5613 
5614     src1 = gen_load_fpr_F(dc, a->rs1);
5615     src2 = gen_load_fpr_F(dc, a->rs2);
5616     gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5617     return advance_pc(dc);
5618 }
5619 
trans_FLCMPd(DisasContext * dc,arg_FLCMPd * a)5620 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5621 {
5622     TCGv_i64 src1, src2;
5623 
5624     if (!avail_VIS3(dc)) {
5625         return false;
5626     }
5627     if (gen_trap_ifnofpu(dc)) {
5628         return true;
5629     }
5630 
5631     src1 = gen_load_fpr_D(dc, a->rs1);
5632     src2 = gen_load_fpr_D(dc, a->rs2);
5633     gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5634     return advance_pc(dc);
5635 }
5636 
do_movf2r(DisasContext * dc,arg_r_r * a,int (* offset)(unsigned int),void (* load)(TCGv,TCGv_ptr,tcg_target_long))5637 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5638                       int (*offset)(unsigned int),
5639                       void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5640 {
5641     TCGv dst;
5642 
5643     if (gen_trap_ifnofpu(dc)) {
5644         return true;
5645     }
5646     dst = gen_dest_gpr(dc, a->rd);
5647     load(dst, tcg_env, offset(a->rs));
5648     gen_store_gpr(dc, a->rd, dst);
5649     return advance_pc(dc);
5650 }
5651 
/* VIS3B moves from FP registers to integer registers. */
TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5655 
5656 static bool do_movr2f(DisasContext *dc, arg_r_r *a,
5657                       int (*offset)(unsigned int),
5658                       void (*store)(TCGv, TCGv_ptr, tcg_target_long))
5659 {
5660     TCGv src;
5661 
5662     if (gen_trap_ifnofpu(dc)) {
5663         return true;
5664     }
5665     src = gen_load_gpr(dc, a->rs);
5666     store(src, tcg_env, offset(a->rd));
5667     return advance_pc(dc);
5668 }
5669 
/* VIS3B moves from integer registers to FP registers. */
TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5672 
/* Per-TB setup hook: unpack the TB flags and cs_base into DisasContext. */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The next-PC (delay slot target) is carried in the TB's cs_base. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
# ifdef TARGET_SPARC64
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
# else
    /* 32-bit sysemu: track whether the FQ is non-empty. */
    dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
# endif
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    /* Default ASI for alternate-space accesses, packed into the flags. */
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5703 
/* TB-start hook: nothing to emit before the first instruction. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5707 
/*
 * Per-insn hook: record (pc, npc) for this instruction.  An npc with
 * the low bits set is not a real address but one of the symbolic tags
 * (JUMP_PC, DYNAMIC_PC, DYNAMIC_PC_LOOKUP); normalize it so that the
 * recorded state can be restored on exception.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* An annulled branch: jump_pc[1] must be the fallthrough. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            /* Encode the taken target together with the JUMP_PC tag. */
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5729 
/*
 * Translate one instruction: fetch 4 bytes at dc->pc, run the
 * decodetree decoder, and raise an illegal-instruction trap if it
 * matches nothing.  Stop the TB when control flow left the straight
 * line (dc->pc no longer tracks pc_next).
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int raw = translator_ldl(cpu_env(cs), &dc->base, dc->pc);

    dc->base.pc_next += 4;

    if (!decode(dc, raw)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp != DISAS_NORETURN && dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5749 
/*
 * TB-end hook: emit the exit sequence for the block, then flush any
 * deferred delay-slot exceptions queued during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is symbolic (low bits set).  Store
         * both into the CPU state; lookup-and-goto is allowed only
         * when neither side is plain DYNAMIC_PC.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional branch into pc/npc. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the out-of-line delay-slot exception stubs and free the list. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* npc with low bits set is symbolic: leave cpu_npc as-is then. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5831 
/* Translator callbacks wired into the generic translator_loop(). */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5839 
/* Entry point: translate one TB starting at @pc using the SPARC ops. */
void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &ctx.base);
}
5847 
/*
 * One-time TCG initialization: create the global TCGv handles that
 * mirror fields of CPUSPARCState, plus the 32 windowed registers.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    /* 32-bit globals: FPRS and the FP condition codes (4 on sparc64). */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-sized globals: condition-code pieces, pc/npc, etc. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 always reads as zero and is never a real global. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers %o0..%i7 live behind the regwptr indirection. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5913