xref: /qemu/target/s390x/tcg/translate.c (revision 84307cd6027c4602913177ff09aeefa4743b7234)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
/* Compile-time debug switches for the s390x translator. */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS() expands to qemu_log() only with verbose disas debugging on. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "tcg/tcg-op.h"
35 #include "tcg/tcg-op-gvec.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/helper-proto.h"
39 #include "exec/helper-gen.h"
40 
41 #include "exec/translator.h"
42 #include "exec/translation-block.h"
43 #include "exec/log.h"
44 #include "qemu/atomic128.h"
45 
46 #define HELPER_H "helper.h"
47 #include "exec/helper-info.c.inc"
48 #undef  HELPER_H
49 
50 
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;     /* decode-table entry; defined later in this file */
typedef struct DisasFields DisasFields;
55 
/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

/* One "original" index per distinct instruction field name. */
enum DisasFieldIndexO {
    FLD_O_r1,   /* register designators */
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,   /* mask fields */
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,   /* base registers */
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,   /* displacements */
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,   /* index register */
    FLD_O_l1,   /* length fields */
    FLD_O_l2,
    FLD_O_i1,   /* immediates */
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,   /* vector register designators */
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
92 
/*
 * Compact storage indices: fields that never occur in the same instruction
 * format share a slot, so only NUM_C_FIELD ints are needed per insn.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
129 
/* Decoded operand fields of the current instruction. */
struct DisasFields {
    uint64_t raw_insn;      /* undecoded instruction bytes */
    unsigned op:8;          /* primary opcode */
    unsigned op2:8;         /* secondary/extended opcode */
    unsigned presentC:16;   /* bitmap over compact (FLD_C_*) indices present */
    unsigned int presentO;  /* bitmap over original (FLD_O_*) indices present */
    int c[NUM_C_FIELD];     /* field values, indexed by FLD_C_* */
};
138 
/* Per-translation-block state for the s390x translator. */
struct DisasContext {
    DisasContextBase base;      /* common translator state; must be first */
    const DisasInsn *insn;      /* decode-table entry of the current insn */
    DisasFields fields;         /* decoded fields of the current insn */
    uint64_t ex_value;          /* EXECUTE target value, if any -- TODO confirm */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length of the current insn, in bytes */
    enum cc_op cc_op;           /* how the CC is currently computed */
    bool exit_to_mainloop;      /* force exit to the main loop after this TB */
};
154 
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;     /* TCG comparison to apply */
    bool is_64;         /* selects u.s64 vs u.s32 below */
    union {
        struct { TCGv_i64 a, b; } s64;  /* 64-bit comparison operands */
        struct { TCGv_i32 a, b; } s32;  /* 32-bit comparison operands */
    } u;
} DisasCompare;
164 
/* Hit/miss statistics for inlined CC branches, per cc_op (debug only). */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
169 
/*
 * Store the link information for PC into OUT, honoring the addressing mode:
 * in 64-bit mode the full address is stored; in 31-bit mode the high bit of
 * the low word is set; in 24-bit mode the address is stored as-is.  In the
 * non-64-bit cases only the low 32 bits of OUT are written, preserving the
 * upper half (presumably the caller's existing register contents -- the
 * deposit below makes that explicit).
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            /* 64-bit addressing: the whole address is the link info. */
            tcg_gen_movi_i64(out, pc);
            return;
        }
        /* 31-bit addressing: set the addressing-mode bit. */
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    /* 24/31-bit: replace only the low 32 bits of OUT. */
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
182 
/* TCG globals mapped onto CPUS390XState fields by s390x_translate_init(). */
static TCGv_i64 psw_addr;   /* psw.addr */
static TCGv_i64 psw_mask;   /* psw.mask */
static TCGv_i64 gbea;       /* breaking-event address register */

static TCGv_i32 cc_op;      /* current cc computation method / value */
static TCGv_i64 cc_src;     /* cc computation inputs */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];   /* "r0".."r15", backing storage for regs[] names */
static TCGv_i64 regs[16];           /* general registers */
194 
s390x_translate_init(void)195 void s390x_translate_init(void)
196 {
197     int i;
198 
199     psw_addr = tcg_global_mem_new_i64(tcg_env,
200                                       offsetof(CPUS390XState, psw.addr),
201                                       "psw_addr");
202     psw_mask = tcg_global_mem_new_i64(tcg_env,
203                                       offsetof(CPUS390XState, psw.mask),
204                                       "psw_mask");
205     gbea = tcg_global_mem_new_i64(tcg_env,
206                                   offsetof(CPUS390XState, gbea),
207                                   "gbea");
208 
209     cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
210                                    "cc_op");
211     cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
212                                     "cc_src");
213     cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
214                                     "cc_dst");
215     cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
216                                    "cc_vr");
217 
218     for (i = 0; i < 16; i++) {
219         snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
220         regs[i] = tcg_global_mem_new(tcg_env,
221                                      offsetof(CPUS390XState, regs[i]),
222                                      cpu_reg_names[i]);
223     }
224 }
225 
/* Return the env offset of the full 16-byte vector register REG (0..31). */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
231 
/*
 * Return the env offset of element ENR (of size ES) within vector register
 * REG, accounting for host endianness.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* Flip the element position within its 8-byte half (see table above). */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
266 
/* Return the env offset of the 64-bit FP register REG (element 0 of vreg). */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
272 
/* Return the env offset of the 32-bit FP register REG (element 0 of vreg). */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
278 
load_reg(int reg)279 static TCGv_i64 load_reg(int reg)
280 {
281     TCGv_i64 r = tcg_temp_new_i64();
282     tcg_gen_mov_i64(r, regs[reg]);
283     return r;
284 }
285 
load_freg(int reg)286 static TCGv_i64 load_freg(int reg)
287 {
288     TCGv_i64 r = tcg_temp_new_i64();
289 
290     tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
291     return r;
292 }
293 
load_freg32_i64(int reg)294 static TCGv_i64 load_freg32_i64(int reg)
295 {
296     TCGv_i64 r = tcg_temp_new_i64();
297 
298     tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
299     return r;
300 }
301 
load_freg_128(int reg)302 static TCGv_i128 load_freg_128(int reg)
303 {
304     TCGv_i64 h = load_freg(reg);
305     TCGv_i64 l = load_freg(reg + 2);
306     TCGv_i128 r = tcg_temp_new_i128();
307 
308     tcg_gen_concat_i64_i128(r, l, h);
309     return r;
310 }
311 
/* Store V into the full 64 bits of general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
316 
/* Store V into the 64-bit FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}
321 
/* Store the low 32 bits of V into register REG, preserving the upper half. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
327 
/* Store the low 32 bits of V into the HIGH half of register REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
332 
/* Store the low 32 bits of V into the 32-bit FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}
337 
/* Write the current translation PC back to the architected psw.addr. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
343 
/*
 * Emit a PER branch-event check for a branch to DEST.  Only generated in
 * system mode when PER branch tracing is enabled in the TB flags.
 */
static void per_branch(DisasContext *s, TCGv_i64 dest)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_BRANCH) {
        gen_helper_per_branch(tcg_env, dest, tcg_constant_i32(s->ilen));
    }
#endif
}
352 
/* Record the current PC as the breaking-event address (GBEA). */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
357 
/*
 * Flush the translation-time cc_op into the cc_op global.  DYNAMIC and
 * STATIC mean env->cc_op is already authoritative, so nothing is emitted.
 */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
364 
/* Fetch 2 instruction bytes at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}
370 
/* Fetch 4 instruction bytes at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    /* The intermediate uint32_t cast avoids sign extension of the i32 load. */
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
376 
/*
 * Return the MMU index for memory accesses of the current TB: real-mode
 * when DAT is off, otherwise selected by the PSW address-space control.
 */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        /* DAT disabled: accesses use real addresses. */
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
    }
#endif
}
398 
/* Raise exception EXCP via the exception helper. */
static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}
403 
/*
 * Raise program exception CODE for the current instruction, recording the
 * exception code and instruction length and syncing PSW address and cc.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
422 
/* Raise a PGM_OPERATION (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
427 
/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}
432 
/* Raise a data exception with DXC 0xff. */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
438 
/*
 * DST = SRC + IMM, wrapped to the current addressing mode:
 * full 64 bits, or masked to 31 or 24 bits per the TB flags.
 */
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (s->base.tb->flags & FLAG_MASK_64) {
        /* 64-bit addressing: no wrap needed. */
        return;
    }
    tcg_gen_andi_i64(dst, dst,
                     s->base.tb->flags & FLAG_MASK_32
                     ? 0x7fffffff : 0x00ffffff);
}
451 
/*
 * Compute the effective address base(B2) + index(X2) + displacement(D2)
 * into a fresh temp, wrapped to the current addressing mode.  A register
 * number of 0 means "no base/index register" per the architecture.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Constant-only address: mask to 31 or 24 bits directly. */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
479 
live_cc_data(DisasContext * s)480 static inline bool live_cc_data(DisasContext *s)
481 {
482     return (s->cc_op != CC_OP_DYNAMIC
483             && s->cc_op != CC_OP_STATIC
484             && s->cc_op > 3);
485 }
486 
/* Set the cc to the constant VAL (0..3), discarding any live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
496 
/* Record a 1-input cc computation: OP over DST (stored in cc_dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
506 
/* Record a 2-input cc computation: OP over SRC and DST. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
517 
/* Record a 3-input cc computation: OP over SRC, DST, and VR. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
526 
/* Set cc to the nonzero-ness of VAL (CC_OP_NZ). */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
531 
532 /* CC value is in env->cc_op */
set_cc_static(DisasContext * s)533 static void set_cc_static(DisasContext *s)
534 {
535     if (live_cc_data(s)) {
536         tcg_gen_discard_i64(cc_src);
537         tcg_gen_discard_i64(cc_dst);
538         tcg_gen_discard_i64(cc_vr);
539     }
540     s->cc_op = CC_OP_STATIC;
541 }
542 
543 /* calculates cc into cc_op */
gen_op_calc_cc(DisasContext * s)544 static void gen_op_calc_cc(DisasContext *s)
545 {
546     TCGv_i32 local_cc_op = NULL;
547     TCGv_i64 dummy = NULL;
548 
549     switch (s->cc_op) {
550     default:
551         dummy = tcg_constant_i64(0);
552         /* FALLTHRU */
553     case CC_OP_ADD_64:
554     case CC_OP_SUB_64:
555     case CC_OP_ADD_32:
556     case CC_OP_SUB_32:
557         local_cc_op = tcg_constant_i32(s->cc_op);
558         break;
559     case CC_OP_CONST0:
560     case CC_OP_CONST1:
561     case CC_OP_CONST2:
562     case CC_OP_CONST3:
563     case CC_OP_STATIC:
564     case CC_OP_DYNAMIC:
565         break;
566     }
567 
568     switch (s->cc_op) {
569     case CC_OP_CONST0:
570     case CC_OP_CONST1:
571     case CC_OP_CONST2:
572     case CC_OP_CONST3:
573         /* s->cc_op is the cc value */
574         tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
575         break;
576     case CC_OP_STATIC:
577         /* env->cc_op already is the cc value */
578         break;
579     case CC_OP_NZ:
580         tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
581         tcg_gen_extrl_i64_i32(cc_op, cc_dst);
582         break;
583     case CC_OP_ABS_64:
584     case CC_OP_NABS_64:
585     case CC_OP_ABS_32:
586     case CC_OP_NABS_32:
587     case CC_OP_LTGT0_32:
588     case CC_OP_LTGT0_64:
589     case CC_OP_COMP_32:
590     case CC_OP_COMP_64:
591     case CC_OP_NZ_F32:
592     case CC_OP_NZ_F64:
593     case CC_OP_FLOGR:
594     case CC_OP_LCBB:
595     case CC_OP_MULS_32:
596         /* 1 argument */
597         gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
598         break;
599     case CC_OP_ADDU:
600     case CC_OP_ICM:
601     case CC_OP_LTGT_32:
602     case CC_OP_LTGT_64:
603     case CC_OP_LTUGTU_32:
604     case CC_OP_LTUGTU_64:
605     case CC_OP_TM_32:
606     case CC_OP_TM_64:
607     case CC_OP_SLA:
608     case CC_OP_SUBU:
609     case CC_OP_NZ_F128:
610     case CC_OP_VC:
611     case CC_OP_MULS_64:
612         /* 2 arguments */
613         gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
614         break;
615     case CC_OP_ADD_64:
616     case CC_OP_SUB_64:
617     case CC_OP_ADD_32:
618     case CC_OP_SUB_32:
619         /* 3 arguments */
620         gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
621         break;
622     case CC_OP_DYNAMIC:
623         /* unknown operation - assume 3 arguments and cc_op in env */
624         gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
625         break;
626     default:
627         g_assert_not_reached();
628     }
629 
630     /* We now have cc in cc_op as constant */
631     set_cc_static(s);
632 }
633 
/* True if a direct goto_tb to DEST is permitted (thin translator wrapper). */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    return translator_use_goto_tb(&s->base, dest);
}
638 
/* Count a branch that could NOT be inlined for CC_OP (debug builds only). */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
645 
/* Count a branch that WAS inlined for CC_OP (debug builds only). */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
652 
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
665 
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
678 
679 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
680    details required to generate a TCG comparison.  */
disas_jcc(DisasContext * s,DisasCompare * c,uint32_t mask)681 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
682 {
683     TCGCond cond;
684     enum cc_op old_cc_op = s->cc_op;
685 
686     if (mask == 15 || mask == 0) {
687         c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
688         c->u.s32.a = cc_op;
689         c->u.s32.b = cc_op;
690         c->is_64 = false;
691         return;
692     }
693 
694     /* Find the TCG condition for the mask + cc op.  */
695     switch (old_cc_op) {
696     case CC_OP_LTGT0_32:
697     case CC_OP_LTGT0_64:
698     case CC_OP_LTGT_32:
699     case CC_OP_LTGT_64:
700         cond = ltgt_cond[mask];
701         if (cond == TCG_COND_NEVER) {
702             goto do_dynamic;
703         }
704         account_inline_branch(s, old_cc_op);
705         break;
706 
707     case CC_OP_LTUGTU_32:
708     case CC_OP_LTUGTU_64:
709         cond = tcg_unsigned_cond(ltgt_cond[mask]);
710         if (cond == TCG_COND_NEVER) {
711             goto do_dynamic;
712         }
713         account_inline_branch(s, old_cc_op);
714         break;
715 
716     case CC_OP_NZ:
717         cond = nz_cond[mask];
718         if (cond == TCG_COND_NEVER) {
719             goto do_dynamic;
720         }
721         account_inline_branch(s, old_cc_op);
722         break;
723 
724     case CC_OP_TM_32:
725     case CC_OP_TM_64:
726         switch (mask) {
727         case 8:
728             cond = TCG_COND_TSTEQ;
729             break;
730         case 4 | 2 | 1:
731             cond = TCG_COND_TSTNE;
732             break;
733         default:
734             goto do_dynamic;
735         }
736         account_inline_branch(s, old_cc_op);
737         break;
738 
739     case CC_OP_ICM:
740         switch (mask) {
741         case 8:
742             cond = TCG_COND_TSTEQ;
743             break;
744         case 4 | 2 | 1:
745         case 4 | 2:
746             cond = TCG_COND_TSTNE;
747             break;
748         default:
749             goto do_dynamic;
750         }
751         account_inline_branch(s, old_cc_op);
752         break;
753 
754     case CC_OP_FLOGR:
755         switch (mask & 0xa) {
756         case 8: /* src == 0 -> no one bit found */
757             cond = TCG_COND_EQ;
758             break;
759         case 2: /* src != 0 -> one bit found */
760             cond = TCG_COND_NE;
761             break;
762         default:
763             goto do_dynamic;
764         }
765         account_inline_branch(s, old_cc_op);
766         break;
767 
768     case CC_OP_ADDU:
769     case CC_OP_SUBU:
770         switch (mask) {
771         case 8 | 2: /* result == 0 */
772             cond = TCG_COND_EQ;
773             break;
774         case 4 | 1: /* result != 0 */
775             cond = TCG_COND_NE;
776             break;
777         case 8 | 4: /* !carry (borrow) */
778             cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
779             break;
780         case 2 | 1: /* carry (!borrow) */
781             cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
782             break;
783         default:
784             goto do_dynamic;
785         }
786         account_inline_branch(s, old_cc_op);
787         break;
788 
789     default:
790     do_dynamic:
791         /* Calculate cc value.  */
792         gen_op_calc_cc(s);
793         /* FALLTHRU */
794 
795     case CC_OP_STATIC:
796         /* Jump based on CC.  We'll load up the real cond below;
797            the assignment here merely avoids a compiler warning.  */
798         account_noninline_branch(s, old_cc_op);
799         old_cc_op = CC_OP_STATIC;
800         cond = TCG_COND_NEVER;
801         break;
802     }
803 
804     /* Load up the arguments of the comparison.  */
805     c->is_64 = true;
806     switch (old_cc_op) {
807     case CC_OP_LTGT0_32:
808         c->is_64 = false;
809         c->u.s32.a = tcg_temp_new_i32();
810         tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
811         c->u.s32.b = tcg_constant_i32(0);
812         break;
813     case CC_OP_LTGT_32:
814     case CC_OP_LTUGTU_32:
815         c->is_64 = false;
816         c->u.s32.a = tcg_temp_new_i32();
817         tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
818         c->u.s32.b = tcg_temp_new_i32();
819         tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
820         break;
821 
822     case CC_OP_LTGT0_64:
823     case CC_OP_NZ:
824     case CC_OP_FLOGR:
825         c->u.s64.a = cc_dst;
826         c->u.s64.b = tcg_constant_i64(0);
827         break;
828 
829     case CC_OP_LTGT_64:
830     case CC_OP_LTUGTU_64:
831     case CC_OP_TM_32:
832     case CC_OP_TM_64:
833     case CC_OP_ICM:
834         c->u.s64.a = cc_src;
835         c->u.s64.b = cc_dst;
836         break;
837 
838     case CC_OP_ADDU:
839     case CC_OP_SUBU:
840         c->is_64 = true;
841         c->u.s64.b = tcg_constant_i64(0);
842         switch (mask) {
843         case 8 | 2:
844         case 4 | 1: /* result */
845             c->u.s64.a = cc_dst;
846             break;
847         case 8 | 4:
848         case 2 | 1: /* carry */
849             c->u.s64.a = cc_src;
850             break;
851         default:
852             g_assert_not_reached();
853         }
854         break;
855 
856     case CC_OP_STATIC:
857         c->is_64 = false;
858         c->u.s32.a = cc_op;
859 
860         /* Fold half of the cases using bit 3 to invert. */
861         switch (mask & 8 ? mask ^ 0xf : mask) {
862         case 0x1: /* cc == 3 */
863             cond = TCG_COND_EQ;
864             c->u.s32.b = tcg_constant_i32(3);
865             break;
866         case 0x2: /* cc == 2 */
867             cond = TCG_COND_EQ;
868             c->u.s32.b = tcg_constant_i32(2);
869             break;
870         case 0x4: /* cc == 1 */
871             cond = TCG_COND_EQ;
872             c->u.s32.b = tcg_constant_i32(1);
873             break;
874         case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
875             cond = TCG_COND_GTU;
876             c->u.s32.b = tcg_constant_i32(1);
877             break;
878         case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
879             cond = TCG_COND_TSTNE;
880             c->u.s32.b = tcg_constant_i32(1);
881             break;
882         case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
883             cond = TCG_COND_LEU;
884             c->u.s32.a = tcg_temp_new_i32();
885             c->u.s32.b = tcg_constant_i32(1);
886             tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
887             break;
888         case 0x4 | 0x2 | 0x1: /* cc != 0 */
889             cond = TCG_COND_NE;
890             c->u.s32.b = tcg_constant_i32(0);
891             break;
892         default:
893             /* case 0: never, handled above. */
894             g_assert_not_reached();
895         }
896         if (mask & 8) {
897             cond = tcg_invert_cond(cond);
898         }
899         break;
900 
901     default:
902         abort();
903     }
904     c->cond = cond;
905 }
906 
/* ====================================================================== */
/* Define the insn format enumeration.  */
/* Each F<n> macro expands to just the format name, producing FMT_<N>
   enumerators from insn-format.h.inc regardless of field count. */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
928 
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original field C was decoded for the current instruction. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}
937 
/* Return the value of field O (stored at compact index C); asserts presence. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
944 
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;             /* bit position of the field in the insn */
    unsigned int size:8;            /* field width in bits */
    unsigned int type:2;            /* extraction kind; values set by the
                                       R/M/V/BD/I/L macros below -- see the
                                       field-extraction code for semantics */
    unsigned int indexC:6;          /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8; /* original field index (FLD_O_*) */
} DisasField;

/* Per-format list of fields; unused entries are zero. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
957 
/* Field constructors: { beg, size, type, indexC, indexO }.  BD/BXD expand
   to multiple fields (base, optional index, displacement). */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* This time the F<n> macros collect the field descriptors per format. */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

/* Indexed by DisasFormat; generated from the same include as the enum. */
static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1002 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;   /* 64-bit outputs and inputs */
    TCGv_i64 addr1;                 /* computed effective address, if any */
    TCGv_i128 out_128, in1_128, in2_128; /* 128-bit variants (e.g. f128) */
} DisasOps;
1011 
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* These are bit flags and may be OR'ed together in DisasInsn.spec.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1044 
/* Static description of one instruction: decode information plus the
   helper callbacks that implement it.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode bits used for decode */
    unsigned flags:16;      /* IF_* instruction flags */
    DisasFormat fmt:8;      /* instruction format (index into format_info) */
    unsigned fac:8;         /* required facility */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Arbitrary per-insn constant, interpreted by the helpers above.  */
    uint64_t data;
};
1071 
1072 /* ====================================================================== */
1073 /* Miscellaneous helpers, used by several operations.  */
1074 
/* Emit an unconditional branch to the fixed address @dest.
   PER breaking-event and branch tracking are emitted before the branch
   itself; a direct TB link is used when use_goto_tb() allows it.  */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    update_cc_op(s);
    per_breaking_event(s);
    per_branch(s, tcg_constant_i64(dest));

    if (dest == s->pc_tmp) {
        /* Branch to the next sequential insn: fall through.  */
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        /* Chain directly to the destination TB.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return DISAS_PC_CC_UPDATED;
    }
}
1094 
/* Emit an unconditional branch to the computed address @dest,
   with PER breaking-event and branch tracking.  */
static DisasJumpType help_goto_indirect(DisasContext *s, TCGv_i64 dest)
{
    update_cc_op(s);
    per_breaking_event(s);
    tcg_gen_mov_i64(psw_addr, dest);
    per_branch(s, psw_addr);
    return DISAS_PC_CC_UPDATED;
}
1103 
/*
 * Emit a conditional branch described by @c.  If @is_imm, the target is
 * the relative immediate @imm (halfword units from pc_next); otherwise it
 * is the computed address @cdest.  Degenerate conditions (NEVER/ALWAYS)
 * and a null @cdest are reduced to simpler forms first.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        return DISAS_NEXT;
    }
    if (is_imm) {
        /*
         * Do not optimize a conditional branch if PER enabled, because we
         * still need a conditional call to helper_per_branch.
         */
        if (c->cond == TCG_COND_ALWAYS
            || (dest == s->pc_tmp &&
                !(s->base.tb->flags & FLAG_MASK_PER_BRANCH))) {
            return help_goto_direct(s, dest);
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            return DISAS_NEXT;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            return help_goto_indirect(s, cdest);
        }
    }

    update_cc_op(s);

    /*
     * Ensure the taken branch is fall-through of the tcg branch.
     * This keeps @cdest usage within the extended basic block,
     * which avoids an otherwise unnecessary spill to the stack.
     */
    lab = gen_new_label();
    if (c->is_64) {
        tcg_gen_brcond_i64(tcg_invert_cond(c->cond),
                           c->u.s64.a, c->u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(tcg_invert_cond(c->cond),
                           c->u.s32.a, c->u.s32.b, lab);
    }

    /* Branch taken.  */
    per_breaking_event(s);
    if (is_imm) {
        tcg_gen_movi_i64(psw_addr, dest);
    } else {
        tcg_gen_mov_i64(psw_addr, cdest);
    }
    per_branch(s, psw_addr);

    if (is_imm && use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_exit_tb(s->base.tb, 0);
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }

    gen_set_label(lab);

    /* Branch not taken.  */
    tcg_gen_movi_i64(psw_addr, s->pc_tmp);
    if (use_goto_tb(s, s->pc_tmp)) {
        tcg_gen_goto_tb(1);
        tcg_gen_exit_tb(s->base.tb, 1);
        return DISAS_NORETURN;
    }
    return DISAS_PC_CC_UPDATED;
}
1177 
1178 /* ====================================================================== */
1179 /* The operations.  These perform the bulk of the work for any insn,
1180    usually after the operands have been loaded and output initialized.  */
1181 
/* LOAD POSITIVE: out = |in2|.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}
1187 
/* Absolute value of a 32-bit float: clear the sign bit (bit 31).  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}
1193 
/* Absolute value of a 64-bit float: clear the sign bit (bit 63).  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}
1199 
/* Absolute value of a 128-bit float: clear the sign bit in the high
   doubleword, copy the low doubleword unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1206 
/* Signed addition: out = in1 + in2.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1212 
/* Unsigned 64-bit addition; the carry out is left in cc_src (0 or 1)
   for the CC_OP_ADDU condition-code computation.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1219 
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* For subtraction, cc_src holds borrow - 1; normalize to carry. */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC, then extract the carry from it below. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1240 
/* ADD LOGICAL WITH CARRY, 32-bit: out = in1 + in2 + carry.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
1248 
/* ADD LOGICAL WITH CARRY, 64-bit: out = in1 + in2 + carry,
   with the carry out written back to cc_src.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_addcio_i64(o->out, cc_src, o->in1, o->in2, cc_src);
    return DISAS_NEXT;
}
1255 
/* ADD (IMMEDIATE) to storage.  With the interlocked-access facility
   (STFLE bit 45) the add is performed atomically in memory; otherwise
   as a non-atomic load/add/store sequence.  */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1277 
/* ADD LOGICAL (IMMEDIATE) to storage: as op_asi, but additionally
   computes the unsigned carry into cc_src for the CC.  */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1300 
/* ADD (short BFP): delegated to the aeb helper.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1306 
/* ADD (long BFP): delegated to the adb helper.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1312 
/* ADD (extended BFP): 128-bit operands, delegated to the axb helper.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
1318 
/* AND: out = in1 & in2.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1324 
op_andi(DisasContext * s,DisasOps * o)1325 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1326 {
1327     int shift = s->insn->data & 0xff;
1328     int size = s->insn->data >> 8;
1329     uint64_t mask = ((1ull << size) - 1) << shift;
1330     TCGv_i64 t = tcg_temp_new_i64();
1331 
1332     tcg_gen_shli_i64(t, o->in2, shift);
1333     tcg_gen_ori_i64(t, t, ~mask);
1334     tcg_gen_and_i64(o->out, o->in1, t);
1335 
1336     /* Produce the CC from only the bits manipulated.  */
1337     tcg_gen_andi_i64(cc_dst, o->out, mask);
1338     set_cc_nz_u64(s, cc_dst);
1339     return DISAS_NEXT;
1340 }
1341 
/* AND WITH COMPLEMENT: out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1347 
/* OR WITH COMPLEMENT: out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1353 
/* NAND: out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1359 
/* NOR: out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1365 
/* NOT XOR (equivalence): out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1371 
/* AND to storage (NI and friends).  Atomic in memory when the
   interlocked-access facility 2 is present; otherwise a non-atomic
   load/and/store sequence.  */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1392 
/* BRANCH AND SAVE: store link information, then branch to the computed
   address; a null in2 (r2 == 0) means no branch is taken.  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        return help_goto_indirect(s, o->in2);
    } else {
        return DISAS_NEXT;
    }
}
1402 
/* Build the BAL-style link information in o->out.  In 31/64-bit
   addressing modes this is the plain return address; otherwise
   (24-bit mode) the low word is composed of ILC (bits 30-31 of the
   word), the next-instruction address, the program mask (taken from
   psw_mask) and the condition code.  */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* CC must be materialized before it can be copied into the link.  */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask: psw_mask bits shifted down into word bits 24-27.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code into word bits 28-29.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
1422 
/* BRANCH AND LINK: save the (mode-dependent) link information, then
   branch to the computed address; a null in2 means no branch.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        return help_goto_indirect(s, o->in2);
    } else {
        return DISAS_NEXT;
    }
}
1432 
/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 *
 * Under EXECUTE (s->ex_value set) a relative branch must be computed
 * from the EXECUTE target address, not from the current PC, so it is
 * demoted to a computed branch based on env->ex_target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)
1458 
/* Branch-and-save with immediate target: save the link information,
   then branch unconditionally (mask 0xf) to the relative or computed
   destination.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1471 
/* BRANCH ON CONDITION (BC/BCR and relative forms).  BCR with r2 == 0
   never branches; masks 14 and 15 then act as serialization points.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1498 
/* BRANCH ON COUNT, 32-bit: decrement the low word of r1 and branch
   if the result is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of r1.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1520 
/* BRANCH ON COUNT using the high word of r1: decrement bits 0-31 and
   branch (always via immediate) if the result is non-zero.  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    /* Write back only the high 32 bits of r1.  */
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1541 
/* BRANCH ON COUNT, 64-bit: decrement r1 and branch if non-zero.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1559 
/* BRANCH ON INDEX, 32-bit (BXH/BXLE family): add r3 to r1 and compare
   the sum against the odd register of the r3 pair.  insn->data selects
   the branch-on-low-or-equal (LE) vs branch-on-high (GT) variant.  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is the odd register of the r3 pair.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1583 
/* BRANCH ON INDEX, 64-bit (BXHG/BXLEG family): as op_bx32, but the
   comparand must be copied first when r1 aliases the odd register of
   the r3 pair, since the add below would clobber it.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* r1 aliases the comparand: snapshot it before the add.  */
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1607 
/* COMPARE AND BRANCH family: compare in1 with in2 under the m3 mask
   and branch to either a relative immediate (i4) or a base+displacement
   target (b4/d4).  insn->data selects an unsigned comparison.  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No relative immediate: target is the b4/d4 address.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1632 
/* COMPARE (short BFP): helper sets the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1639 
/* COMPARE (long BFP): helper sets the CC directly.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1646 
/* COMPARE (extended BFP): 128-bit operands, helper sets the CC.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1653 
fpinst_extract_m34(DisasContext * s,bool m3_with_fpe,bool m4_with_fpe)1654 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1655                                    bool m4_with_fpe)
1656 {
1657     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1658     uint8_t m3 = get_field(s, m3);
1659     uint8_t m4 = get_field(s, m4);
1660 
1661     /* m3 field was introduced with FPE */
1662     if (!fpe && m3_with_fpe) {
1663         m3 = 0;
1664     }
1665     /* m4 field was introduced with FPE */
1666     if (!fpe && m4_with_fpe) {
1667         m4 = 0;
1668     }
1669 
1670     /* Check for valid rounding modes. Mode 3 was introduced later. */
1671     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1672         gen_program_exception(s, PGM_SPECIFICATION);
1673         return NULL;
1674     }
1675 
1676     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1677 }
1678 
/* CFEB: convert short BFP to 32-bit signed integer; sets CC.  */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        /* Specification exception already raised.  */
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1690 
/* CFDB: convert long BFP to 32-bit signed integer; sets CC.  */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1702 
/* CFXB: convert extended BFP to 32-bit signed integer; sets CC.  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1714 
/* CGEB: convert short BFP to 64-bit signed integer; sets CC.  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1726 
/* CGDB: convert long BFP to 64-bit signed integer; sets CC.  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1738 
/* CGXB: convert extended BFP to 64-bit signed integer; sets CC.  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1750 
/* CLFEB: convert short BFP to 32-bit unsigned integer; sets CC.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1762 
/* CLFDB: convert long BFP to 32-bit unsigned integer; sets CC.  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1774 
/* CLFXB: convert extended BFP to 32-bit unsigned integer; sets CC.  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1786 
/* CLGEB: convert short BFP to 64-bit unsigned integer; sets CC.  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1798 
/* CLGDB: convert long BFP to 64-bit unsigned integer; sets CC.  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1810 
/* CLGXB: convert extended BFP to 64-bit unsigned integer; sets CC.  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1822 
/* CEGB: convert 64-bit signed integer to short BFP (no CC change).  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1833 
/* CDGB: convert 64-bit signed integer to long BFP (no CC change).  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1844 
/* CXGB: convert 64-bit signed integer to extended BFP (no CC change).  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1855 
/* CELGB: convert 64-bit unsigned integer to short BFP (no CC change).  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1866 
/* CDLGB: convert 64-bit unsigned integer to long BFP (no CC change).  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1877 
/* CXLGB: convert 64-bit unsigned integer to extended BFP (no CC change).  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1888 
/*
 * CHECKSUM (CKSM).  The helper returns a 128-bit pair: one half is the
 * result placed in o->out, the other the number of bytes consumed, by
 * which the R2 address is advanced and the R2+1 length reduced.
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* Split the pair into the result (-> out) and the consumed length.  */
    tcg_gen_extr_i128_i64(o->out, len, pair);

    /* Advance the operand address, reduce the remaining length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
1904 
/*
 * COMPARE LOGICAL (CLC): compare two memory operands of l+1 bytes.
 * Power-of-two sizes up to 8 bytes are inlined as two loads plus an
 * unsigned compare; all other lengths go through the helper.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32 of the size yields MO_8/MO_16/MO_32/MO_64 for 1/2/4/8.  */
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
1931 
/*
 * COMPARE LOGICAL LONG (CLCL): both operands are even/odd register
 * pairs; the helper does the work and produces the CC.
 */
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_clcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL LONG EXTENDED (CLCLE); o->in2 carries the pad operand. */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL LONG UNICODE (CLCLU); same shape as CLCLE. */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
1988 
/* COMPARE LOGICAL CHARACTERS UNDER MASK (CLM); helper computes the CC. */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    /* Only the low 32 bits of in1 participate in the comparison.  */
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1999 
/*
 * COMPARE LOGICAL STRING (CLST).  The helper consumes regs[0] and both
 * operand addresses, and hands back the updated addresses as a 128-bit
 * pair, which are unpacked into in2/in1 for write-back.
 */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2010 
op_cps(DisasContext * s,DisasOps * o)2011 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2012 {
2013     TCGv_i64 t = tcg_temp_new_i64();
2014     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2015     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2016     tcg_gen_or_i64(o->out, o->out, t);
2017     return DISAS_NEXT;
2018 }
2019 
/* COMPARE AND SWAP (CS/CSY/CSG); s->insn->data supplies the MemOp size. */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2042 
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit cmpxchg on the R1:R1+1 pair. */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    /* Nonzero OR of the XORs means the compare failed.  */
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2066 
/*
 * COMPARE AND SWAP AND STORE (CSST): implemented entirely in a helper;
 * a separate helper variant is used under parallel (MTTCG) translation.
 */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2081 
2082 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (CSP/CSPG); insn->data supplies the MemOp. */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask off the low bits of the second operand to form the address.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2122 #endif
2123 
/*
 * Decimal <-> binary conversions (mnemonics per the helper names:
 * CVB, CVBG, CVD, CVDG).  The decimal operand lives in memory; the
 * 64-bit (or 128-bit for the G forms) value is loaded/stored here and
 * the conversion itself is done in the helper.
 */
static DisasJumpType op_cvb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ);
    gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t);
    return DISAS_NEXT;
}

static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128);
    gen_helper_cvbg(o->out, tcg_env, t);
    return DISAS_NEXT;
}

static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    /* Only the low 32 bits of in1 are converted.  */
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}

static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    gen_helper_cvdg(t, o->in1);
    tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
    return DISAS_NEXT;
}
2157 
/*
 * COMPARE AND TRAP family.  Branch over the trap when the inverted
 * m3 condition holds; s->insn->data selects unsigned comparison
 * (the logical variants).
 */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Invert so the branch skips the trap when the trap condition fails. */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2176 
/*
 * Common handler for the CONVERT UTF instructions; s->insn->data encodes
 * the source/destination widths (12 = CU12, 21 = CU21, etc.).  The m3
 * (well-formedness check) field is only honored with the ETF3 enhancement
 * facility.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2223 
2224 #ifndef CONFIG_USER_ONLY
op_diag(DisasContext * s,DisasOps * o)2225 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2226 {
2227     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2228     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2229     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2230 
2231     gen_helper_diag(tcg_env, r1, r3, func_code);
2232     return DISAS_NEXT;
2233 }
2234 #endif
2235 
/*
 * Fixed-point divide family.  The 32-bit forms get a packed 64-bit
 * result from the helper; extr32 splits it with the low half going to
 * out2 and the high half to out.  The 64-bit forms return a 128-bit
 * pair instead.
 */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    /* Unpack: low 32 bits -> out2, high 32 bits -> out.  */
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    /* The 128-bit dividend arrives in the out/out2 register pair.  */
    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2267 
/* BFP divide (DEB/DDB/DXB per the helper names); all work in the helper. */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    /* Extended (128-bit) operands travel through the *_128 slots.  */
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2285 
/* EXTRACT ACCESS REGISTER: read access register r2 into the output. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

/* EXTRACT CPU ATTRIBUTE: this implementation reports no cache info. */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* EXTRACT FPC: read the floating-point control register. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2305 
/*
 * EXTRACT PSW: store the high half of the PSW mask (with the current CC
 * deposited at bits 12-13 of that half) into R1, and the low half of
 * the mask into R2 when R2 is nonzero.
 */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2325 
/*
 * EXECUTE (EX/EXRL): run the instruction at in2, modified by bits of R1.
 * The heavy lifting (fetching and re-dispatching the target insn) is in
 * the helper; here we only validate, sync state, and hand over.
 */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    /* The helper changes control flow, so PSW and CC must be current.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* R0 contributes no modification bits.  */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2352 
/*
 * LOAD FP INTEGER family (FIEBR/FIDBR/FIXBR per the helper names):
 * round to an integral value in the same FP format, honoring the
 * m3/m4 fields validated by fpinst_extract_m34().
 */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        /* Exception already generated by fpinst_extract_m34().  */
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2385 
/* FIND LEFTMOST ONE (FLOGR): R1 = leading-zero count, R1+1 = remainder. */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2405 
/*
 * INSERT CHARACTERS UNDER MASK (ICM/ICMH/ICMY).  Contiguous masks become
 * a single sized load plus one deposit; sparse masks become a sequence
 * of byte loads.  insn->data gives the base bit position (0 or 32 for
 * the low/high register half).
 */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        /* Place the loaded bytes at the position named by the mask.  */
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        /* Mask of the inserted bits, for the CC computation below.  */
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2470 
op_insi(DisasContext * s,DisasOps * o)2471 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2472 {
2473     int shift = s->insn->data & 0xff;
2474     int size = s->insn->data >> 8;
2475     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2476     return DISAS_NEXT;
2477 }
2478 
/*
 * INSERT PROGRAM MASK (IPM): assemble (cc << 4) | program-mask-nibble
 * (PSW mask bits extracted at position 40) and deposit the byte into
 * bits 24-31 of the output register.
 */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Materialize the current CC before reading it.  */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    /* Stack the CC above the program-mask nibble.  */
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2492 
2493 #ifndef CONFIG_USER_ONLY
/*
 * INVALIDATE DAT TABLE ENTRY (IDTE).  The m4 field is only meaningful
 * with the local-TLB-clearing facility; otherwise pass 0.
 */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_idte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

/* INVALIDATE PAGE TABLE ENTRY (IPTE); same m4 handling as IDTE. */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

/* INSERT STORAGE KEY EXTENDED (ISKE): fully handled by the helper. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2525 #endif
2526 
op_msa(DisasContext * s,DisasOps * o)2527 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2528 {
2529     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2530     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2531     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2532     TCGv_i32 t_r1, t_r2, t_r3, type;
2533 
2534     switch (s->insn->data) {
2535     case S390_FEAT_TYPE_KMA:
2536         if (r3 == r1 || r3 == r2) {
2537             gen_program_exception(s, PGM_SPECIFICATION);
2538             return DISAS_NORETURN;
2539         }
2540         /* FALL THROUGH */
2541     case S390_FEAT_TYPE_KMCTR:
2542         if (r3 & 1 || !r3) {
2543             gen_program_exception(s, PGM_SPECIFICATION);
2544             return DISAS_NORETURN;
2545         }
2546         /* FALL THROUGH */
2547     case S390_FEAT_TYPE_PPNO:
2548     case S390_FEAT_TYPE_KMF:
2549     case S390_FEAT_TYPE_KMC:
2550     case S390_FEAT_TYPE_KMO:
2551     case S390_FEAT_TYPE_KM:
2552         if (r1 & 1 || !r1) {
2553             gen_program_exception(s, PGM_SPECIFICATION);
2554             return DISAS_NORETURN;
2555         }
2556         /* FALL THROUGH */
2557     case S390_FEAT_TYPE_KMAC:
2558     case S390_FEAT_TYPE_KIMD:
2559     case S390_FEAT_TYPE_KLMD:
2560         if (r2 & 1 || !r2) {
2561             gen_program_exception(s, PGM_SPECIFICATION);
2562             return DISAS_NORETURN;
2563         }
2564         /* FALL THROUGH */
2565     case S390_FEAT_TYPE_PCKMO:
2566     case S390_FEAT_TYPE_PCC:
2567         break;
2568     default:
2569         g_assert_not_reached();
2570     };
2571 
2572     t_r1 = tcg_constant_i32(r1);
2573     t_r2 = tcg_constant_i32(r2);
2574     t_r3 = tcg_constant_i32(r3);
2575     type = tcg_constant_i32(s->insn->data);
2576     gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2577     set_cc_static(s);
2578     return DISAS_NEXT;
2579 }
2580 
/*
 * BFP compare-and-signal (KEB/KDB/KXB per the helper names); the helper
 * produces the CC directly.
 */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    /* Extended (128-bit) operands travel through the *_128 slots.  */
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2601 
/*
 * LOAD AND ADD family: atomically add in1 to memory at in2.  The atomic
 * op leaves the original memory value in in2; the addition is then
 * recomputed non-atomically so the CC machinery can see both operands.
 */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* After this, in2 holds the original memory value.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    if (addu64) {
        /* 64-bit logical add: also capture the carry-out in cc_src.  */
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}

/* Signed-overflow flavor (LAA/LAAG).  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}

/* Logical (carry) flavor (LAAL/LAALG).  */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}
2627 
/*
 * LOAD AND AND: atomically AND in1 into memory at in2; the atomic op
 * leaves the original memory value in in2.
 */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND OR: same pattern as op_lan with OR.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND EXCLUSIVE OR: same pattern as op_lan with XOR.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2660 
/*
 * BFP format-conversion loads (mnemonics per the helper names).  The
 * narrowing forms (LEDB/LDXB/LEXB) take the m3/m4 rounding fields via
 * fpinst_extract_m34(); the widening forms (LDEB/LXDB/LXEB) do not.
 */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        /* Exception already generated by fpinst_extract_m34().  */
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2711 
/*
 * Place a 32-bit value into the high half of the 64-bit output
 * (short FP values occupy the upper half of the register).
 */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2723 
/*
 * Generic memory loads of 1/2/4/8 bytes, signed or unsigned, extending
 * into the 64-bit output.  For the 4- and 8-byte forms, s->insn->data
 * may contribute extra MemOp flags (e.g. alignment requirements).
 */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2768 
op_lat(DisasContext * s,DisasOps * o)2769 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2770 {
2771     TCGLabel *lab = gen_new_label();
2772     store_reg32_i64(get_field(s, r1), o->in2);
2773     /* The value is stored even in case of trap. */
2774     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2775     gen_trap(s);
2776     gen_set_label(lab);
2777     return DISAS_NEXT;
2778 }
2779 
op_lgat(DisasContext * s,DisasOps * o)2780 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2781 {
2782     TCGLabel *lab = gen_new_label();
2783     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2784     /* The value is stored even in case of trap. */
2785     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2786     gen_trap(s);
2787     gen_set_label(lab);
2788     return DISAS_NEXT;
2789 }
2790 
/* LOAD HIGH AND TRAP: store into the high half of r1, then trap if the
   loaded value was zero. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2801 
/* LOAD LOGICAL AND TRAP: zero-extend a 32-bit load into out, then trap
   if the value was zero. */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2813 
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, then trap if
   the result was zero. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2824 
/*
 * LOAD ON CONDITION / SELECT: out = condition ? in2 : in1, where the
 * condition comes from the m3 (LOC*) or m4 (SEL*) mask field.
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* 32-bit comparison: materialize the condition as a 0/1 value,
           widen it, and select on it being non-zero. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2855 
2856 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from memory. */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2867 
/* LOAD CONTROL (64-bit): load control registers r1..r3 from memory. */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2878 
/* LOAD REAL ADDRESS: translate the virtual address in in2; the helper
   sets the CC according to the translation result. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2885 
/* LOAD PROGRAM PARAMETER: store in2 into env->pp. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2891 
/* LOAD PSW: load a short-format (8-byte) PSW and expand it into the
   normal mask/address pair before installing it. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}
2911 
/* LOAD PSW EXTENDED: load a 16-byte PSW (mask doubleword, then address
   doubleword) and install it. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
2927 #endif
2928 
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
2937 
/*
 * LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
 * consecutive words at in2.  The first and last words are loaded first
 * so that a page fault leaves the registers unmodified.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
2977 
/*
 * LOAD MULTIPLE HIGH: like op_lm32, but the loaded words are stored
 * into the high halves of registers r1..r3.
 */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3017 
/*
 * LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping mod 16) from
 * consecutive doublewords at in2; first/last loaded first so a page
 * fault leaves the registers unmodified.
 */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3054 
/* LOAD PAIR DISJOINT: load two operands "atomically"; under CF_PARALLEL
   this is done by stopping the world (EXCP_ATOMIC). */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3078 
/* LOAD PAIR FROM QUADWORD: single aligned 128-bit load into out_128. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3086 
3087 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load via the real-address MMU index, with
   the MemOp taken from the decode table. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3093 #endif
3094 
/* LOAD AND ZERO RIGHTMOST BYTE: copy in2 with the low 8 bits cleared. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3100 
/* LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining until the
   next 2^(m3+6)-byte boundary after addr1). */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    /* m3 > 6 (block size > 8K) is a specification exception. */
    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3117 
/* MONITOR CALL: class must fit in 4 bits; in user mode the insn is a NOP,
   in system mode the helper decides whether to raise a monitor event. */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3134 
/* Generic move: transfer ownership of the in2 temporary to out, so no
   TCG copy is emitted.  in2 is cleared to avoid a double reference. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3141 
/* MOVE (MVCDK-style move with access-register update): move in2 to out
   and set access register r1 according to the current address-space
   control, mirroring the ALET selection rules. */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            /* Copy the ALET from the base register's access register. */
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3173 
/* Move a register pair: transfer ownership of in1/in2 to out/out2
   without emitting any TCG code. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3182 
/* MOVE (character): byte copy of l1+1 bytes from in2 to addr1. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3190 
/* MOVE RIGHT TO LEFT: length comes from register 0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3196 
/* MOVE INVERSE: copy l1+1 bytes with descending source addresses. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3204 
op_mvcl(DisasContext * s,DisasOps * o)3205 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3206 {
3207     int r1 = get_field(s, r1);
3208     int r2 = get_field(s, r2);
3209     TCGv_i32 t1, t2;
3210 
3211     /* r1 and r2 must be even.  */
3212     if (r1 & 1 || r2 & 1) {
3213         gen_program_exception(s, PGM_SPECIFICATION);
3214         return DISAS_NORETURN;
3215     }
3216 
3217     t1 = tcg_constant_i32(r1);
3218     t2 = tcg_constant_i32(r2);
3219     gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3220     set_cc_static(s);
3221     return DISAS_NEXT;
3222 }
3223 
op_mvcle(DisasContext * s,DisasOps * o)3224 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3225 {
3226     int r1 = get_field(s, r1);
3227     int r3 = get_field(s, r3);
3228     TCGv_i32 t1, t3;
3229 
3230     /* r1 and r3 must be even.  */
3231     if (r1 & 1 || r3 & 1) {
3232         gen_program_exception(s, PGM_SPECIFICATION);
3233         return DISAS_NORETURN;
3234     }
3235 
3236     t1 = tcg_constant_i32(r1);
3237     t3 = tcg_constant_i32(r3);
3238     gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
3239     set_cc_static(s);
3240     return DISAS_NEXT;
3241 }
3242 
op_mvclu(DisasContext * s,DisasOps * o)3243 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3244 {
3245     int r1 = get_field(s, r1);
3246     int r3 = get_field(s, r3);
3247     TCGv_i32 t1, t3;
3248 
3249     /* r1 and r3 must be even.  */
3250     if (r1 & 1 || r3 & 1) {
3251         gen_program_exception(s, PGM_SPECIFICATION);
3252         return DISAS_NORETURN;
3253     }
3254 
3255     t1 = tcg_constant_i32(r1);
3256     t3 = tcg_constant_i32(r3);
3257     gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
3258     set_cc_static(s);
3259     return DISAS_NEXT;
3260 }
3261 
op_mvcos(DisasContext * s,DisasOps * o)3262 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3263 {
3264     int r3 = get_field(s, r3);
3265     gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
3266     set_cc_static(s);
3267     return DISAS_NEXT;
3268 }
3269 
3270 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: cross-space move; length/key come from regs l1/r3. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3279 
/* MOVE TO SECONDARY: cross-space move; length/key come from regs l1/r3. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3288 #endif
3289 
/* MOVE NUMERICS: copy only the low (numeric) nibbles of l1+1 bytes. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3297 
/* MOVE WITH OFFSET: shift the source digits one nibble left into dest. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3305 
/* MOVE PAGE: helper takes r0 (flags) plus the r1/r2 register numbers. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3315 
/* MOVE STRING: copy up to the terminator held in r0; CC from helper. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3325 
/* MOVE ZONES: copy only the high (zone) nibbles of l1+1 bytes. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3333 
/* MULTIPLY (low 64 bits): out = in1 * in2. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3339 
/* MULTIPLY LOGICAL (128-bit result): out:out2 = in1 * in2, unsigned. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3345 
/* MULTIPLY SINGLE (128-bit result): out:out2 = in1 * in2, signed. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3351 
/* MULTIPLY (short BFP): single-precision multiply via helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3357 
/* MULTIPLY (short to long BFP): widening single->double multiply. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3363 
/* MULTIPLY (long BFP): double-precision multiply via helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3369 
/* MULTIPLY (extended BFP): 128-bit multiply via helper. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3375 
/* MULTIPLY (long to extended BFP): widening double->quad multiply. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3381 
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3]. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3388 
/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f[r3]. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3395 
/* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f[r3]. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3402 
/* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f[r3]. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3409 
op_nabs(DisasContext * s,DisasOps * o)3410 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3411 {
3412     TCGv_i64 z = tcg_constant_i64(0);
3413     TCGv_i64 n = tcg_temp_new_i64();
3414 
3415     tcg_gen_neg_i64(n, o->in2);
3416     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3417     return DISAS_NEXT;
3418 }
3419 
/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3425 
/* LOAD NEGATIVE (long BFP): force the sign bit of the 64-bit value. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3431 
/* LOAD NEGATIVE (extended BFP): set the sign bit in the high half,
   pass the low half through unchanged. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3438 
/* AND (character): memory-to-memory AND of l1+1 bytes; CC from helper. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3447 
/* LOAD COMPLEMENT: out = -in2. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3453 
/* LOAD COMPLEMENT (short BFP): flip the sign bit of the 32-bit value. */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3459 
/* LOAD COMPLEMENT (long BFP): flip the sign bit of the 64-bit value. */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3465 
/* LOAD COMPLEMENT (extended BFP): flip the sign bit in the high half,
   pass the low half through unchanged. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3472 
/* OR (character): memory-to-memory OR of l1+1 bytes; CC from helper. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3481 
/* OR: out = in1 | in2. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3487 
/* OR immediate into a halfword-sized field of the register (OILL/OILH
   family): insn->data encodes the shift (low byte) and field size. */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3503 
/* OR to memory (OI/OIY): with interlocked-access facility 2, do the OR
   atomically in memory; otherwise load/modify/store. */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3524 
/* PACK: convert zoned decimal at in2 into packed decimal at addr1. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3532 
op_pka(DisasContext * s,DisasOps * o)3533 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3534 {
3535     int l2 = get_field(s, l2) + 1;
3536     TCGv_i32 l;
3537 
3538     /* The length must not exceed 32 bytes.  */
3539     if (l2 > 32) {
3540         gen_program_exception(s, PGM_SPECIFICATION);
3541         return DISAS_NORETURN;
3542     }
3543     l = tcg_constant_i32(l2);
3544     gen_helper_pka(tcg_env, o->addr1, o->in2, l);
3545     return DISAS_NEXT;
3546 }
3547 
op_pku(DisasContext * s,DisasOps * o)3548 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3549 {
3550     int l2 = get_field(s, l2) + 1;
3551     TCGv_i32 l;
3552 
3553     /* The length must be even and should not exceed 64 bytes.  */
3554     if ((l2 & 1) || (l2 > 64)) {
3555         gen_program_exception(s, PGM_SPECIFICATION);
3556         return DISAS_NORETURN;
3557     }
3558     l = tcg_constant_i32(l2);
3559     gen_helper_pku(tcg_env, o->addr1, o->in2, l);
3560     return DISAS_NEXT;
3561 }
3562 
op_popcnt(DisasContext * s,DisasOps * o)3563 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3564 {
3565     const uint8_t m3 = get_field(s, m3);
3566 
3567     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3568         tcg_gen_ctpop_i64(o->out, o->in2);
3569     } else {
3570         gen_helper_popcnt(o->out, o->in2);
3571     }
3572     return DISAS_NEXT;
3573 }
3574 
3575 #ifndef CONFIG_USER_ONLY
/* PURGE TLB. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
3581 #endif
3582 
/*
 * ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG):
 * rotate in2 left by i5, then insert bit range [i3, i4] of the rotated
 * value into out; the Z bit (i4 & 0x80) zeroes the untouched bits.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask the rotated input, mask the kept bits of
           the output, and merge.  */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3670 
/*
 * ROTATE THEN {AND,OR,XOR} SELECTED BITS (distinguished by op2 below).
 * Rotates R2 left by I5, then combines the bits selected by I3..I4 into
 * R1.  If the high bit of I3 is set, this is the test-only form: R1 is
 * left unmodified and only the condition code is produced.
 */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    TCGv_i64 orig_out;
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        tcg_debug_assert(o->out != NULL);
        orig_out = o->out;
        /* Operate on a scratch copy so the architectural register
           (orig_out) is not written back. */
        o->out = tcg_temp_new_i64();
        tcg_gen_mov_i64(o->out, orig_out);
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        /* I3 > I4: the selected field wraps around bit 63/0. */
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        /* Force unselected bits of in2 to 1 so they pass through AND. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        /* Clear unselected bits of in2 so they do not affect OR. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC: nonzero-ness of the selected bits of the result. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3726 
/* Byte-swap the low 16 bits of in2 into out; IZ/OZ assert/produce
   zero-extension of the unswapped high bits. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3732 
/* Byte-swap the low 32 bits of in2 into out, zero-extended. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3738 
/* Byte-swap all 64 bits of in2 into out. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3744 
op_rll32(DisasContext * s,DisasOps * o)3745 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3746 {
3747     TCGv_i32 t1 = tcg_temp_new_i32();
3748     TCGv_i32 t2 = tcg_temp_new_i32();
3749     TCGv_i32 to = tcg_temp_new_i32();
3750     tcg_gen_extrl_i64_i32(t1, o->in1);
3751     tcg_gen_extrl_i64_i32(t2, o->in2);
3752     tcg_gen_rotl_i32(to, t1, t2);
3753     tcg_gen_extu_i32_i64(o->out, to);
3754     return DISAS_NEXT;
3755 }
3756 
/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3762 
3763 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: the helper computes the CC. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3770 
/* SET ADDRESS SPACE CONTROL (FAST), handled entirely in the helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3777 #endif
3778 
/*
 * SET ADDRESSING MODE.  insn->data selects the mode: 0 = 24-bit,
 * 1 = 31-bit, anything else = 64-bit.  Writes the new mode into PSW
 * mask bits 31-32 and ends the TB.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit mode: valid addresses fit in 24 bits. */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit mode. */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit mode: no restriction. */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* Truncate the next-PC to the new mode's address width. */
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    /* Deposit the 2-bit mode into PSW mask bits 31-32. */
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3812 
/* SET ACCESS REGISTER: store the low 32 bits of in2 into AR r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3819 
/* SUBTRACT (short BFP): computation and exception checks in the helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3825 
/* SUBTRACT (long BFP): computation and exception checks in the helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3831 
/* SUBTRACT (extended BFP): operands and result are 128-bit pairs. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3837 
/* SQUARE ROOT (short BFP), via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3843 
/* SQUARE ROOT (long BFP), via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3849 
/* SQUARE ROOT (extended BFP): 128-bit operand and result, via helper. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
3855 
3856 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): the helper performs the call and sets the CC. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3863 
op_sigp(DisasContext * s,DisasOps * o)3864 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3865 {
3866     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3867     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3868 
3869     gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
3870     set_cc_static(s);
3871     return DISAS_NEXT;
3872 }
3873 #endif
3874 
/*
 * STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data).
 * Conditionally stores R1 (or its high half) to the second-operand
 * address when the M3 condition holds.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    /* Skip target for the not-taken condition. */
    gen_set_label(lab);
    return DISAS_NEXT;
}
3916 
/*
 * SHIFT LEFT SINGLE (arithmetic).  insn->data is the sign-bit index:
 * 31 for the 32-bit form, otherwise the 64-bit form.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        /* For the 32-bit form, shift the operand into the high half so
           the CC computation sees a properly positioned 64-bit value. */
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
3936 
/* SHIFT LEFT SINGLE LOGICAL. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3942 
/* SHIFT RIGHT SINGLE (arithmetic). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3948 
/* SHIFT RIGHT SINGLE LOGICAL. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3954 
/* SET FPC: validity checks and the FPC write happen in the helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}
3960 
/* SET FPC AND SIGNAL, via helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}
3966 
/* SET BFP ROUNDING MODE (2-bit form). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
3974 
/* SET BFP ROUNDING MODE (3-bit form). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
3982 
/* SET DFP ROUNDING MODE: write the 3-bit mode into FPC bits 4-6. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
3996 
/* SET PROGRAM MASK: load the CC and program mask from R1. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* The new CC is in bits 28-29 of the low word of in1. */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Move the 4-bit program mask into the PSW. */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4007 
/*
 * EXTRACT CPU TIME.  Loads the third operand into R3, stores
 * (first operand - CPU timer) into GR0 and the second-operand
 * address into GR1.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4036 
4037 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits of in2 become the PSW access key. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
4044 
/* SET STORAGE KEY EXTENDED, via helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4050 
/* Raise a specification exception if any reserved PSW mask bit is set
   after a mask-modifying instruction (SSM, STOSM, etc.). */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4061 
/* SET SYSTEM MASK: replace the top byte of the PSW mask with in2. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    /* Trap if reserved mask bits were set. */
    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4072 
/* STORE CPU ADDRESS: read the core id from the CPU state. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4078 #endif
4079 
/* STORE CLOCK: the helper produces the TOD value. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4087 
/* STORE CLOCK EXTENDED: build the 128-bit extended TOD value from the
   64-bit clock plus the TOD programmable register. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    /* Store the two halves consecutively. */
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4111 
4112 #ifndef CONFIG_USER_ONLY
/* SET CLOCK: the helper performs the update and sets the CC. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4119 
/* SET CLOCK COMPARATOR, via helper. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}
4125 
/* SET CLOCK PROGRAMMABLE FIELD: takes its operand implicitly in GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}
4131 
/* STORE CLOCK COMPARATOR, via helper. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}
4137 
op_stctg(DisasContext * s,DisasOps * o)4138 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4139 {
4140     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4141     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4142 
4143     gen_helper_stctg(tcg_env, r1, o->in2, r3);
4144     return DISAS_NEXT;
4145 }
4146 
op_stctl(DisasContext * s,DisasOps * o)4147 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4148 {
4149     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4150     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4151 
4152     gen_helper_stctl(tcg_env, r1, o->in2, r3);
4153     return DISAS_NEXT;
4154 }
4155 
/* STORE CPU ID: read the precomputed id from the CPU state. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4161 
/* SET CPU TIMER, via helper. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}
4167 
/* STORE FACILITY LIST, via helper. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}
4173 
/* STORE CPU TIMER, via helper. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}
4179 
/* STORE SYSTEM INFORMATION: GR0/GR1 carry the function code selectors. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4186 
/* SET PREFIX, via helper. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}
4192 
/* CANCEL SUBCHANNEL: subchannel id implicitly in GR1; helper sets CC. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4199 
/* CLEAR SUBCHANNEL: subchannel id implicitly in GR1; helper sets CC. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4206 
/* HALT SUBCHANNEL: subchannel id implicitly in GR1; helper sets CC. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4213 
/* MODIFY SUBCHANNEL: GR1 = subchannel id, in2 = SCHIB address. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4220 
/* RESET CHANNEL PATH: channel path id implicitly in GR1. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4227 
/* RESUME SUBCHANNEL: subchannel id implicitly in GR1. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4234 
/* SET ADDRESS LIMIT: operand implicitly in GR1; no CC change. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}
4240 
/* SET CHANNEL MONITOR: GR1/GR2 carry the implicit operands. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4246 
/* SIGNAL ADAPTER: not implemented; report subchannel not operational. */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4253 
/* STORE CHANNEL PATH STATUS: intentionally a no-op. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
4259 
/* START SUBCHANNEL: GR1 = subchannel id, in2 = ORB address. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4266 
/* STORE SUBCHANNEL: GR1 = subchannel id, in2 = SCHIB address. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4273 
/* STORE CHANNEL REPORT WORD, via helper; helper sets the CC. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4280 
/* TEST PENDING INTERRUPTION, via helper; helper sets the CC. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4287 
/* TEST SUBCHANNEL: GR1 = subchannel id, in2 = IRB address. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4294 
/* CHANNEL SUBSYSTEM CALL, via helper; helper sets the CC. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4301 
/* STORE PREFIX: read the prefix register, masked to its valid bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4308 
/*
 * STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac, STOSM otherwise).
 * Stores the current system-mask byte, then ANDs or ORs I2 into it.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        /* STNSM: AND the immediate into the top byte only. */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the top byte. */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4334 
/* STORE USING REAL ADDRESS: insn->data carries the MemOp size. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    /* PER storage-alteration tracing for real-address stores. */
    if (s->base.tb->flags & FLAG_MASK_PER_STORE_REAL) {
        update_cc_op(s);
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env, tcg_constant_i32(s->ilen));
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4347 #endif
4348 
/* STORE FACILITY LIST EXTENDED, via helper; helper sets the CC. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4355 
/* Store the low byte of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}
4361 
/* Store the low 16 bits of in1 at address in2, big-endian. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}
4367 
/* 32-bit store; insn->data may add extra MemOp flags (e.g. alignment). */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
4374 
/* 64-bit store; insn->data may add extra MemOp flags (e.g. alignment). */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4381 
op_stam(DisasContext * s,DisasOps * o)4382 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4383 {
4384     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4385     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4386 
4387     gen_helper_stam(tcg_env, r1, o->in2, r3);
4388     return DISAS_NEXT;
4389 }
4390 
/*
 * STORE CHARACTERS UNDER MASK.  M3 selects which bytes of the register
 * field (whose top byte position is insn->data) are stored, contiguous
 * in memory.  Contiguous masks become one wide store; other masks fall
 * back to a byte-by-byte sequence.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least-significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            /* Walk the mask from its high bit down. */
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
4438 
/* STORE MULTIPLE: store registers r1..r3 (with wraparound at 15) at
   consecutive addresses; insn->data is the element size (4 or 8). */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}
4458 
/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
   (with wraparound at 15) at consecutive word addresses. */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        /* Shift left by 32: MO_TEUL then stores the original high half. */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4478 
/* STORE PAIR TO QUADWORD: one aligned atomic 128-bit store built from
   the register pair (out2:out). */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
4488 
op_srst(DisasContext * s,DisasOps * o)4489 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4490 {
4491     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4492     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4493 
4494     gen_helper_srst(tcg_env, r1, r2);
4495     set_cc_static(s);
4496     return DISAS_NEXT;
4497 }
4498 
op_srstu(DisasContext * s,DisasOps * o)4499 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4500 {
4501     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4502     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4503 
4504     gen_helper_srstu(tcg_env, r1, r2);
4505     set_cc_static(s);
4506     return DISAS_NEXT;
4507 }
4508 
/* Plain subtraction; any CC computation is handled by the insn hooks. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4514 
/* 64-bit logical subtract, leaving the borrow (0,-1) in cc_src. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4521 
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    /* Each case may fall through into the next conversion step;
       the order of the cases is load-bearing. */
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        /* Materialize the CC first, then treat it as CC_OP_STATIC. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4543 
/* 32-bit SUBTRACT WITH BORROW. */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}
4553 
/* 64-bit SUBTRACT WITH BORROW, tracking the outgoing borrow in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4568 
/* SUPERVISOR CALL: record the SVC code and instruction length, then
   raise the SVC exception. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception handler needs an up-to-date PSW and CC. */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4585 
op_tam(DisasContext * s,DisasOps * o)4586 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4587 {
4588     int cc = 0;
4589 
4590     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4591     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4592     gen_op_movi_cc(s, cc);
4593     return DISAS_NEXT;
4594 }
4595 
/* TEST DATA CLASS (short BFP): helper computes the CC. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4602 
/* TEST DATA CLASS (long BFP): helper computes the CC. */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4609 
/* TEST DATA CLASS (extended BFP): 128-bit operand; helper computes CC. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4616 
4617 #ifndef CONFIG_USER_ONLY
4618 
/* TEST BLOCK, via helper; helper computes the CC. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4625 
/* TEST PROTECTION, via helper; helper computes the CC. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4632 
4633 #endif
4634 
/* TEST DECIMAL: helper checks the packed-decimal field at addr1.
   The l1 field encodes length - 1, hence the +1. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4643 
/* TRANSLATE: helper translates l1+1 bytes at addr1 via table at in2. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4652 
/*
 * TRANSLATE AND TEST EXTENDED: helper returns the updated address and
 * length as an i128 pair, which is unpacked back into out/out2.
 */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}
4662 
/* TRANSLATE AND TEST: scan forward; helper sets cc and GR1/GR2. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4671 
/* TRANSLATE AND TEST REVERSE: as TRT but scanning backwards. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4680 
/*
 * TROO/TROT/TRTO/TRTT (TRANSLATE ONE/TWO TO ONE/TWO): the low two
 * opcode bits select the source/destination character sizes, passed
 * to the helper via 'sizes'.  'tst' is the test character from GR0,
 * or -1 when the ETF2-enhancement m3 bit requests no test.
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* Without the ETF2 enhancement, the m3 field is ignored. */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* No test character: -1 can never match a source character. */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        /* Truncate GR0 to the destination character size. */
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4707 
/*
 * TEST AND SET: atomically exchange the byte at in2 with 0xff;
 * cc is the previous value's leftmost bit (bit 7 of the i32).
 */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 ff = tcg_constant_i32(0xff);
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4718 
/* UNPACK: helper converts packed decimal at in2 to zoned at addr1. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
4726 
/* UNPACK ASCII: l1 encodes length - 1; lengths above 32 bytes are a
   specification exception. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4742 
/* UNPACK UNICODE: like UNPKA but 2-byte characters, so the length
   must be even and no more than 64 bytes. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4758 
4759 
/*
 * EXCLUSIVE OR (character): memory-to-memory XOR of l1+1 bytes.
 * When both operands name the same location, x ^ x == 0, so the
 * operation degenerates to zeroing memory; inline that common idiom
 * for short lengths, otherwise defer to the helper.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* l encodes length - 1; emit stores from widest to narrowest. */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            /* Only advance the address if more bytes remain. */
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        /* The result is all zeros, so cc is always 0. */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4811 
/* EXCLUSIVE OR (register/memory forms): out = in1 ^ in2. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4817 
/*
 * XOR IMMEDIATE (XIHF/XILF etc.): insn->data packs the field width
 * (high byte) and bit offset (low byte) of the register slice to xor.
 */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* Align the immediate with the targeted register slice. */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4833 
/*
 * XOR to memory (XI and friends): with the interlocked-access
 * facility this is done atomically with a fetch-xor; otherwise it is
 * a plain load/xor/store sequence.
 */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4854 
/* Produce a constant zero output (used e.g. for zeroing forms). */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}
4860 
/* Produce a constant zero in both output halves (register pair). */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4867 
4868 #ifndef CONFIG_USER_ONLY
/* CLP (PCI CALL LOGICAL PROCESSOR): fully handled by the helper. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4877 
/* PCILG (PCI LOAD): fully handled by the helper; cc set there. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4887 
/* PCISTG (PCI STORE): fully handled by the helper; cc set there. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4897 
/* STPCIFC (STORE PCI FUNCTION CONTROLS): 'ar' is the access register
   number taken from the b2 field. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4907 
/* SIC (SET INTERRUPTION CONTROLS): fully handled by the helper. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4913 
/* RPCIT (REFRESH PCI TRANSLATIONS): fully handled by the helper. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4923 
/* PCISTB (PCI STORE BLOCK): 'ar' is the access register from b2. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4934 
/* MPCIFC (MODIFY PCI FUNCTION CONTROLS): 'ar' is the access register
   from b2. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4944 #endif
4945 
4946 #include "translate_vx.c.inc"
4947 
4948 /* ====================================================================== */
4949 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4950    the original inputs), update the various cc data structures in order to
4951    be able to compute the new condition code.  */
4952 
/* CC for 32-bit LOAD POSITIVE: record result under CC_OP_ABS_32. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
4957 
/* CC for 64-bit LOAD POSITIVE: record result under CC_OP_ABS_64. */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}
4962 
/* CC for 32-bit signed add: record both inputs and the result. */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
4967 
/* CC for 64-bit signed add: record both inputs and the result. */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
4972 
/* CC for 32-bit unsigned add: carry is in the high half of the
   64-bit result, the 32-bit result in the low half. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}
4979 
/* CC for 64-bit unsigned add: carry already left in cc_src by the op. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}
4984 
/* CC for 32-bit signed compare of in1 vs in2. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}
4989 
/* CC for 64-bit signed compare of in1 vs in2. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}
4994 
/* CC for 32-bit unsigned compare of in1 vs in2. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}
4999 
/* CC for 64-bit unsigned compare of in1 vs in2. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
5004 
/* CC from a short (32-bit) FP result. */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}
5009 
/* CC from a long (64-bit) FP result. */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}
5014 
/* CC from an extended (128-bit) FP result split across out/out2. */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
5019 
/* CC for 32-bit LOAD NEGATIVE: record result under CC_OP_NABS_32. */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}
5024 
/* CC for 64-bit LOAD NEGATIVE: record result under CC_OP_NABS_64. */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}
5029 
/* CC for 32-bit LOAD COMPLEMENT: record result under CC_OP_COMP_32. */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
5034 
/* CC for 64-bit LOAD COMPLEMENT: record result under CC_OP_COMP_64. */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}
5039 
/* CC zero/non-zero test on the low 32 bits of the result. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
5045 
/* CC zero/non-zero test on the full 64-bit result. */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
5050 
/* CC signed compare of the 32-bit result against zero. */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}
5055 
/* CC signed compare of the 64-bit result against zero. */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}
5060 
/* CC for 32-bit signed subtract: record both inputs and the result. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}
5065 
/* CC for 64-bit signed subtract: record both inputs and the result. */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}
5070 
/* CC for 32-bit unsigned subtract: the sign-extended high half
   carries the borrow indication, the low half the 32-bit result. */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}
5077 
/* CC for 64-bit unsigned subtract: borrow already left in cc_src. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}
5082 
/* CC for 32-bit TEST UNDER MASK: record value and mask. */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}
5087 
/* CC for 64-bit TEST UNDER MASK: record value and mask. */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
5092 
/* CC for 32-bit signed multiply: record the result. */
static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}
5097 
/* CC for 64-bit signed multiply producing a 128-bit result. */
static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5103 
5104 /* ====================================================================== */
5105 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5106    with the TCG register to which we will write.  Used in combination with
5107    the "wout" generators, in some cases we need a new temporary, and in
5108    some cases we can write to a TCG global.  */
5109 
/* Output is a fresh 64-bit temporary. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0
5115 
/* Output is a fresh pair of 64-bit temporaries. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0
5122 
/* Output is a fresh 128-bit temporary. */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0
5128 
/* Output aliases the r1 global register directly. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0
5134 
/* Output aliases the even/odd register pair r1/r1+1 directly. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5142 
5143 /* ====================================================================== */
5144 /* The "Write OUTput" generators.  These generally perform some non-trivial
5145    copy of data to TCG globals, or to main memory.  The trivial cases are
5146    generally handled by having a "prep" generator install the TCG global
5147    as the destination of the operation.  */
5148 
/* Write the 64-bit result to register r1. */
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0
5154 
/* Write the secondary result (out2) to register r1. */
static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0
5160 
/* Insert the low 8 bits of the result into register r1. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0
5167 
/* Insert the low 16 bits of the result into register r1. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0
5174 
/* Write the low 32 bits of the result to the low half of r1. */
static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0
5180 
/* Write the low 32 bits of the result to the high half of r1. */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0
5186 
/* Write two 32-bit results to the even/odd pair r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even
5194 
/* Split one 64-bit result across the pair: high 32 bits to r1,
   low 32 bits to r1+1. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
5204 
/* Split a 128-bit result: high half to r1, low half to r1+1. */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even
5211 
/* Write two 32-bit results to the even/odd pair r3/r3+1. */
static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even
5219 
/* Write two 64-bit results to the even/odd pair r3/r3+1. */
static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even
5227 
/* Write a short (32-bit) FP result to float register r1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0
5233 
/* Write a long (64-bit) FP result to float register r1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0
5239 
/* Write an extended (128-bit) FP result to the float register pair
   f1/f1+2, also splitting it into out/out2 so cout_f128 can use it. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128
5254 
/* Write an extended FP result already held in out/out2 to the
   float register pair f1/f1+2. */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128
5262 
/* Write a 32-bit result to r1 only when r1 and r2 differ (r1 == r2
   would make the store a no-op by definition of the insn). */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0
5270 
/* As wout_cond_r1r2_32, but for short FP registers. */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0
5278 
/* Store the low byte of the result at the first-operand address. */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0
5284 
/* Store the low 16 bits (big-endian) at the first-operand address. */
static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0
5290 
5291 #ifndef CONFIG_USER_ONLY
/* As wout_m1_16, but with an alignment-checked store. */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
5297 #endif
5298 
/* Store the low 32 bits (big-endian) at the first-operand address. */
static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0
5304 
5305 #ifndef CONFIG_USER_ONLY
/* As wout_m1_32, but with an alignment-checked store. */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
5311 #endif
5312 
/* Store the 64-bit result (big-endian) at the first-operand address. */
static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0
5318 
5319 #ifndef CONFIG_USER_ONLY
/* As wout_m1_64, but with an alignment-checked store. */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
5325 #endif
5326 
/* Store the low 32 bits at the second-operand address (in2). */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0
5332 
/* Write the second input operand to register r1 (move-style insns). */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0
5338 
/* Write the low 32 bits of the second input operand to r1. */
static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5344 
5345 /* ====================================================================== */
5346 /* The "INput 1" generators.  These load the first operand to an insn.  */
5347 
/* in1 = copy of register r1. */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0
5353 
/* in1 aliases register r1 directly (no copy). */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0
5359 
/* in1 = low 32 bits of r1, sign-extended. */
static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0
5366 
/* in1 = low 32 bits of r1, zero-extended. */
static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0
5373 
/* in1 = high 32 bits of r1 (logical shift right by 32). */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0
5380 
/* in1 = copy of register r1+1 (odd half of an even/odd pair). */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even
5386 
/* in1 aliases register r1+1 directly (no copy). */
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even
5392 
/* in1 = low 32 bits of r1+1, sign-extended. */
static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even
5399 
/* in1 = low 32 bits of r1+1, zero-extended. */
static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even
5406 
/* in1 = 64-bit value assembled from the pair: r1 supplies the high
   32 bits, r1+1 the low 32 bits. */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even
5414 
/* in1 = copy of register r2. */
static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0
5420 
/* in1 = high 32 bits of r2 (logical shift right by 32). */
static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0
5427 
/* in1 = low 32 bits of r2, zero-extended. */
static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0
5434 
/* in1 = copy of register r3. */
static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0
5440 
/* in1 aliases register r3 directly (no copy). */
static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0
5446 
/* in1 = low 32 bits of r3, sign-extended. */
static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0
5453 
/* in1 = low 32 bits of r3, zero-extended. */
static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0
5460 
/* in1 = 64-bit value assembled from the pair r3 (high) / r3+1 (low). */
static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even
5468 
/* in1 = high 32 bits of r3 (logical shift right by 32). */
static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0
5475 
/* in1 = short (32-bit) FP value from float register r1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0
5481 
/* in1 = long (64-bit) FP value from float register r1. */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0
5487 
/* in1_128 = extended (128-bit) FP value from register pair r1. */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128
5493 
5494 /* Load the high double word of an extended (128-bit) format FP number */
/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128
5500 
/* in1 = long (64-bit) FP value from float register r3. */
static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0
5506 
/* addr1 = effective address of the first operand (b1 + d1). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0
5512 
/* addr1 = effective address of the second operand (x2 + b2 + d2);
   the index register is only present in some formats. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0
5519 
/* in1 = zero-extended byte loaded from the first-operand address. */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0
5527 
/* in1 = sign-extended 16-bit big-endian load from addr1. */
static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0
5535 
in1_m1_16u(DisasContext * s,DisasOps * o)5536 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5537 {
5538     in1_la1(s, o);
5539     o->in1 = tcg_temp_new_i64();
5540     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5541 }
5542 #define SPEC_in1_m1_16u 0
5543 
in1_m1_32s(DisasContext * s,DisasOps * o)5544 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5545 {
5546     in1_la1(s, o);
5547     o->in1 = tcg_temp_new_i64();
5548     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5549 }
5550 #define SPEC_in1_m1_32s 0
5551 
in1_m1_32u(DisasContext * s,DisasOps * o)5552 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5553 {
5554     in1_la1(s, o);
5555     o->in1 = tcg_temp_new_i64();
5556     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5557 }
5558 #define SPEC_in1_m1_32u 0
5559 
in1_m1_64(DisasContext * s,DisasOps * o)5560 static void in1_m1_64(DisasContext *s, DisasOps *o)
5561 {
5562     in1_la1(s, o);
5563     o->in1 = tcg_temp_new_i64();
5564     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5565 }
5566 #define SPEC_in1_m1_64 0
5567 
5568 /* ====================================================================== */
5569 /* The "INput 2" generators.  These load the second operand to an insn.  */
5570 
/* Use GPR r1 directly (no copy); the insn must not clobber in2.  */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

/* Zero-extended low 16 bits of GPR r1.  */
static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Zero-extended low 32 bits of GPR r1.  */
static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit value formed from the 32-bit halves of the even/odd GPR pair
   r1:r1+1; SPEC_r1_even enforces the pair alignment.  */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

/* Use GPR r2 directly (no copy); the insn must not clobber in2.  */
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

/* Load GPR r2 only when r2 != 0; otherwise in2 stays NULL.  */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* The remaining in2_r2_* / in2_r3_* variants load a GPR with the named
   extension or shift applied.  */

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* 128-bit value from the even/odd GPR pair r3:r3+1.  */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

/* High 32 bits of GPR r3, shifted down.  */
static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

/* High 32 bits of GPR r2, shifted down.  */
static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Low 32 bits of FP register r2, widened into an i64.  */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the 128-bit FP register pair starting at r2.  */
static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* GPR r2 treated as an address: copied and wrapped per addressing mode.  */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0
5731 
/*
 * Same as in2_ra2, but with the even-register specification check
 * (SPEC_r2_even) applied.  Note: a plain call, not "return in2_ra2(...)";
 * returning a void expression from a void function is a constraint
 * violation in strict ISO C (C11 6.8.6.4p1).
 */
static void in2_ra2_E(DisasContext *s, DisasOps *o)
{
    in2_ra2(s, o);
}
#define SPEC_in2_ra2_E SPEC_r2_even
5737 
/* Compute the effective address x2+b2+d2 directly into in2.  */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/*
 * Resolve the i2 branch-destination field: disas_jdest either yields an
 * immediate (then the target is pc-relative, pc_next + 2 * imm, returned
 * as a constant) or fills RI2 with a TCGv holding the destination.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}
5758 
/* Branch destination from the relative-immediate i2 field.  */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

/* Shift amount operand: (b2 + d2) & 63, or just d2 & 63 when b2 is 0.
   In the register case the TCGv from get_address is masked in place.  */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

/* The in2_m2_* helpers load a memory operand of the named width from
   the x2+b2+d2 address, reusing the address TCGv as the destination.  */

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_32u, but with an alignment check on the access.  */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/* 64-bit load whose result is then wrapped per the addressing mode.  */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_64, but with an alignment check on the access.  */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* The in2_mri2_* helpers load from the pc-relative address produced by
   gen_ri2; the 32- and 64-bit forms require natural alignment.  */

static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

/* The in2_i2* helpers materialize the i2 immediate field as a constant,
   with the named truncation or left shift applied.  */

static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw left-aligned instruction word itself, as a constant.  */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
5930 
5931 /* ====================================================================== */
5932 
5933 /* Find opc within the table of insns.  This is formulated as a switch
5934    statement so that (1) we get compile-time notice of cut-paste errors
5935    for duplicated opcodes, and (2) the compiler generates the binary
5936    search tree, rather than us having to post-process the table.  */
5937 
5938 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5939     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5940 
5941 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5942     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5943 
5944 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5945     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5946 
5947 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5948 
5949 enum DisasInsnEnum {
5950 #include "insn-data.h.inc"
5951 };
5952 
5953 #undef E
5954 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
5955     .opc = OPC,                                                             \
5956     .flags = FL,                                                            \
5957     .fmt = FMT_##FT,                                                        \
5958     .fac = FAC_##FC,                                                        \
5959     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
5960     .name = #NM,                                                            \
5961     .help_in1 = in1_##I1,                                                   \
5962     .help_in2 = in2_##I2,                                                   \
5963     .help_prep = prep_##P,                                                  \
5964     .help_wout = wout_##W,                                                  \
5965     .help_cout = cout_##CC,                                                 \
5966     .help_op = op_##OP,                                                     \
5967     .data = D                                                               \
5968  },
5969 
5970 /* Allow 0 to be used for NULL in the table below.  */
5971 #define in1_0  NULL
5972 #define in2_0  NULL
5973 #define prep_0  NULL
5974 #define wout_0  NULL
5975 #define cout_0  NULL
5976 #define op_0  NULL
5977 
5978 #define SPEC_in1_0 0
5979 #define SPEC_in2_0 0
5980 #define SPEC_prep_0 0
5981 #define SPEC_wout_0 0
5982 
5983 /* Give smaller names to the various facilities.  */
5984 #define FAC_Z           S390_FEAT_ZARCH
5985 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5986 #define FAC_DFP         S390_FEAT_DFP
5987 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
5988 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
5989 #define FAC_EE          S390_FEAT_EXECUTE_EXT
5990 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
5991 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
5992 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
5993 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
5994 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5995 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
5996 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
5997 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
5998 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5999 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6000 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6001 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6002 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6003 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6004 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6005 #define FAC_SFLE        S390_FEAT_STFLE
6006 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6007 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6008 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6009 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6010 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6011 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6012 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6013 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6014 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6015 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6016 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6017 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6018 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6019 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6020 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6021 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6022 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6023 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6024 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6025 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6026 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6027 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6028 
/* Table of insn descriptors, generated from insn-data.h.inc in the same
   order as enum DisasInsnEnum above.  */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined (major << 8 | minor) opcode to its descriptor, or NULL
   for an unimplemented opcode.  The switch cases are generated from
   insn-data.h.inc, letting the compiler build the search tree.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
6050 
6051 /* Extract a field from the insn.  The INSN should be left-aligned in
6052    the uint64_t so that we can more easily utilize the big-bit-endian
6053    definitions we extract from the Principals of Operation.  */
6054 
/*
 * Extract one operand field described by F from the left-aligned
 * instruction word INSN, apply its sign-extension/recombination rule,
 * and store the result into the compressed field array in O.
 * Fields with size 0 are absent from the format and are skipped.
 */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /*
         * Recombine as dh:dl with dh sign-extended.  Do the shift on an
         * unsigned operand: left-shifting a negative value is undefined
         * behavior in ISO C, while the int8_t -> uint32_t conversion is
         * well-defined and produces the same (sign-extended) bit pattern.
         */
        r = ((uint32_t)(int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6111 
6112 /* Lookup the insn at the current PC, extracting the operands into O and
6113    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6114 
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        uint64_t be_insn;

        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;

        /* Register insn bytes with translator so plugins work. */
        be_insn = cpu_to_be64(insn);
        translator_fake_ld(&s->base, &be_insn, get_ilen(op));
    } else {
        /* Fetch 2, 4 or 6 bytes, leaving the insn left-aligned in INSN.  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    /* pc_tmp is the address of the insn following this one.  */
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6220 
/* True if REG is an additional-floating-point register, i.e. anything
   other than the four basic FP registers 0, 2, 4 and 6.  */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6225 
/* True if REG may start a 128-bit FP register pair.  The valid starts
   are 0,1,4,5,8,9,12,13 -- exactly those numbers with bit 1 clear.  */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6231 
/*
 * Decode and translate the single insn at s->base.pc_next.
 * Returns the resulting DisasJumpType; illegal, privileged, data and
 * specification exceptions all yield DISAS_NORETURN.  On every path
 * s->base.pc_next is advanced past the insn.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->base.insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_IFETCH) {
        /* With ifetch set, psw_addr and cc_op are always up-to-date. */
        gen_helper_per_ifetch(tcg_env, tcg_constant_i32(s->ilen));
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  The helpers run in a fixed pipeline:
       load inputs, prepare outputs, operate, write outputs, set CC.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
        if (ret == DISAS_NORETURN) {
            goto out;
        }
    }
    if (insn->help_wout) {
        insn->help_wout(s, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_IFETCH) {
        switch (ret) {
        case DISAS_TOO_MANY:
            s->base.is_jmp = DISAS_PC_CC_UPDATED;
            /* fall through */
        case DISAS_NEXT:
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            break;
        default:
            break;
        }
        update_cc_op(s);
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6377 
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    /* A non-zero cs_base carries the insn saved by EXECUTE; such a TB
       must exit to the main loop when done (see s390x_tr_tb_stop).  */
    dc->ex_value = dc->base.tb->cs_base;
    dc->exit_to_mainloop = dc->ex_value;
}
6392 
/* Translator hook: nothing to emit at the start of a TB.  */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6396 
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    /* (Param 2 is filled in later by translate_one with s->ilen.)  */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
}
6404 
get_next_pc(CPUS390XState * env,DisasContext * s,uint64_t pc)6405 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6406                                 uint64_t pc)
6407 {
6408     uint64_t insn = translator_lduw(env, &s->base, pc);
6409 
6410     return pc + get_ilen((insn >> 8) & 0xff);
6411 }
6412 
/*
 * Translate one guest insn.  After a normal DISAS_NEXT result, force the
 * TB to end early when we are inside an EXECUTE, or when this insn or
 * the next one would cross a page boundary.
 */
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cpu_env(cs);
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp != DISAS_NEXT) {
        return;
    }

    if (dc->ex_value
        || !translator_is_same_page(dcbase, dc->base.pc_next)
        || !translator_is_same_page(dcbase,
                                    get_next_pc(env, dc, dc->base.pc_next))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
6427 
/*
 * Finish the TB: depending on why translation stopped, synchronize
 * psw_addr and/or cc_op into the CPU state, then emit the TB exit.
 * The cases deliberately fall through from "nothing synced yet"
 * (DISAS_TOO_MANY) down to "everything already synced"
 * (DISAS_PC_CC_UPDATED).
 */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* Control flow already ended inside the insn; nothing to emit. */
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6455 
s390x_tr_disas_log(const DisasContextBase * dcbase,CPUState * cs,FILE * logfile)6456 static bool s390x_tr_disas_log(const DisasContextBase *dcbase,
6457                                CPUState *cs, FILE *logfile)
6458 {
6459     DisasContext *dc = container_of(dcbase, DisasContext, base);
6460 
6461     if (unlikely(dc->ex_value)) {
6462         /* The ex_value has been recorded with translator_fake_ld. */
6463         fprintf(logfile, "IN: EXECUTE\n");
6464         target_disas(logfile, cs, &dc->base);
6465         return true;
6466     }
6467     return false;
6468 }
6469 
/* Hooks wiring the s390x frontend into the generic translator loop. */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6478 
/*
 * Entry point for code translation: run the generic translator loop
 * over guest code starting at @pc, using the s390x hooks above, with
 * at most *max_insns instructions per TB.
 */
void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6486 
/*
 * Rebuild CPU state from the values recorded at insn_start:
 * data[0] = psw.addr, data[1] = cc_op, data[2] = insn length.
 */
void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUS390XState *env = cpu_env(cs);
    int recorded_cc_op = data[1];

    env->psw.addr = data[0];

    /* DYNAMIC/STATIC markers carry no cc_op value worth restoring. */
    if (recorded_cc_op != CC_OP_DYNAMIC && recorded_cc_op != CC_OP_STATIC) {
        env->cc_op = recorded_cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}
6504