xref: /qemu/target/s390x/tcg/translate.c (revision 6f8e6aed81277ec14d5a5dcafdd00dadf7ac465c)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "exec/exec-all.h"
35 #include "tcg/tcg-op.h"
36 #include "tcg/tcg-op-gvec.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/helper-proto.h"
40 #include "exec/helper-gen.h"
41 
42 #include "exec/translator.h"
43 #include "exec/translation-block.h"
44 #include "exec/log.h"
45 #include "qemu/atomic128.h"
46 
47 #define HELPER_H "helper.h"
48 #include "exec/helper-info.c.inc"
49 #undef  HELPER_H
50 
51 
52 /* Information that (most) every instruction needs to manipulate.  */
53 typedef struct DisasContext DisasContext;
54 typedef struct DisasInsn DisasInsn;
55 typedef struct DisasFields DisasFields;
56 
57 /*
58  * Define a structure to hold the decoded fields.  We'll store each inside
59  * an array indexed by an enum.  In order to conserve memory, we'll arrange
60  * for fields that do not exist at the same time to overlap, thus the "C"
61  * for compact.  For checking purposes there is an "O" for original index
62  * as well that will be applied to availability bitmaps.
63  */
64 
/*
 * Original ("O") field indices.  One bit per index is recorded in
 * DisasFields.presentO so have_field() can test availability.
 */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
93 
/*
 * Compact ("C") field indices.  Fields that never occur in the same
 * instruction format share a slot in DisasFields.c[], so only
 * NUM_C_FIELD entries are needed in total.
 */
enum DisasFieldIndexC {
    /* slot 0 */
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    /* slot 1 */
    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    /* slot 2 */
    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    /* slot 3 */
    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    /* slot 4 */
    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    /* slot 5 */
    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    /* slot 6 */
    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
130 
/* Decoded operand fields of one instruction. */
struct DisasFields {
    uint64_t raw_insn;      /* raw instruction bytes */
    unsigned op:8;          /* opcode (first byte) */
    unsigned op2:8;         /* secondary opcode byte, where the format has one */
    unsigned presentC:16;   /* bitmap of occupied compact ("C") slots */
    unsigned int presentO;  /* bitmap of present FLD_O_* indices (see have_field1) */
    int c[NUM_C_FIELD];     /* field values, indexed by DisasFieldIndexC */
};
139 
/* Per-translation-block disassembly state. */
struct DisasContext {
    DisasContextBase base;      /* common translator state; must be first */
    const DisasInsn *insn;      /* decode-table entry for the current insn */
    DisasFields fields;         /* decoded fields of the current insn */
    uint64_t ex_value;          /* NOTE(review): presumably the EXECUTE target
                                   value — set outside this chunk; confirm */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length of the current insn, in bytes */
    enum cc_op cc_op;           /* symbolic state of the condition code */
    bool exit_to_mainloop;      /* force an exit to the main loop at TB end */
};
155 
/*
 * Information carried about a condition to be evaluated: the TCG condition
 * and the pair of operands (either 32-bit or 64-bit, per is_64) to compare.
 */
typedef struct {
    TCGCond cond:8;
    bool is_64;                 /* selects u.s64 vs u.s32 below */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
165 
166 #ifdef DEBUG_INLINE_BRANCHES
167 static uint64_t inline_branch_hit[CC_OP_MAX];
168 static uint64_t inline_branch_miss[CC_OP_MAX];
169 #endif
170 
171 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
172 {
173     if (s->base.tb->flags & FLAG_MASK_32) {
174         if (s->base.tb->flags & FLAG_MASK_64) {
175             tcg_gen_movi_i64(out, pc);
176             return;
177         }
178         pc |= 0x80000000;
179     }
180     assert(!(s->base.tb->flags & FLAG_MASK_64));
181     tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
182 }
183 
184 static TCGv_i64 psw_addr;
185 static TCGv_i64 psw_mask;
186 static TCGv_i64 gbea;
187 
188 static TCGv_i32 cc_op;
189 static TCGv_i64 cc_src;
190 static TCGv_i64 cc_dst;
191 static TCGv_i64 cc_vr;
192 
193 static char cpu_reg_names[16][4];
194 static TCGv_i64 regs[16];
195 
196 void s390x_translate_init(void)
197 {
198     int i;
199 
200     psw_addr = tcg_global_mem_new_i64(tcg_env,
201                                       offsetof(CPUS390XState, psw.addr),
202                                       "psw_addr");
203     psw_mask = tcg_global_mem_new_i64(tcg_env,
204                                       offsetof(CPUS390XState, psw.mask),
205                                       "psw_mask");
206     gbea = tcg_global_mem_new_i64(tcg_env,
207                                   offsetof(CPUS390XState, gbea),
208                                   "gbea");
209 
210     cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
211                                    "cc_op");
212     cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
213                                     "cc_src");
214     cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
215                                     "cc_dst");
216     cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
217                                    "cc_vr");
218 
219     for (i = 0; i < 16; i++) {
220         snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
221         regs[i] = tcg_global_mem_new(tcg_env,
222                                      offsetof(CPUS390XState, regs[i]),
223                                      cpu_reg_names[i]);
224     }
225 }
226 
/* Return the env offset of the full 16-byte vector register REG (0..31). */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
232 
/*
 * Return the env offset of element ENR (of size ES) within vector
 * register REG, accounting for host endianness.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* Mirror the element position within its 8-byte half (see table). */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
267 
/* Env offset of the 64-bit FP register REG (the high doubleword of vreg). */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
273 
/* Env offset of the 32-bit FP register REG (the high word of vreg). */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
279 
/* Return a fresh temporary holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}
286 
/* Return a fresh temporary holding the 64-bit FP register REG. */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}
294 
/* Return a fresh temporary holding FP register REG's 32-bit value,
   zero-extended to 64 bits. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}
302 
303 static TCGv_i128 load_freg_128(int reg)
304 {
305     TCGv_i64 h = load_freg(reg);
306     TCGv_i64 l = load_freg(reg + 2);
307     TCGv_i128 r = tcg_temp_new_i128();
308 
309     tcg_gen_concat_i64_i128(r, l, h);
310     return r;
311 }
312 
/* Store V into the full 64 bits of general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
317 
/* Store V into the 64-bit FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}
322 
/* Store the low 32 bits of V into general register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
328 
/* Store the low 32 bits of V into the HIGH half of general register REG,
   keeping the low half intact. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
333 
/* Store the low 32 bits of V into the 32-bit FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}
338 
/* Synchronize the psw.addr global with the translator's current pc. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
344 
/*
 * If PER branch tracing is enabled in this TB, call the per_branch
 * helper with the branch destination.  No-op for user-only builds.
 */
static void per_branch(DisasContext *s, TCGv_i64 dest)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_BRANCH) {
        gen_helper_per_branch(tcg_env, dest, tcg_constant_i32(s->ilen));
    }
#endif
}
353 
/* Record the current pc as the PER breaking-event address (gbea). */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
358 
/* Flush the symbolic cc_op into the cc_op global, when it is known. */
static void update_cc_op(DisasContext *s)
{
    /* DYNAMIC/STATIC mean env->cc_op is already authoritative. */
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
365 
/* Fetch 2 instruction bytes (big-endian halfword) at PC. */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}
371 
/* Fetch 4 instruction bytes at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
377 
378 static int get_mem_index(DisasContext *s)
379 {
380 #ifdef CONFIG_USER_ONLY
381     return MMU_USER_IDX;
382 #else
383     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
384         return MMU_REAL_IDX;
385     }
386 
387     switch (s->base.tb->flags & FLAG_MASK_ASC) {
388     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
389         return MMU_PRIMARY_IDX;
390     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
391         return MMU_SECONDARY_IDX;
392     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
393         return MMU_HOME_IDX;
394     default:
395         g_assert_not_reached();
396     }
397 #endif
398 }
399 
/* Raise exception EXCP via the generic exception helper. */
static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}
404 
/*
 * Raise program exception CODE: record the code and instruction length
 * in env, synchronize psw.addr and the cc, then trigger EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
423 
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
428 
/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}
433 
/* Raise a trap: a data exception with DXC 0xff. */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
439 
440 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
441                                   int64_t imm)
442 {
443     tcg_gen_addi_i64(dst, src, imm);
444     if (!(s->base.tb->flags & FLAG_MASK_64)) {
445         if (s->base.tb->flags & FLAG_MASK_32) {
446             tcg_gen_andi_i64(dst, dst, 0x7fffffff);
447         } else {
448             tcg_gen_andi_i64(dst, dst, 0x00ffffff);
449         }
450     }
451 }
452 
/*
 * Compute the effective address base(B2) + index(X2) + displacement(D2)
 * into a fresh temporary, wrapped to the current addressing mode.
 * B2/X2 of 0 mean "no register", per the architecture.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Displacement only: mask to 31 or 24 bits per addressing mode. */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
480 
481 static inline bool live_cc_data(DisasContext *s)
482 {
483     return (s->cc_op != CC_OP_DYNAMIC
484             && s->cc_op != CC_OP_STATIC
485             && s->cc_op > 3);
486 }
487 
/* Set the cc to the constant VAL (0..3), discarding any live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
497 
/* Record a one-operand cc computation: OP with operand DST. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
507 
/* Record a two-operand cc computation: OP with operands SRC and DST. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
518 
/* Record a three-operand cc computation: OP with SRC, DST and VR. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
527 
/* Set the cc from the zero/non-zero state of VAL. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
532 
/* CC value is in env->cc_op; mark it so and drop any live cc data. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
543 
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /*
     * First pass: decide which helper arguments are needed.  Ops handled
     * entirely inline below need neither local_cc_op nor dummy.
     */
    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    /* Second pass: emit the actual computation. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* cc 0 if cc_dst == 0, cc 1 otherwise — computed inline. */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
634 
/* True if a direct goto_tb to DEST is permissible from this TB. */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    return translator_use_goto_tb(&s->base, dest);
}
639 
/* Debug statistics: count a branch that could not be inlined. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
646 
/* Debug statistics: count a branch that was inlined. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
653 
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs since bit 0 ("x") selects cc 3.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
666 
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  Indexed by the
   4-bit branch mask; the low two bits ("x") are don't-cares.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
679 
680 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
681    details required to generate a TCG comparison.  */
682 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
683 {
684     TCGCond cond;
685     enum cc_op old_cc_op = s->cc_op;
686 
687     if (mask == 15 || mask == 0) {
688         c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
689         c->u.s32.a = cc_op;
690         c->u.s32.b = cc_op;
691         c->is_64 = false;
692         return;
693     }
694 
695     /* Find the TCG condition for the mask + cc op.  */
696     switch (old_cc_op) {
697     case CC_OP_LTGT0_32:
698     case CC_OP_LTGT0_64:
699     case CC_OP_LTGT_32:
700     case CC_OP_LTGT_64:
701         cond = ltgt_cond[mask];
702         if (cond == TCG_COND_NEVER) {
703             goto do_dynamic;
704         }
705         account_inline_branch(s, old_cc_op);
706         break;
707 
708     case CC_OP_LTUGTU_32:
709     case CC_OP_LTUGTU_64:
710         cond = tcg_unsigned_cond(ltgt_cond[mask]);
711         if (cond == TCG_COND_NEVER) {
712             goto do_dynamic;
713         }
714         account_inline_branch(s, old_cc_op);
715         break;
716 
717     case CC_OP_NZ:
718         cond = nz_cond[mask];
719         if (cond == TCG_COND_NEVER) {
720             goto do_dynamic;
721         }
722         account_inline_branch(s, old_cc_op);
723         break;
724 
725     case CC_OP_TM_32:
726     case CC_OP_TM_64:
727         switch (mask) {
728         case 8:
729             cond = TCG_COND_TSTEQ;
730             break;
731         case 4 | 2 | 1:
732             cond = TCG_COND_TSTNE;
733             break;
734         default:
735             goto do_dynamic;
736         }
737         account_inline_branch(s, old_cc_op);
738         break;
739 
740     case CC_OP_ICM:
741         switch (mask) {
742         case 8:
743             cond = TCG_COND_TSTEQ;
744             break;
745         case 4 | 2 | 1:
746         case 4 | 2:
747             cond = TCG_COND_TSTNE;
748             break;
749         default:
750             goto do_dynamic;
751         }
752         account_inline_branch(s, old_cc_op);
753         break;
754 
755     case CC_OP_FLOGR:
756         switch (mask & 0xa) {
757         case 8: /* src == 0 -> no one bit found */
758             cond = TCG_COND_EQ;
759             break;
760         case 2: /* src != 0 -> one bit found */
761             cond = TCG_COND_NE;
762             break;
763         default:
764             goto do_dynamic;
765         }
766         account_inline_branch(s, old_cc_op);
767         break;
768 
769     case CC_OP_ADDU:
770     case CC_OP_SUBU:
771         switch (mask) {
772         case 8 | 2: /* result == 0 */
773             cond = TCG_COND_EQ;
774             break;
775         case 4 | 1: /* result != 0 */
776             cond = TCG_COND_NE;
777             break;
778         case 8 | 4: /* !carry (borrow) */
779             cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
780             break;
781         case 2 | 1: /* carry (!borrow) */
782             cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
783             break;
784         default:
785             goto do_dynamic;
786         }
787         account_inline_branch(s, old_cc_op);
788         break;
789 
790     default:
791     do_dynamic:
792         /* Calculate cc value.  */
793         gen_op_calc_cc(s);
794         /* FALLTHRU */
795 
796     case CC_OP_STATIC:
797         /* Jump based on CC.  We'll load up the real cond below;
798            the assignment here merely avoids a compiler warning.  */
799         account_noninline_branch(s, old_cc_op);
800         old_cc_op = CC_OP_STATIC;
801         cond = TCG_COND_NEVER;
802         break;
803     }
804 
805     /* Load up the arguments of the comparison.  */
806     c->is_64 = true;
807     switch (old_cc_op) {
808     case CC_OP_LTGT0_32:
809         c->is_64 = false;
810         c->u.s32.a = tcg_temp_new_i32();
811         tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
812         c->u.s32.b = tcg_constant_i32(0);
813         break;
814     case CC_OP_LTGT_32:
815     case CC_OP_LTUGTU_32:
816         c->is_64 = false;
817         c->u.s32.a = tcg_temp_new_i32();
818         tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
819         c->u.s32.b = tcg_temp_new_i32();
820         tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
821         break;
822 
823     case CC_OP_LTGT0_64:
824     case CC_OP_NZ:
825     case CC_OP_FLOGR:
826         c->u.s64.a = cc_dst;
827         c->u.s64.b = tcg_constant_i64(0);
828         break;
829 
830     case CC_OP_LTGT_64:
831     case CC_OP_LTUGTU_64:
832     case CC_OP_TM_32:
833     case CC_OP_TM_64:
834     case CC_OP_ICM:
835         c->u.s64.a = cc_src;
836         c->u.s64.b = cc_dst;
837         break;
838 
839     case CC_OP_ADDU:
840     case CC_OP_SUBU:
841         c->is_64 = true;
842         c->u.s64.b = tcg_constant_i64(0);
843         switch (mask) {
844         case 8 | 2:
845         case 4 | 1: /* result */
846             c->u.s64.a = cc_dst;
847             break;
848         case 8 | 4:
849         case 2 | 1: /* carry */
850             c->u.s64.a = cc_src;
851             break;
852         default:
853             g_assert_not_reached();
854         }
855         break;
856 
857     case CC_OP_STATIC:
858         c->is_64 = false;
859         c->u.s32.a = cc_op;
860 
861         /* Fold half of the cases using bit 3 to invert. */
862         switch (mask & 8 ? mask ^ 0xf : mask) {
863         case 0x1: /* cc == 3 */
864             cond = TCG_COND_EQ;
865             c->u.s32.b = tcg_constant_i32(3);
866             break;
867         case 0x2: /* cc == 2 */
868             cond = TCG_COND_EQ;
869             c->u.s32.b = tcg_constant_i32(2);
870             break;
871         case 0x4: /* cc == 1 */
872             cond = TCG_COND_EQ;
873             c->u.s32.b = tcg_constant_i32(1);
874             break;
875         case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
876             cond = TCG_COND_GTU;
877             c->u.s32.b = tcg_constant_i32(1);
878             break;
879         case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
880             cond = TCG_COND_TSTNE;
881             c->u.s32.b = tcg_constant_i32(1);
882             break;
883         case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
884             cond = TCG_COND_LEU;
885             c->u.s32.a = tcg_temp_new_i32();
886             c->u.s32.b = tcg_constant_i32(1);
887             tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
888             break;
889         case 0x4 | 0x2 | 0x1: /* cc != 0 */
890             cond = TCG_COND_NE;
891             c->u.s32.b = tcg_constant_i32(0);
892             break;
893         default:
894             /* case 0: never, handled above. */
895             g_assert_not_reached();
896         }
897         if (mask & 8) {
898             cond = tcg_invert_cond(cond);
899         }
900         break;
901 
902     default:
903         abort();
904     }
905     c->cond = cond;
906 }
907 
908 /* ====================================================================== */
909 /* Define the insn format enumeration.  */
910 #define F0(N)                         FMT_##N,
911 #define F1(N, X1)                     F0(N)
912 #define F2(N, X1, X2)                 F0(N)
913 #define F3(N, X1, X2, X3)             F0(N)
914 #define F4(N, X1, X2, X3, X4)         F0(N)
915 #define F5(N, X1, X2, X3, X4, X5)     F0(N)
916 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
917 
918 typedef enum {
919 #include "insn-format.h.inc"
920 } DisasFormat;
921 
922 #undef F0
923 #undef F1
924 #undef F2
925 #undef F3
926 #undef F4
927 #undef F5
928 #undef F6
929 
/* This is the way fields are to be accessed out of DisasFields:
   have_field tests presence by original index, get_field fetches the
   value from the matching compact slot. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
933 
/* True if original field index C is present in the decoded insn. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}
938 
/* Fetch field O (stored in compact slot C); the field must be present. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
945 
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width of the field, in bits */
    unsigned int type:2;        /* extraction kind (plain/signed/long-disp/vector) */
    unsigned int indexC:6;      /* compact slot (DisasFieldIndexC) */
    enum DisasFieldIndexO indexO:8; /* original index (DisasFieldIndexO) */
} DisasField;

/* Per-format list of fields; unused trailing entries are zero. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
958 
959 #define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
960 #define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
961 #define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
962 #define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
963                       { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
964 #define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
965                       { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
966                       { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
967 #define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
968                       { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
969 #define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
970                       { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
971                       { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
972 #define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
973 #define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
974 
975 #define F0(N)                     { { } },
976 #define F1(N, X1)                 { { X1 } },
977 #define F2(N, X1, X2)             { { X1, X2 } },
978 #define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
979 #define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
980 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
981 #define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },
982 
983 static const DisasFormatInfo format_info[] = {
984 #include "insn-format.h.inc"
985 };
986 
987 #undef F0
988 #undef F1
989 #undef F2
990 #undef F3
991 #undef F4
992 #undef F5
993 #undef F6
994 #undef R
995 #undef M
996 #undef V
997 #undef BD
998 #undef BXD
999 #undef BDL
1000 #undef BXDL
1001 #undef I
1002 #undef L
1003 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;   /* 64-bit operand/result temporaries */
    TCGv_i64 addr1;                 /* effective address, when present */
    TCGv_i128 out_128, in1_128, in2_128; /* 128-bit counterparts */
} DisasOps;
1012 
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* Bit flags; a helper may require several constraints at once.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
/* NOTE(review): DISAS_TARGET_1 is skipped here — presumably claimed
   elsewhere in this file; confirm before reusing it.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1045 
/* Static decode-table entry describing one instruction. */
struct DisasInsn {
    unsigned opc:16;        /* opcode bits matched by the decoder */
    unsigned flags:16;      /* IF_* flags above */
    DisasFormat fmt:8;      /* index into format_info */
    unsigned fac:8;         /* facility id */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Per-insn constant, available to the helpers as s->insn->data.  */
    uint64_t data;
};
1072 
1073 /* ====================================================================== */
1074 /* Miscellaneous helpers, used by several operations.  */
1075 
1076 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1077 {
1078     update_cc_op(s);
1079     per_breaking_event(s);
1080     per_branch(s, tcg_constant_i64(dest));
1081 
1082     if (dest == s->pc_tmp) {
1083         return DISAS_NEXT;
1084     }
1085     if (use_goto_tb(s, dest)) {
1086         tcg_gen_goto_tb(0);
1087         tcg_gen_movi_i64(psw_addr, dest);
1088         tcg_gen_exit_tb(s->base.tb, 0);
1089         return DISAS_NORETURN;
1090     } else {
1091         tcg_gen_movi_i64(psw_addr, dest);
1092         return DISAS_PC_CC_UPDATED;
1093     }
1094 }
1095 
1096 static DisasJumpType help_goto_indirect(DisasContext *s, TCGv_i64 dest)
1097 {
1098     update_cc_op(s);
1099     per_breaking_event(s);
1100     tcg_gen_mov_i64(psw_addr, dest);
1101     per_branch(s, psw_addr);
1102     return DISAS_PC_CC_UPDATED;
1103 }
1104 
/*
 * Emit a conditional branch.  C is the condition to branch on.  When
 * IS_IMM the target is fixed at pc_next + IMM*2; otherwise the target
 * is the run-time value CDEST (see disas_jdest below).
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        return DISAS_NEXT;
    }
    if (is_imm) {
        /*
         * Do not optimize a conditional branch if PER enabled, because we
         * still need a conditional call to helper_per_branch.
         */
        if (c->cond == TCG_COND_ALWAYS
            || (dest == s->pc_tmp &&
                !(s->base.tb->flags & FLAG_MASK_PER_BRANCH))) {
            return help_goto_direct(s, dest);
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            return DISAS_NEXT;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            return help_goto_indirect(s, cdest);
        }
    }

    update_cc_op(s);

    /*
     * Ensure the taken branch is fall-through of the tcg branch.
     * This keeps @cdest usage within the extended basic block,
     * which avoids an otherwise unnecessary spill to the stack.
     */
    lab = gen_new_label();
    if (c->is_64) {
        tcg_gen_brcond_i64(tcg_invert_cond(c->cond),
                           c->u.s64.a, c->u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(tcg_invert_cond(c->cond),
                           c->u.s32.a, c->u.s32.b, lab);
    }

    /* Branch taken.  */
    per_breaking_event(s);
    if (is_imm) {
        tcg_gen_movi_i64(psw_addr, dest);
    } else {
        tcg_gen_mov_i64(psw_addr, cdest);
    }
    per_branch(s, psw_addr);

    /* Only a fixed target can use direct TB chaining.  */
    if (is_imm && use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_exit_tb(s->base.tb, 0);
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }

    gen_set_label(lab);

    /* Branch not taken.  */
    tcg_gen_movi_i64(psw_addr, s->pc_tmp);
    if (use_goto_tb(s, s->pc_tmp)) {
        tcg_gen_goto_tb(1);
        tcg_gen_exit_tb(s->base.tb, 1);
        return DISAS_NORETURN;
    }
    return DISAS_PC_CC_UPDATED;
}
1178 
1179 /* ====================================================================== */
1180 /* The operations.  These perform the bulk of the work for any insn,
1181    usually after the operands have been loaded and output initialized.  */
1182 
1183 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1184 {
1185     tcg_gen_abs_i64(o->out, o->in2);
1186     return DISAS_NEXT;
1187 }
1188 
1189 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1190 {
1191     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1192     return DISAS_NEXT;
1193 }
1194 
1195 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1196 {
1197     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1198     return DISAS_NEXT;
1199 }
1200 
1201 static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
1202 {
1203     tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1204     tcg_gen_mov_i64(o->out2, o->in2);
1205     return DISAS_NEXT;
1206 }
1207 
1208 static DisasJumpType op_add(DisasContext *s, DisasOps *o)
1209 {
1210     tcg_gen_add_i64(o->out, o->in1, o->in2);
1211     return DISAS_NEXT;
1212 }
1213 
1214 static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
1215 {
1216     tcg_gen_movi_i64(cc_src, 0);
1217     tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
1218     return DISAS_NEXT;
1219 }
1220 
/* Compute carry into cc_src. */
/* After this, cc_src holds the carry from the last CC-setting op as 0/1. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* NOTE(review): cc_src appears to hold the borrow as 0/-1 here;
           adding 1 converts it to a carry of 1/0 — confirm vs cc helpers. */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC value, then extract the carry from it. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}

/* Add with carry, 32-bit: out = in1 + in2 + carry (no carry-out needed). */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* Add with carry, 64-bit: carry-in and carry-out both through cc_src. */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_addcio_i64(o->out, cc_src, o->in1, o->in2, cc_src);
    return DISAS_NEXT;
}
1256 
/*
 * Add immediate to storage: add in2 to the value at addr1 and store the
 * sum back.  With S390_FEAT_STFLE_45 the update is done as an atomic
 * fetch-add; otherwise as a plain load/store pair.  insn->data holds
 * the MemOp for the access.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

/*
 * As op_asi, but additionally produces the unsigned carry-out in cc_src
 * (0/1) for the logical-CC computation.
 */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1301 
/* 32-bit float add via helper; FP state and exceptions live in env.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 64-bit float add via helper.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 128-bit float add via helper, operands passed as i128 pairs.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* out = in1 & in2.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1325 
/*
 * AND immediate against one sub-field of in1.  insn->data encodes the
 * field: low byte = shift, next byte = width in bits.  Bits outside the
 * field pass through unchanged; CC is derived only from the field bits.
 */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* Position the immediate over the field, with 1s everywhere else. */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1342 
/* out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/*
 * AND to storage: and in2 into the value at addr1 and store it back.
 * Atomic when the interlocked-access facility 2 is available; otherwise
 * a plain load/store pair.  insn->data holds the MemOp for the access.
 */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1393 
/* BRANCH AND SAVE: store link info into out, then branch to in2 (if any). */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        return help_goto_indirect(s, o->in2);
    } else {
        /* Target register 0 means no branch; only the link is stored. */
        return DISAS_NEXT;
    }
}

/*
 * Build the link information for BAL.  In 31/64-bit mode this is just
 * the plain link address.  Otherwise the low word is assembled as:
 * ILC in bits 31-30, CC in bits 29-28, program mask in bits 27-24
 * (taken from psw_mask), and the 24-bit return address below.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}

/* BRANCH AND LINK: store link info into out, then branch to in2 (if any). */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        return help_goto_indirect(s, o->in2);
    } else {
        return DISAS_NEXT;
    }
}
1433 
1434 /*
1435  * Disassemble the target of a branch. The results are returned in a form
1436  * suitable for passing into help_branch():
1437  *
1438  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1439  *   branches, whose DisasContext *S contains the relative immediate field RI,
1440  *   are considered fixed. All the other branches are considered computed.
1441  * - int IMM is the value of RI.
1442  * - TCGv_i64 CDEST is the address of the computed target.
1443  */
1444 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1445     if (have_field(s, ri)) {                                                   \
1446         if (unlikely(s->ex_value)) {                                           \
1447             cdest = tcg_temp_new_i64();                                        \
1448             tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
1449             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1450             is_imm = false;                                                    \
1451         } else {                                                               \
1452             is_imm = true;                                                     \
1453         }                                                                      \
1454     } else {                                                                   \
1455         is_imm = false;                                                        \
1456     }                                                                          \
1457     imm = is_imm ? get_field(s, ri) : 0;                                       \
1458 } while (false)
1459 
/* BRANCH AND SAVE with an immediate target: save link, branch with mask 0xf. */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON CONDITION: branch per mask m1; BCR with r2=0 only serializes. */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1499 
/*
 * BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
 * if the 32-bit result is nonzero.
 */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Decrement on the full register, then write back only the low word. */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/*
 * BRANCH ON COUNT HIGH: as op_bct32 but on the high 32 bits of r1.
 * The target is always the relative immediate i2.
 */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}

/* BRANCH ON COUNT (64-bit): decrement r1 and branch if nonzero. */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1560 
/*
 * BRANCH ON INDEX (32-bit): r1 += r3, then branch if the 32-bit sum
 * compares LE (insn->data set) or GT against the odd register of the
 * r3 pair.
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON INDEX (64-bit): as op_bx32, on full registers. */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 aliases the comparand, snapshot it before the add clobbers it. */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1608 
/*
 * COMPARE AND BRANCH: compare in1 against in2 with the relation selected
 * by m3 (unsigned when insn->data is set), branching to the i4 immediate
 * or, absent that, to the b4/d4 address.
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No i4 field and no computed target: branch to the b4/d4 address. */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

/* 32-bit float compare via helper; CC comes from the helper result. */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit float compare via helper; CC comes from the helper result. */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit float compare via helper; CC comes from the helper result. */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1654 
1655 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1656                                    bool m4_with_fpe)
1657 {
1658     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1659     uint8_t m3 = get_field(s, m3);
1660     uint8_t m4 = get_field(s, m4);
1661 
1662     /* m3 field was introduced with FPE */
1663     if (!fpe && m3_with_fpe) {
1664         m3 = 0;
1665     }
1666     /* m4 field was introduced with FPE */
1667     if (!fpe && m4_with_fpe) {
1668         m4 = 0;
1669     }
1670 
1671     /* Check for valid rounding modes. Mode 3 was introduced later. */
1672     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1673         gen_program_exception(s, PGM_SPECIFICATION);
1674         return NULL;
1675     }
1676 
1677     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1678 }
1679 
/* Convert 32-bit float via the cfeb helper; m34 packs rounding fields;
   CC comes from the helper. */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 64-bit float via the cfdb helper; CC from helper. */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 128-bit float via the cfxb helper; CC from helper. */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit result variant of op_cfeb (cgeb helper); CC from helper. */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit result variant of op_cfdb (cgdb helper); CC from helper. */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit result variant of op_cfxb (cgxb helper); CC from helper. */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1751 
/* Logical (unsigned) conversion from 32-bit float (clfeb helper);
   CC comes from the helper. */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Logical conversion from 64-bit float (clfdb helper); CC from helper. */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Logical conversion from 128-bit float (clfxb helper); CC from helper. */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit result variant of op_clfeb (clgeb helper); CC from helper. */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit result variant of op_clfdb (clgdb helper); CC from helper. */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 64-bit result variant of op_clfxb (clgxb helper); CC from helper. */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1823 
/* Convert to 32-bit float via the cegb helper; does not change CC. */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Convert to 64-bit float via the cdgb helper; does not change CC. */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Convert to 128-bit float via the cxgb helper; does not change CC. */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Logical-source variant of op_cegb (celgb helper); no CC change. */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Logical-source variant of op_cdgb (cdlgb helper); no CC change. */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Logical-source variant of op_cxgb (cxlgb helper); no CC change. */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1889 
/*
 * CHECKSUM: helper returns a (result, consumed-length) pair in an i128;
 * advance the r2 address by the consumed length and shrink r2+1 by it.
 * CC comes from the helper.
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}

/*
 * COMPARE LOGICAL (storage-storage).  Power-of-two lengths up to 8 are
 * inlined as two loads plus an unsigned compare; anything else goes
 * through the byte-wise helper.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32(l + 1) maps length 1/2/4/8 to MO_8/16/32/64. */
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
1932 
/* COMPARE LOGICAL LONG via helper; r1/r2 must designate even registers. */
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_clcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL LONG EXTENDED via helper; r1/r3 must be even. */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
1970 
1971 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
1972 {
1973     int r1 = get_field(s, r1);
1974     int r3 = get_field(s, r3);
1975     TCGv_i32 t1, t3;
1976 
1977     /* r1 and r3 must be even.  */
1978     if (r1 & 1 || r3 & 1) {
1979         gen_program_exception(s, PGM_SPECIFICATION);
1980         return DISAS_NORETURN;
1981     }
1982 
1983     t1 = tcg_constant_i32(r1);
1984     t3 = tcg_constant_i32(r3);
1985     gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
1986     set_cc_static(s);
1987     return DISAS_NEXT;
1988 }
1989 
/* COMPARE LOGICAL UNDER MASK: helper compares the m3-selected bytes of
   the low word of in1 against storage at in2; CC from helper. */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL STRING: helper returns updated addresses as an i128
   pair, unpacked back into in2/in1; CC from helper. */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2011 
2012 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2013 {
2014     TCGv_i64 t = tcg_temp_new_i64();
2015     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2016     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2017     tcg_gen_or_i64(o->out, o->out, t);
2018     return DISAS_NEXT;
2019 }
2020 
/* CS, CSY, CSG: COMPARE AND SWAP.  The operand size (and thus
   alignment requirement) comes from insn->data.  */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2043 
/* CDSG: COMPARE DOUBLE AND SWAP, implemented as a 128-bit atomic
   compare-and-exchange on the register pairs R1:R1+1 and R3:R3+1.  */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2067 
/* CSST: COMPARE AND SWAP AND STORE.  */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    /* Separate helpers for parallel (atomic) and serial execution.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2082 
2083 #ifndef CONFIG_USER_ONLY
/* CSP, CSPG: COMPARE AND SWAP AND PURGE (privileged).  On a successful
   swap with the low bit of R2 set, the whole TLB is purged.  */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Drop the low address bits; they carry the purge flag, not bytes.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2123 #endif
2124 
/* CVB, CVBY: CONVERT TO BINARY.  Load the 8-byte packed-decimal
   operand and let the helper convert it into r1.  */
static DisasJumpType op_cvb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ);
    gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t);
    return DISAS_NEXT;
}
2132 
/* CVBG: CONVERT TO BINARY (64-bit result from a 16-byte packed-decimal
   operand).  */
static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128);
    gen_helper_cvbg(o->out, tcg_env, t);
    return DISAS_NEXT;
}
2140 
/* CVD, CVDY: CONVERT TO DECIMAL.  Convert the low 32 bits of r1 to an
   8-byte packed-decimal value and store it at the second operand.  */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}
2150 
/* CVDG: CONVERT TO DECIMAL (64-bit source, 16-byte packed-decimal
   result stored at the second operand).  */
static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    gen_helper_cvdg(t, o->in1);
    tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
    return DISAS_NEXT;
}
2158 
/* COMPARE AND TRAP family.  Trap when the m3 condition holds between
   in1 and in2; a nonzero insn->data selects the logical (unsigned)
   comparison forms.  */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Invert the condition: branch AROUND the trap when it's false.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2177 
/* CU12, CU14, CU21, CU24, CU41, CU42: CONVERT UTF-8/16/32.  The two
   decimal digits in insn->data name the source and destination
   formats (1 = UTF-8, 2 = UTF-16, 4 = UTF-32).  */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* The m3 well-formedness-check flag requires the ETF3-enhancement
       facility; without it, force the unchecked behavior.  */
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2224 
2225 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): pass r1, r3 and the i2 function code to the
   helper, which dispatches the supported diagnose functions.  */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(tcg_env, r1, r3, func_code);
    return DISAS_NEXT;
}
2235 #endif
2236 
/* Signed 32-bit divide (DR, D).  The helper packs both halves of the
   result into one 64-bit value; split it into out2 (low 32 bits) and
   out (high 32 bits) for write-back to the register pair.  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2243 
/* Unsigned 32-bit divide (DLR, DL).  As op_divs32: split the helper's
   packed 64-bit result into two 32-bit halves.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2250 
/* Signed 64-bit divide (DSGR, DSG).  The helper returns the two 64-bit
   result halves packed in an i128; unpack into the output pair.  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2259 
/* Unsigned 128/64-bit divide (DLGR, DLG): the dividend is the register
   pair out:out2; the helper returns both result halves in an i128.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2268 
/* DEB(R): BFP divide, short format.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2274 
/* DDB(R): BFP divide, long format.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2280 
/* DXBR: BFP divide, extended (128-bit) format.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2286 
/* EAR: EXTRACT ACCESS register r2 into the low 32 bits of the output.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2293 
/* ECAG: EXTRACT CACHE ATTRIBUTE.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2300 
/* EFPC: EXTRACT the floating-point control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2306 
/* EPSW: EXTRACT PSW.  r1 receives the upper PSW word (with the current
   CC folded in); r2, if nonzero, receives the low 32 bits of the mask.  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    /* Insert the materialized CC into its position in the PSW image.  */
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2326 
/* EX, EXRL: EXECUTE.  The helper fetches and runs the target
   instruction; control flow then depends on what was executed.  */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 means no modification of the target instruction.  */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    /* Pass the length of the EXECUTE itself so the PSW can advance.  */
    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2353 
/* FIEBR(A): LOAD FP INTEGER, short BFP (round to integer value).  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    /* NULL means the m3/m4 fields were invalid; the exception has
       already been generated, hence NORETURN.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2364 
/* FIDBR(A): LOAD FP INTEGER, long BFP.  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    /* NULL: invalid m3/m4 fields; exception already generated.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2375 
/* FIXBR(A): LOAD FP INTEGER, extended BFP.  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    /* NULL: invalid m3/m4 fields; exception already generated.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2386 
/* FLOGR: FIND LEFTMOST ONE.  */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out)
;
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2406 
/* ICM, ICMH, ICMY: INSERT CHARACTERS UNDER MASK.  insn->data is the
   bit bias of the destination field (e.g. 32 for the high-word form).
   ccm accumulates the mask of inserted bits for the CC computation.  */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        /* A contiguous mask: insert the loaded value in one deposit.  */
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2471 
/* Insert immediate: deposit in2 into in1 at the field described by
   insn->data, which packs (size << 8) | shift.  */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2479 
/* IPM: INSERT PROGRAM MASK.  Build the byte (CC << 4) | program-mask
   and insert it into bits 24-31 of the output register.  */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* The CC must be materialized before it can be read.  */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* Program mask lives at bits 40-43 of the PSW mask.  */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2493 
2494 #ifndef CONFIG_USER_ONLY
/* IDTE: INVALIDATE DAT TABLE ENTRY (privileged).  The m4 field is only
   meaningful with the local-TLB-clearing facility.  */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_idte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}
2507 
/* IPTE: INVALIDATE PAGE TABLE ENTRY (privileged).  As with IDTE, m4 is
   only honored when the local-TLB-clearing facility is present.  */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}
2520 
/* ISKE: INSERT STORAGE KEY EXTENDED (privileged).  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2526 #endif
2527 
2528 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2529 {
2530     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2531     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2532     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2533     TCGv_i32 t_r1, t_r2, t_r3, type;
2534 
2535     switch (s->insn->data) {
2536     case S390_FEAT_TYPE_KMA:
2537         if (r3 == r1 || r3 == r2) {
2538             gen_program_exception(s, PGM_SPECIFICATION);
2539             return DISAS_NORETURN;
2540         }
2541         /* FALL THROUGH */
2542     case S390_FEAT_TYPE_KMCTR:
2543         if (r3 & 1 || !r3) {
2544             gen_program_exception(s, PGM_SPECIFICATION);
2545             return DISAS_NORETURN;
2546         }
2547         /* FALL THROUGH */
2548     case S390_FEAT_TYPE_PPNO:
2549     case S390_FEAT_TYPE_KMF:
2550     case S390_FEAT_TYPE_KMC:
2551     case S390_FEAT_TYPE_KMO:
2552     case S390_FEAT_TYPE_KM:
2553         if (r1 & 1 || !r1) {
2554             gen_program_exception(s, PGM_SPECIFICATION);
2555             return DISAS_NORETURN;
2556         }
2557         /* FALL THROUGH */
2558     case S390_FEAT_TYPE_KMAC:
2559     case S390_FEAT_TYPE_KIMD:
2560     case S390_FEAT_TYPE_KLMD:
2561         if (r2 & 1 || !r2) {
2562             gen_program_exception(s, PGM_SPECIFICATION);
2563             return DISAS_NORETURN;
2564         }
2565         /* FALL THROUGH */
2566     case S390_FEAT_TYPE_PCKMO:
2567     case S390_FEAT_TYPE_PCC:
2568         break;
2569     default:
2570         g_assert_not_reached();
2571     };
2572 
2573     t_r1 = tcg_constant_i32(r1);
2574     t_r2 = tcg_constant_i32(r2);
2575     t_r3 = tcg_constant_i32(r3);
2576     type = tcg_constant_i32(s->insn->data);
2577     gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2578     set_cc_static(s);
2579     return DISAS_NEXT;
2580 }
2581 
/* KEB(R): COMPARE AND SIGNAL, short BFP; the helper sets the CC.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2588 
/* KDB(R): COMPARE AND SIGNAL, long BFP.  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2595 
/* KXBR: COMPARE AND SIGNAL, extended BFP.  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2602 
/* Common code for LOAD AND ADD (LAA, LAAG, LAAL, LAALG): atomically
   add in1 to storage, returning the old storage value, and set the CC
   from the sum.  addu64 selects the 64-bit logical (carry) CC form.  */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* The real output is indeed the original value in memory;
       fetch it into in2 while performing the atomic add.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    if (addu64) {
        /* Logical add: also capture the carry-out in cc_src.  */
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}
2618 
/* LAA, LAAG: LOAD AND ADD with signed-add CC.  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}
2623 
/* LAALG: LOAD AND ADD LOGICAL with 64-bit carry-producing CC.  */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}
2628 
/* LAN, LANG: LOAD AND AND.  */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       fetch it into in2 while performing the atomic AND.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2639 
/* LAO, LAOG: LOAD AND OR.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       fetch it into in2 while performing the atomic OR.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2650 
/* LAX, LAXG: LOAD AND EXCLUSIVE OR.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       fetch it into in2 while performing the atomic XOR.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2661 
/* LDEB(R): LOAD LENGTHENED, short BFP to long.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2667 
/* LEDBR(A): LOAD ROUNDED, long BFP to short.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* NULL: invalid m3/m4 fields; exception already generated.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
2678 
/* LDXBR(A): LOAD ROUNDED, extended BFP to long.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* NULL: invalid m3/m4 fields; exception already generated.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2689 
/* LEXBR(A): LOAD ROUNDED, extended BFP to short.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* NULL: invalid m3/m4 fields; exception already generated.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2700 
/* LXDB(R): LOAD LENGTHENED, long BFP to extended.  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2706 
/* LXEB(R): LOAD LENGTHENED, short BFP to extended.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2712 
/* LDE(R): LOAD LENGTHENED without value conversion — move the 32-bit
   operand into the high half of the 64-bit FP register image.  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2718 
/* LLGT(R): LOAD LOGICAL THIRTY ONE BITS — keep only bits 0-30 of the
   low word.  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2724 
/* Sign-extending 8-bit memory load.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}
2730 
/* Zero-extending 8-bit memory load.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}
2736 
/* Sign-extending big-endian 16-bit memory load.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}
2742 
/* Zero-extending big-endian 16-bit memory load.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}
2748 
/* Sign-extending 32-bit load; insn->data may OR in extra MemOp flags
   (e.g. an alignment requirement).  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}
2755 
/* Zero-extending 32-bit load; insn->data may OR in extra MemOp flags.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
2762 
/* 64-bit load; insn->data may OR in extra MemOp flags.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2769 
/* LAT: LOAD AND TRAP (32-bit) — trap if the loaded value is zero.
   The value itself (already loaded into in2) is stored regardless.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2780 
/* LGAT: LOAD AND TRAP (64-bit) — trap if the loaded value is zero.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2791 
/* LFHAT: LOAD HIGH AND TRAP — store to the high word of r1, trap if
   the loaded value is zero.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2802 
/* LLGFAT: LOAD LOGICAL AND TRAP (32-bit zero-extended load).  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2814 
/* LLGTAT: LOAD LOGICAL THIRTY ONE BITS AND TRAP.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2825 
/* LOAD ON CONDITION / SELECT: out = condition ? in2 : in1.  The
   condition mask comes from m3 (LOC*) or m4 (SEL*).  */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* Widen the 32-bit comparison result so a 64-bit movcond
           can consume it.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2856 
2857 #ifndef CONFIG_USER_ONLY
/* LCTL: LOAD CONTROL (32-bit, privileged).  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2868 
/* LCTLG: LOAD CONTROL (64-bit, privileged).  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2879 
/* LRA, LRAY, LRAG: LOAD REAL ADDRESS (privileged); the helper sets
   the CC.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2886 
/* LPP: LOAD PROGRAM PARAMETER (privileged).  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2892 
/* LPSW: LOAD PSW from a short (64-bit) PSW image (privileged).  */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}
2912 
/* LPSWE: LOAD PSW EXTENDED from a full 16-byte PSW (privileged).  */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* Load mask (first doubleword) and address (second doubleword).  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
2928 #endif
2929 
/* LAM, LAMY: LOAD ACCESS MULTIPLE (access registers r1..r3).  */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
2938 
/* LM, LMY: LOAD MULTIPLE (32-bit registers r1 through r3, wrapping
   modulo 16).  */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
2978 
/* LMH: LOAD MULTIPLE HIGH — as op_lm32 but writing the high words of
   registers r1 through r3 (wrapping modulo 16).  */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3018 
/* LMG: load the full 64-bit registers r1..r3 (wrapping at 15) from memory. */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    /* regs[r1] may be a base register of o->in2; update it only after
       both loads have succeeded.  */
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3055 
/* LPD/LPDG: load pair disjoint; s->insn->data selects the operand size. */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3079 
/* LPQ: aligned atomic 128-bit load into a register pair. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3087 
3088 #ifndef CONFIG_USER_ONLY
/* LURA/LURAG: load using real address, bypassing dynamic translation. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3094 #endif
3095 
/* Load and zero rightmost byte: clear the low 8 bits of the operand. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3101 
/* LCBB: load count to block boundary, i.e. min(16, bytes remaining in the
   2^(m3+6)-byte block containing the address). */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    /* Block sizes above 4K (m3 > 6) are invalid. */
    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3118 
/* MC: monitor call; classes are limited to 0-15. */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3135 
/* Generic move: transfer ownership of in2 to out (avoids a copy). */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    /* Clear in2 so the generic wout/free machinery doesn't reuse it. */
    o->in2 = NULL;
    return DISAS_NEXT;
}
3142 
/* MVCDK-style move that also updates access register r1 according to the
   current address-space-control mode. */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    /* Transfer ownership of in2 to out, as in op_mov2. */
    o->out = o->in2;
    o->in2 = NULL;

    /* Derive the new access-register value from the PSW ASC bits. */
    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* Copy the access register of the base register; AR0 reads as 0. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3174 
3175 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3176 {
3177     o->out = o->in1;
3178     o->out2 = o->in2;
3179     o->in1 = NULL;
3180     o->in2 = NULL;
3181     return DISAS_NEXT;
3182 }
3183 
/* MVC: move (copy) l1+1 bytes from the second operand to the first. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3191 
/* MVCRL: move right to left; the length is taken from GR0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3197 
/* MVCIN: move inverse (copy bytes in reverse order). */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3205 
3206 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3207 {
3208     int r1 = get_field(s, r1);
3209     int r2 = get_field(s, r2);
3210     TCGv_i32 t1, t2;
3211 
3212     /* r1 and r2 must be even.  */
3213     if (r1 & 1 || r2 & 1) {
3214         gen_program_exception(s, PGM_SPECIFICATION);
3215         return DISAS_NORETURN;
3216     }
3217 
3218     t1 = tcg_constant_i32(r1);
3219     t2 = tcg_constant_i32(r2);
3220     gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3221     set_cc_static(s);
3222     return DISAS_NEXT;
3223 }
3224 
3225 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3226 {
3227     int r1 = get_field(s, r1);
3228     int r3 = get_field(s, r3);
3229     TCGv_i32 t1, t3;
3230 
3231     /* r1 and r3 must be even.  */
3232     if (r1 & 1 || r3 & 1) {
3233         gen_program_exception(s, PGM_SPECIFICATION);
3234         return DISAS_NORETURN;
3235     }
3236 
3237     t1 = tcg_constant_i32(r1);
3238     t3 = tcg_constant_i32(r3);
3239     gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
3240     set_cc_static(s);
3241     return DISAS_NEXT;
3242 }
3243 
3244 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3245 {
3246     int r1 = get_field(s, r1);
3247     int r3 = get_field(s, r3);
3248     TCGv_i32 t1, t3;
3249 
3250     /* r1 and r3 must be even.  */
3251     if (r1 & 1 || r3 & 1) {
3252         gen_program_exception(s, PGM_SPECIFICATION);
3253         return DISAS_NORETURN;
3254     }
3255 
3256     t1 = tcg_constant_i32(r1);
3257     t3 = tcg_constant_i32(r3);
3258     gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
3259     set_cc_static(s);
3260     return DISAS_NEXT;
3261 }
3262 
/* MVCOS: move with optional specifications; controls come from r3. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3270 
3271 #ifndef CONFIG_USER_ONLY
/* MVCP: move to primary; length in the register designated by l1,
   key in the register designated by r3. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3280 
/* MVCS: move to secondary; length in the register designated by l1,
   key in the register designated by r3. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3289 #endif
3290 
/* MVN: move numerics (low nibbles only). */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3298 
/* MVO: move with offset (packed-decimal shift/merge). */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3306 
/* MVPG: move page; GR0 supplies the operation controls. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3316 
/* MVST: move string, terminated by the byte in GR0. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3326 
/* MVZ: move zones (high nibbles only). */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3334 
/* Integer multiply, low 64 bits of the product. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3340 
/* Unsigned 64x64->128 multiply; high half to out, low half to out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3346 
/* Signed 64x64->128 multiply; high half to out, low half to out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3352 
/* MEEB: multiply short BFP. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3358 
/* MDEB: multiply short BFP, producing a long BFP result. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3364 
/* MDB: multiply long BFP. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3370 
/* MXB: multiply extended (128-bit) BFP. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3376 
/* MXDB: multiply long BFP, producing an extended BFP result. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3382 
/* MAEB: multiply and add short BFP (out = in1 * in2 + f[r3]). */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3389 
/* MADB: multiply and add long BFP (out = in1 * in2 + f[r3]). */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3396 
/* MSEB: multiply and subtract short BFP (out = in1 * in2 - f[r3]). */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3403 
/* MSDB: multiply and subtract long BFP (out = in1 * in2 - f[r3]). */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3410 
3411 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3412 {
3413     TCGv_i64 z = tcg_constant_i64(0);
3414     TCGv_i64 n = tcg_temp_new_i64();
3415 
3416     tcg_gen_neg_i64(n, o->in2);
3417     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3418     return DISAS_NEXT;
3419 }
3420 
/* Load negative, short BFP: force the sign bit (bit 31) on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3426 
/* Load negative, long BFP: force the sign bit (bit 63) on. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3432 
/* Load negative, extended BFP: set the sign bit in the high doubleword,
   pass the low doubleword through unchanged. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3439 
/* NC: AND l1+1 bytes of storage; CC set from the result. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3448 
/* Load complement: two's-complement negation. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3454 
/* Load complement, short BFP: flip the sign bit (bit 31). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3460 
/* Load complement, long BFP: flip the sign bit (bit 63). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3466 
/* Load complement, extended BFP: flip the sign bit in the high doubleword,
   pass the low doubleword through unchanged. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3473 
/* OC: OR l1+1 bytes of storage; CC set from the result. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3482 
/* Bitwise OR of the two register operands. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3488 
/* OR immediate into a sub-field of the register; insn->data packs the
   field's bit position (low byte) and width (next byte). */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* Position the immediate over the targeted field and merge. */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3504 
/* OI and friends: OR an immediate into storage; uses an atomic RMW when
   the interlocked-access facility is available. */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Non-interlocked path: write the result back explicitly. */
    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3525 
/* PACK: convert zoned decimal to packed decimal. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3533 
3534 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3535 {
3536     int l2 = get_field(s, l2) + 1;
3537     TCGv_i32 l;
3538 
3539     /* The length must not exceed 32 bytes.  */
3540     if (l2 > 32) {
3541         gen_program_exception(s, PGM_SPECIFICATION);
3542         return DISAS_NORETURN;
3543     }
3544     l = tcg_constant_i32(l2);
3545     gen_helper_pka(tcg_env, o->addr1, o->in2, l);
3546     return DISAS_NEXT;
3547 }
3548 
3549 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3550 {
3551     int l2 = get_field(s, l2) + 1;
3552     TCGv_i32 l;
3553 
3554     /* The length must be even and should not exceed 64 bytes.  */
3555     if ((l2 & 1) || (l2 > 64)) {
3556         gen_program_exception(s, PGM_SPECIFICATION);
3557         return DISAS_NORETURN;
3558     }
3559     l = tcg_constant_i32(l2);
3560     gen_helper_pku(tcg_env, o->addr1, o->in2, l);
3561     return DISAS_NEXT;
3562 }
3563 
3564 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3565 {
3566     const uint8_t m3 = get_field(s, m3);
3567 
3568     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3569         tcg_gen_ctpop_i64(o->out, o->in2);
3570     } else {
3571         gen_helper_popcnt(o->out, o->in2);
3572     }
3573     return DISAS_NEXT;
3574 }
3575 
3576 #ifndef CONFIG_USER_ONLY
/* PTLB: purge TLB. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
3582 #endif
3583 
/* RISBG/RISBGN/RISBHG/RISBLG: rotate then insert selected bits.
   Rotate in2 left by i5, insert bit range i3..i4 into in1 (i.e. out),
   optionally zeroing the untouched bits (i4 bit 0x80). */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        /* pos < 0 marks "deposit not applicable" for the code below. */
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both halves and merge. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3671 
/* RNSBG/ROSBG/RXSBG: rotate then AND/OR/XOR selected bits.
   Rotate in2 left by i5, combine bit range i3..i4 into out, set CC from
   the selected bits.  i3 bit 0x80 requests the test-only form. */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    TCGv_i64 orig_out;
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        tcg_debug_assert(o->out != NULL);
        orig_out = o->out;
        o->out = tcg_temp_new_i64();
        tcg_gen_mov_i64(o->out, orig_out);
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        /* Setting the unselected bits leaves them unchanged by the AND. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3727 
/* Byte-swap the low 16 bits; upper bits of the result are zero. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3733 
/* Byte-swap the low 32 bits; upper bits of the result are zero. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3739 
/* Byte-swap all 64 bits. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3745 
3746 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3747 {
3748     TCGv_i32 t1 = tcg_temp_new_i32();
3749     TCGv_i32 t2 = tcg_temp_new_i32();
3750     TCGv_i32 to = tcg_temp_new_i32();
3751     tcg_gen_extrl_i64_i32(t1, o->in1);
3752     tcg_gen_extrl_i64_i32(t2, o->in2);
3753     tcg_gen_rotl_i32(to, t1, t2);
3754     tcg_gen_extu_i32_i64(o->out, to);
3755     return DISAS_NEXT;
3756 }
3757 
/* RLLG: rotate left 64 bits. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3763 
3764 #ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended; CC from the old key bits. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3771 
/* SACF: set address space control fast. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3778 #endif
3779 
/* SAM24/SAM31/SAM64: set addressing mode; insn->data selects the mode. */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* Address mask for the target mode: 24, 31 or 64 bit. */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    /* Update the addressing-mode bits (31-32) of the PSW mask. */
    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3813 
/* SAR: set access register r1 from the second operand. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3820 
/* SEB: subtract short BFP. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3826 
/* SDB: subtract long BFP. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3832 
/* SXB: subtract extended (128-bit) BFP. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3838 
/* SQEB: square root, short BFP. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3844 
/* SQDB: square root, long BFP. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3850 
/* SQXB: square root, extended (128-bit) BFP. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
3856 
3857 #ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP). */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3864 
/* SIGP: signal processor. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3874 #endif
3875 
/* STOC/STOCG/STOCFH: store on condition; insn->data selects the variant. */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high word of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3917 
/* SLA/SLAG: shift left single arithmetic; insn->data is the sign-bit
   position (31 for the 32-bit form, 63 for the 64-bit one). */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    /* For the 32-bit form, position the operand in the high half so the
       CC computation sees the overflow bits. */
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
3937 
/* Logical shift left. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3943 
/* Arithmetic shift right. */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3949 
/* Logical shift right. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3955 
/* SFPC: set floating-point control register. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}
3961 
/* SFASR: set floating-point control and signal. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}
3967 
/* SRNM: set BFP rounding mode (2-bit). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
3975 
/* SRNMB: set BFP rounding mode (3-bit). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
3983 
/* SRNMT: set DFP rounding mode (FPC bits 4-6). */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
3997 
/* SPM: set program mask from bits 2-7 of the first operand. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* CC comes from bits 2-3 of the operand (bits 28-29 of the low word). */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Program mask comes from bits 4-7 of the operand. */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4008 
/* ECTG: extract CPU time; fills GR0, GR1 and r3 as documented below. */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4037 
4038 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: copy the 4-bit key out of the address operand
   into the key field of the PSW mask. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
4045 
/* SET STORAGE KEY EXTENDED: all work done in the helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4051 
/* Raise a specification exception if any reserved bit of the (just-updated)
   PSW mask is set; otherwise fall through. */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4062 
/* SET SYSTEM MASK: replace the high byte of the PSW mask, validate the
   result, then leave the TB so pending interrupts are re-evaluated. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4073 
/* STORE CPU ADDRESS: the CPU address is the core id kept in env. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4079 #endif
4080 
/* STORE CLOCK: helper produces the TOD value; CC is always 0 because
   clock states are not modelled. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4088 
/* STORE CLOCK EXTENDED: store a 16-byte value built from the 64-bit TOD
   clock and the TOD programmable register. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4112 
4113 #ifndef CONFIG_USER_ONLY
/* SET CLOCK: helper sets the TOD clock and returns the CC. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4120 
/* SET CLOCK COMPARATOR. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}
4126 
/* SET CLOCK PROGRAMMABLE FIELD: implicit operand is GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}
4132 
/* STORE CLOCK COMPARATOR. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}
4138 
/* STORE CONTROL (64-bit): store control registers r1..r3 at the address
   in in2; the helper handles register wrap-around. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4147 
/* STORE CONTROL (32-bit): as op_stctg but storing 32-bit values. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4156 
/* STORE CPU ID: the precomputed cpuid word lives in env. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4162 
/* SET CPU TIMER. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}
4168 
/* STORE FACILITY LIST: helper writes the facility bits to low core. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}
4174 
/* STORE CPU TIMER. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}
4180 
/* STORE SYSTEM INFORMATION: function code/selectors come in GR0/GR1. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4187 
/* SET PREFIX. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}
4193 
/* CANCEL SUBCHANNEL: subchannel id is implicitly in GR1. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4200 
/* CLEAR SUBCHANNEL: subchannel id is implicitly in GR1. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4207 
/* HALT SUBCHANNEL: subchannel id is implicitly in GR1. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4214 
/* MODIFY SUBCHANNEL: GR1 holds the subchannel id, in2 the SCHIB address. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4221 
/* RESET CHANNEL PATH: channel-path id is implicitly in GR1. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4228 
/* RESUME SUBCHANNEL: subchannel id is implicitly in GR1. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4235 
/* SET ADDRESS LIMIT: implicit operand in GR1; no CC. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}
4241 
/* SET CHANNEL MONITOR: implicit operands in GR1/GR2, address in in2. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4247 
/* SIGNAL ADAPTER: not implemented, report "not operational". */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4254 
/* STORE CHANNEL PATH STATUS: intentionally a no-op here. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
4260 
/* START SUBCHANNEL: GR1 holds the subchannel id, in2 the ORB address. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4267 
/* STORE SUBCHANNEL: GR1 holds the subchannel id, in2 the SCHIB address. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4274 
/* STORE CHANNEL REPORT WORD. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4281 
/* TEST PENDING INTERRUPTION. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4288 
/* TEST SUBCHANNEL: GR1 holds the subchannel id, in2 the IRB address. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4295 
/* CHANNEL SUBSYSTEM CALL. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4302 
/* STORE PREFIX: read the prefix from env, masking it to the
   architecturally valid bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4309 
/*
 * STORE THEN AND/OR SYSTEM MASK (shared between STNSM and STOSM).
 * Store the current system-mask byte first, then apply the immediate:
 * AND when the opcode is 0xac (STNSM), otherwise OR (STOSM).
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4335 
/* STORE USING REAL ADDRESS: store via the real-address MMU index; the
   access size comes from the insn table (s->insn->data).  When PER
   storage-alteration tracing of real stores is active, raise the PER
   event and end the TB. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER_STORE_REAL) {
        update_cc_op(s);
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env, tcg_constant_i32(s->ilen));
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4348 #endif
4349 
/* STORE FACILITY LIST EXTENDED. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4356 
/* Store the low byte of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}
4362 
/* Store the low halfword of in1 (big-endian) at address in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}
4368 
/* Store a 32-bit word; extra MemOp flags (e.g. alignment) come from the
   insn table. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
4375 
/* Store a 64-bit doubleword; extra MemOp flags come from the insn table. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4382 
/* STORE ACCESS MULTIPLE: store access registers r1..r3 via the helper. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4391 
/*
 * STORE CHARACTERS UNDER MASK.  m3 selects which bytes of the (32-bit
 * portion of the) register are stored; `base` from the insn table selects
 * the high or low word.  Contiguous masks become a single sized store;
 * any other mask is emitted byte by byte.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within in1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
4439 
/* STORE MULTIPLE: store registers r1..r3 (with wrap-around modulo 16)
   as consecutive 4- or 8-byte values, per the insn table's size. */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}
4459 
/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
   (wrapping modulo 16) as consecutive words. */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        /* Shift left so the TEUL store picks up bits 0-31 of the reg.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4479 
/* STORE PAIR TO QUADWORD: single aligned 128-bit store built from the
   register pair (out2 = low half, out = high half of the i128). */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
4489 
/* SEARCH STRING. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4499 
/* SEARCH STRING UNICODE. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4509 
/* SUBTRACT: out = in1 - in2; CC handled by the cout hook. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4515 
/* SUBTRACT LOGICAL (64-bit): compute out = in1 - in2 while leaving the
   borrow (0 or -1) in cc_src for the CC_OP_SUBU cout hook. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4522 
4523 /* Compute borrow (0, -1) into cc_src. */
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    /* NB: the fallthroughs below are deliberate; each case converts the
       current cc state one step closer to the (0,-1) borrow form.  */
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4544 
/* SUBTRACT LOGICAL WITH BORROW (32-bit): out = in1 + borrow - in2. */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}
4554 
/* SUBTRACT LOGICAL WITH BORROW (64-bit): 128-bit arithmetic so the new
   borrow lands back in cc_src for the cout hook. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4569 
/* SUPERVISOR CALL: record the SVC code and instruction length in env,
   then raise EXCP_SVC.  PSW address and cc are synced first because the
   exception leaves the TB. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4586 
4587 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4588 {
4589     int cc = 0;
4590 
4591     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4592     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4593     gen_op_movi_cc(s, cc);
4594     return DISAS_NEXT;
4595 }
4596 
/* TEST DATA CLASS (short BFP). */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4603 
/* TEST DATA CLASS (long BFP). */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4610 
/* TEST DATA CLASS (extended BFP, 128-bit operand). */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4617 
4618 #ifndef CONFIG_USER_ONLY
4619 
/* TEST BLOCK. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4626 
/* TEST PROTECTION. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4633 
4634 #endif
4635 
/* TEST DECIMAL: l1 field is length-1, so pass length = l1 + 1. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4644 
/* TRANSLATE. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4653 
/* TRANSLATE EXTENDED: helper returns the updated address/length pair as
   one i128, unpacked back into out/out2. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}
4663 
/* TRANSLATE AND TEST. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4672 
/* TRANSLATE AND TEST REVERSE. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4681 
/*
 * TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT), distinguished by the
 * low two opcode bits (passed to the helper as `sizes`).  The test byte
 * comes from GR0 unless m3 bit 0 requests "no test" (-1), and m3 is only
 * honoured with the ETF2-enhancement facility.
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4708 
/* TEST AND SET: atomically exchange the byte with 0xff; CC is the old
   byte's leftmost bit (bit 7 of the loaded value). */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 ff = tcg_constant_i32(0xff);
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4719 
/* UNPACK. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
4727 
4728 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4729 {
4730     int l1 = get_field(s, l1) + 1;
4731     TCGv_i32 l;
4732 
4733     /* The length must not exceed 32 bytes.  */
4734     if (l1 > 32) {
4735         gen_program_exception(s, PGM_SPECIFICATION);
4736         return DISAS_NORETURN;
4737     }
4738     l = tcg_constant_i32(l1);
4739     gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
4740     set_cc_static(s);
4741     return DISAS_NEXT;
4742 }
4743 
4744 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4745 {
4746     int l1 = get_field(s, l1) + 1;
4747     TCGv_i32 l;
4748 
4749     /* The length must be even and should not exceed 64 bytes.  */
4750     if ((l1 & 1) || (l1 > 64)) {
4751         gen_program_exception(s, PGM_SPECIFICATION);
4752         return DISAS_NORETURN;
4753     }
4754     l = tcg_constant_i32(l1);
4755     gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
4756     set_cc_static(s);
4757     return DISAS_NEXT;
4758 }
4759 
4760 
/*
 * EXCLUSIVE OR (character).  x XOR x == 0, so when both operands are the
 * same location the instruction is a common idiom for clearing memory;
 * inline that case (up to 32 bytes) as a series of zero stores, otherwise
 * fall back to the byte-wise helper.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* l field is length-1; emit descending power-of-two stores.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4812 
/* EXCLUSIVE OR: out = in1 ^ in2; CC handled by the cout hook. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4818 
/* EXCLUSIVE OR IMMEDIATE (XIHF/XILF family): the insn table encodes the
   field width in the high byte of `data` and its bit offset in the low
   byte; CC reflects only the modified bits. */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4834 
/* EXCLUSIVE OR (memory-immediate, XI/XIY): without interlocked-access-2
   this is a plain load/xor/store; with it, the xor is performed
   atomically in memory and the result recomputed locally for the CC. */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4855 
/* Produce a zero output (used by insns whose result is constant 0). */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}
4861 
4862 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4863 {
4864     o->out = tcg_constant_i64(0);
4865     o->out2 = o->out;
4866     return DISAS_NEXT;
4867 }
4868 
4869 #ifndef CONFIG_USER_ONLY
/* CALL LOGICAL PROCESSOR (zPCI). */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4878 
/* PCI LOAD (zPCI). */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4888 
/* PCI STORE (zPCI). */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4898 
/* STORE PCI FUNCTION CONTROLS (zPCI); ar is the access register number. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4908 
/* SET INTERRUPTION CONTROLS (zPCI). */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4914 
/* REFRESH PCI TRANSLATIONS (zPCI). */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4924 
/* PCI STORE BLOCK (zPCI); ar is the access register number. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4935 
/* MODIFY PCI FUNCTION CONTROLS (zPCI); ar is the access register number. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4945 #endif
4946 
4947 #include "translate_vx.c.inc"
4948 
4949 /* ====================================================================== */
4950 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4951    the original inputs), update the various cc data structures in order to
4952    be able to compute the new condition code.  */
4953 
/* CC from a 32-bit absolute-value result. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
4958 
/* CC from a 64-bit absolute-value result. */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}
4963 
/* CC from a signed 32-bit addition (inputs and output needed). */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
4968 
/* CC from a signed 64-bit addition (inputs and output needed). */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
4973 
/* CC from an unsigned 32-bit addition: split the 64-bit result into the
   carry (high half) and value (low half) expected by CC_OP_ADDU. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}
4980 
/* CC from an unsigned 64-bit addition; carry already left in cc_src. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}
4985 
/* CC from a signed 32-bit comparison. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}
4990 
/* CC from a signed 64-bit comparison. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}
4995 
/* CC from an unsigned 32-bit comparison. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}
5000 
/* CC from an unsigned 64-bit comparison. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
5005 
/* CC from a short BFP result. */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}
5010 
/* CC from a long BFP result. */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}
5015 
/* CC from an extended BFP result (two 64-bit halves). */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
5020 
/* CC from a 32-bit negative-absolute-value result. */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}
5025 
/* CC from a 64-bit negative-absolute-value result. */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}
5030 
/* CC from a 32-bit complement (negation) result. */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
5035 
/* CC from a 64-bit complement (negation) result. */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}
5040 
5041 static void cout_nz32(DisasContext *s, DisasOps *o)
5042 {
5043     tcg_gen_ext32u_i64(cc_dst, o->out);
5044     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5045 }
5046 
5047 static void cout_nz64(DisasContext *s, DisasOps *o)
5048 {
5049     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5050 }
5051 
/* CC = sign of the 32-bit result (compare against zero). */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

/* CC = sign of the 64-bit result (compare against zero). */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* CC for signed 32-bit subtract: helper needs both inputs and result. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

/* CC for signed 64-bit subtract. */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/*
 * CC for unsigned 32-bit subtract: the *arithmetic* shift propagates the
 * borrow, so cc_src becomes 0 (no borrow) or -1 (borrow), while cc_dst
 * holds the zero-extended 32-bit result.  Contrast cout_addu32, which
 * uses a logical shift because a carry is +1.
 */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

/*
 * CC for unsigned 64-bit subtract.  cc_src is passed through unmodified;
 * presumably the op generator already stored the borrow there — verify at
 * the callers using this cout helper.
 */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

/* CC for TEST UNDER MASK on a 32-bit operand. */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

/* CC for TEST UNDER MASK on a 64-bit operand. */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

/* CC for a signed 32-bit multiply result. */
static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5104 
5105 /* ====================================================================== */
5106 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5107    with the TCG register to which we will write.  Used in combination with
5108    the "wout" generators, in some cases we need a new temporary, and in
5109    some cases we can write to a TCG global.  */
5110 
5111 static void prep_new(DisasContext *s, DisasOps *o)
5112 {
5113     o->out = tcg_temp_new_i64();
5114 }
5115 #define SPEC_prep_new 0
5116 
5117 static void prep_new_P(DisasContext *s, DisasOps *o)
5118 {
5119     o->out = tcg_temp_new_i64();
5120     o->out2 = tcg_temp_new_i64();
5121 }
5122 #define SPEC_prep_new_P 0
5123 
5124 static void prep_new_x(DisasContext *s, DisasOps *o)
5125 {
5126     o->out_128 = tcg_temp_new_i128();
5127 }
5128 #define SPEC_prep_new_x 0
5129 
5130 static void prep_r1(DisasContext *s, DisasOps *o)
5131 {
5132     o->out = regs[get_field(s, r1)];
5133 }
5134 #define SPEC_prep_r1 0
5135 
5136 static void prep_r1_P(DisasContext *s, DisasOps *o)
5137 {
5138     int r1 = get_field(s, r1);
5139     o->out = regs[r1];
5140     o->out2 = regs[r1 + 1];
5141 }
5142 #define SPEC_prep_r1_P SPEC_r1_even
5143 
5144 /* ====================================================================== */
5145 /* The "Write OUTput" generators.  These generally perform some non-trivial
5146    copy of data to TCG globals, or to main memory.  The trivial cases are
5147    generally handled by having a "prep" generator install the TCG global
5148    as the destination of the operation.  */
5149 
5150 static void wout_r1(DisasContext *s, DisasOps *o)
5151 {
5152     store_reg(get_field(s, r1), o->out);
5153 }
5154 #define SPEC_wout_r1 0
5155 
5156 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5157 {
5158     store_reg(get_field(s, r1), o->out2);
5159 }
5160 #define SPEC_wout_out2_r1 0
5161 
5162 static void wout_r1_8(DisasContext *s, DisasOps *o)
5163 {
5164     int r1 = get_field(s, r1);
5165     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5166 }
5167 #define SPEC_wout_r1_8 0
5168 
5169 static void wout_r1_16(DisasContext *s, DisasOps *o)
5170 {
5171     int r1 = get_field(s, r1);
5172     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5173 }
5174 #define SPEC_wout_r1_16 0
5175 
5176 static void wout_r1_32(DisasContext *s, DisasOps *o)
5177 {
5178     store_reg32_i64(get_field(s, r1), o->out);
5179 }
5180 #define SPEC_wout_r1_32 0
5181 
5182 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5183 {
5184     store_reg32h_i64(get_field(s, r1), o->out);
5185 }
5186 #define SPEC_wout_r1_32h 0
5187 
5188 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5189 {
5190     int r1 = get_field(s, r1);
5191     store_reg32_i64(r1, o->out);
5192     store_reg32_i64(r1 + 1, o->out2);
5193 }
5194 #define SPEC_wout_r1_P32 SPEC_r1_even
5195 
5196 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5197 {
5198     int r1 = get_field(s, r1);
5199     TCGv_i64 t = tcg_temp_new_i64();
5200     store_reg32_i64(r1 + 1, o->out);
5201     tcg_gen_shri_i64(t, o->out, 32);
5202     store_reg32_i64(r1, t);
5203 }
5204 #define SPEC_wout_r1_D32 SPEC_r1_even
5205 
5206 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5207 {
5208     int r1 = get_field(s, r1);
5209     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5210 }
5211 #define SPEC_wout_r1_D64 SPEC_r1_even
5212 
5213 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5214 {
5215     int r3 = get_field(s, r3);
5216     store_reg32_i64(r3, o->out);
5217     store_reg32_i64(r3 + 1, o->out2);
5218 }
5219 #define SPEC_wout_r3_P32 SPEC_r3_even
5220 
5221 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5222 {
5223     int r3 = get_field(s, r3);
5224     store_reg(r3, o->out);
5225     store_reg(r3 + 1, o->out2);
5226 }
5227 #define SPEC_wout_r3_P64 SPEC_r3_even
5228 
/* Store a short (32-bit) FP result into FPR r1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) FP result into FPR r1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store an extended (128-bit) FP result into the FPR pair r1/r1+2. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* As wout_x1, but the result already arrives as an out/out2 pair. */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

/* Store to GPR r1 only when r1 != r2 (conditional-write pattern). */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Store to FPR r1 only when r1 != r2. */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the low 8 bits of the result to the first-operand address. */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

/* Store the low 16 bits (big-endian) to the first-operand address. */
static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0
5291 
5292 #ifndef CONFIG_USER_ONLY
5293 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5294 {
5295     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5296 }
5297 #define SPEC_wout_m1_16a 0
5298 #endif
5299 
/* Store the low 32 bits (big-endian) to the first-operand address. */
static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0
5305 
5306 #ifndef CONFIG_USER_ONLY
5307 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5308 {
5309     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5310 }
5311 #define SPEC_wout_m1_32a 0
5312 #endif
5313 
/* Store the full 64-bit result (big-endian) to the first-operand address. */
static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_64, but with an alignment check (system-mode only). */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store the low 32 bits to the address held in in2. */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

/* Copy in2 (not out) into GPR r1. */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

/* Copy the low 32 bits of in2 into GPR r1. */
static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5345 
5346 /* ====================================================================== */
5347 /* The "INput 1" generators.  These load the first operand to an insn.  */
5348 
5349 static void in1_r1(DisasContext *s, DisasOps *o)
5350 {
5351     o->in1 = load_reg(get_field(s, r1));
5352 }
5353 #define SPEC_in1_r1 0
5354 
5355 static void in1_r1_o(DisasContext *s, DisasOps *o)
5356 {
5357     o->in1 = regs[get_field(s, r1)];
5358 }
5359 #define SPEC_in1_r1_o 0
5360 
5361 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5362 {
5363     o->in1 = tcg_temp_new_i64();
5364     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5365 }
5366 #define SPEC_in1_r1_32s 0
5367 
5368 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5369 {
5370     o->in1 = tcg_temp_new_i64();
5371     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5372 }
5373 #define SPEC_in1_r1_32u 0
5374 
5375 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5376 {
5377     o->in1 = tcg_temp_new_i64();
5378     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5379 }
5380 #define SPEC_in1_r1_sr32 0
5381 
5382 static void in1_r1p1(DisasContext *s, DisasOps *o)
5383 {
5384     o->in1 = load_reg(get_field(s, r1) + 1);
5385 }
5386 #define SPEC_in1_r1p1 SPEC_r1_even
5387 
5388 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5389 {
5390     o->in1 = regs[get_field(s, r1) + 1];
5391 }
5392 #define SPEC_in1_r1p1_o SPEC_r1_even
5393 
5394 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5395 {
5396     o->in1 = tcg_temp_new_i64();
5397     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5398 }
5399 #define SPEC_in1_r1p1_32s SPEC_r1_even
5400 
5401 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5402 {
5403     o->in1 = tcg_temp_new_i64();
5404     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5405 }
5406 #define SPEC_in1_r1p1_32u SPEC_r1_even
5407 
5408 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5409 {
5410     int r1 = get_field(s, r1);
5411     o->in1 = tcg_temp_new_i64();
5412     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5413 }
5414 #define SPEC_in1_r1_D32 SPEC_r1_even
5415 
5416 static void in1_r2(DisasContext *s, DisasOps *o)
5417 {
5418     o->in1 = load_reg(get_field(s, r2));
5419 }
5420 #define SPEC_in1_r2 0
5421 
5422 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5423 {
5424     o->in1 = tcg_temp_new_i64();
5425     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5426 }
5427 #define SPEC_in1_r2_sr32 0
5428 
5429 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5430 {
5431     o->in1 = tcg_temp_new_i64();
5432     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5433 }
5434 #define SPEC_in1_r2_32u 0
5435 
5436 static void in1_r3(DisasContext *s, DisasOps *o)
5437 {
5438     o->in1 = load_reg(get_field(s, r3));
5439 }
5440 #define SPEC_in1_r3 0
5441 
5442 static void in1_r3_o(DisasContext *s, DisasOps *o)
5443 {
5444     o->in1 = regs[get_field(s, r3)];
5445 }
5446 #define SPEC_in1_r3_o 0
5447 
5448 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5449 {
5450     o->in1 = tcg_temp_new_i64();
5451     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5452 }
5453 #define SPEC_in1_r3_32s 0
5454 
5455 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5456 {
5457     o->in1 = tcg_temp_new_i64();
5458     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5459 }
5460 #define SPEC_in1_r3_32u 0
5461 
5462 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5463 {
5464     int r3 = get_field(s, r3);
5465     o->in1 = tcg_temp_new_i64();
5466     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5467 }
5468 #define SPEC_in1_r3_D32 SPEC_r3_even
5469 
5470 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5471 {
5472     o->in1 = tcg_temp_new_i64();
5473     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5474 }
5475 #define SPEC_in1_r3_sr32 0
5476 
5477 static void in1_e1(DisasContext *s, DisasOps *o)
5478 {
5479     o->in1 = load_freg32_i64(get_field(s, r1));
5480 }
5481 #define SPEC_in1_e1 0
5482 
5483 static void in1_f1(DisasContext *s, DisasOps *o)
5484 {
5485     o->in1 = load_freg(get_field(s, r1));
5486 }
5487 #define SPEC_in1_f1 0
5488 
5489 static void in1_x1(DisasContext *s, DisasOps *o)
5490 {
5491     o->in1_128 = load_freg_128(get_field(s, r1));
5492 }
5493 #define SPEC_in1_x1 SPEC_r1_f128
5494 
/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

/* Long (64-bit) FP operand from FPR r3. */
static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the first-operand effective address from b1/d1 (no index). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Compute the second-operand effective address (x2 optional) into addr1. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* The in1_m1_* loaders: fetch the first operand from memory at addr1,
   with the width/signedness given by the suffix. */

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5568 
5569 /* ====================================================================== */
5570 /* The "INput 2" generators.  These load the second operand to an insn.  */
5571 
5572 static void in2_r1_o(DisasContext *s, DisasOps *o)
5573 {
5574     o->in2 = regs[get_field(s, r1)];
5575 }
5576 #define SPEC_in2_r1_o 0
5577 
5578 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5579 {
5580     o->in2 = tcg_temp_new_i64();
5581     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5582 }
5583 #define SPEC_in2_r1_16u 0
5584 
5585 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5586 {
5587     o->in2 = tcg_temp_new_i64();
5588     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5589 }
5590 #define SPEC_in2_r1_32u 0
5591 
5592 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5593 {
5594     int r1 = get_field(s, r1);
5595     o->in2 = tcg_temp_new_i64();
5596     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5597 }
5598 #define SPEC_in2_r1_D32 SPEC_r1_even
5599 
5600 static void in2_r2(DisasContext *s, DisasOps *o)
5601 {
5602     o->in2 = load_reg(get_field(s, r2));
5603 }
5604 #define SPEC_in2_r2 0
5605 
5606 static void in2_r2_o(DisasContext *s, DisasOps *o)
5607 {
5608     o->in2 = regs[get_field(s, r2)];
5609 }
5610 #define SPEC_in2_r2_o 0
5611 
5612 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5613 {
5614     int r2 = get_field(s, r2);
5615     if (r2 != 0) {
5616         o->in2 = load_reg(r2);
5617     }
5618 }
5619 #define SPEC_in2_r2_nz 0
5620 
5621 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5622 {
5623     o->in2 = tcg_temp_new_i64();
5624     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5625 }
5626 #define SPEC_in2_r2_8s 0
5627 
5628 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5629 {
5630     o->in2 = tcg_temp_new_i64();
5631     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5632 }
5633 #define SPEC_in2_r2_8u 0
5634 
5635 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5636 {
5637     o->in2 = tcg_temp_new_i64();
5638     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5639 }
5640 #define SPEC_in2_r2_16s 0
5641 
5642 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5643 {
5644     o->in2 = tcg_temp_new_i64();
5645     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5646 }
5647 #define SPEC_in2_r2_16u 0
5648 
5649 static void in2_r3(DisasContext *s, DisasOps *o)
5650 {
5651     o->in2 = load_reg(get_field(s, r3));
5652 }
5653 #define SPEC_in2_r3 0
5654 
5655 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5656 {
5657     int r3 = get_field(s, r3);
5658     o->in2_128 = tcg_temp_new_i128();
5659     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5660 }
5661 #define SPEC_in2_r3_D64 SPEC_r3_even
5662 
5663 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5664 {
5665     o->in2 = tcg_temp_new_i64();
5666     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5667 }
5668 #define SPEC_in2_r3_sr32 0
5669 
5670 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5671 {
5672     o->in2 = tcg_temp_new_i64();
5673     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5674 }
5675 #define SPEC_in2_r3_32u 0
5676 
5677 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5678 {
5679     o->in2 = tcg_temp_new_i64();
5680     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5681 }
5682 #define SPEC_in2_r2_32s 0
5683 
5684 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5685 {
5686     o->in2 = tcg_temp_new_i64();
5687     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5688 }
5689 #define SPEC_in2_r2_32u 0
5690 
5691 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5692 {
5693     o->in2 = tcg_temp_new_i64();
5694     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5695 }
5696 #define SPEC_in2_r2_sr32 0
5697 
5698 static void in2_e2(DisasContext *s, DisasOps *o)
5699 {
5700     o->in2 = load_freg32_i64(get_field(s, r2));
5701 }
5702 #define SPEC_in2_e2 0
5703 
5704 static void in2_f2(DisasContext *s, DisasOps *o)
5705 {
5706     o->in2 = load_freg(get_field(s, r2));
5707 }
5708 #define SPEC_in2_f2 0
5709 
5710 static void in2_x2(DisasContext *s, DisasOps *o)
5711 {
5712     o->in2_128 = load_freg_128(get_field(s, r2));
5713 }
5714 #define SPEC_in2_x2 SPEC_r2_f128
5715 
5716 /* Load the low double word of an extended (128-bit) format FP number */
5717 static void in2_x2l(DisasContext *s, DisasOps *o)
5718 {
5719     o->in2 = load_freg(get_field(s, r2) + 2);
5720 }
5721 #define SPEC_in2_x2l SPEC_r2_f128
5722 
5723 static void in2_ra2(DisasContext *s, DisasOps *o)
5724 {
5725     int r2 = get_field(s, r2);
5726 
5727     /* Note: *don't* treat !r2 as 0, use the reg value. */
5728     o->in2 = tcg_temp_new_i64();
5729     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5730 }
5731 #define SPEC_in2_ra2 0
5732 
5733 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5734 {
5735     return in2_ra2(s, o);
5736 }
5737 #define SPEC_in2_ra2_E SPEC_r2_even
5738 
/* Compute the second-operand effective address (x2 optional) into in2. */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/*
 * Resolve the i2 field of a PC-relative instruction to a target address.
 * disas_jdest either yields a TCGv directly or reports an immediate;
 * an immediate is a halfword offset from the instruction address, hence
 * the * 2 scaling.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}

/* PC-relative second operand (the address itself, not its contents). */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0
5765 
/*
 * Shift-amount operand: if no base register, the amount is the constant
 * d2 & 63; otherwise compute the b2+d2 address and keep its low 6 bits.
 * get_address returns a fresh temp, so masking it in place is safe.
 */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

/* The in2_m2_* loaders: compute the second-operand address, then replace
   in2 with the memory contents at that address (width per suffix). */

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0
5814 
5815 #ifndef CONFIG_USER_ONLY
5816 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5817 {
5818     in2_a2(s, o);
5819     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5820 }
5821 #define SPEC_in2_m2_32ua 0
5822 #endif
5823 
/* 64-bit load from the second-operand address. */
static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/*
 * 64-bit load whose result is then wrapped for the current addressing
 * mode (the loaded value is itself used as an address).
 */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* 64-bit load with an alignment check (system-mode only). */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif
5847 
/* The in2_mri2_* loaders: fetch from the PC-relative address (gen_ri2). */

static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0
5861 
5862 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5863 {
5864     o->in2 = tcg_temp_new_i64();
5865     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5866                        MO_TESL | MO_ALIGN);
5867 }
5868 #define SPEC_in2_mri2_32s 0
5869 
5870 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5871 {
5872     o->in2 = tcg_temp_new_i64();
5873     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5874                        MO_TEUL | MO_ALIGN);
5875 }
5876 #define SPEC_in2_mri2_32u 0
5877 
/* 64-bit PC-relative load, alignment-checked. */
static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

/* Immediate operand i2, as decoded (sign per the field definition). */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

/* Immediate i2 truncated/zero-extended to 8 bits. */
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

/* Immediate i2 truncated/zero-extended to 16 bits. */
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

/* Immediate i2 truncated/zero-extended to 32 bits. */
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* 16-bit immediate shifted left by the per-insn data amount. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* 32-bit immediate shifted left by the per-insn data amount. */
static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction image itself as the operand (system-mode only). */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
5931 
5932 /* ====================================================================== */
5933 
5934 /* Find opc within the table of insns.  This is formulated as a switch
5935    statement so that (1) we get compile-time notice of cut-paste errors
5936    for duplicated opcodes, and (2) the compiler generates the binary
5937    search tree, rather than us having to post-process the table.  */
5938 
5939 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5940     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5941 
5942 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5943     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5944 
5945 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5946     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5947 
5948 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5949 
5950 enum DisasInsnEnum {
5951 #include "insn-data.h.inc"
5952 };
5953 
#undef E
/* Second expansion: build the DisasInsn initializer for each entry,
   wiring up the helper generators named by the table columns. */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
5983 
5984 /* Give smaller names to the various facilities.  */
5985 #define FAC_Z           S390_FEAT_ZARCH
5986 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5987 #define FAC_DFP         S390_FEAT_DFP
5988 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
5989 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
5990 #define FAC_EE          S390_FEAT_EXECUTE_EXT
5991 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
5992 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
5993 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
5994 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
5995 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5996 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
5997 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
5998 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
5999 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6000 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6001 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6002 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6003 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6004 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6005 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6006 #define FAC_SFLE        S390_FEAT_STFLE
6007 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6008 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6009 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6010 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6011 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6012 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6013 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6014 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6015 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6016 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6017 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6018 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6019 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6020 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6021 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6022 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6023 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6024 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6025 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6026 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6027 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6028 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6029 
/*
 * Table of all implemented instructions; each entry is expanded from
 * the generated insn-data.h.inc via the E() macro defined earlier.
 */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};
6033 
6034 #undef E
6035 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6036     case OPC: return &insn_info[insn_ ## NM];
6037 
/*
 * Map a combined opcode (primary << 8 | secondary) to its DisasInsn
 * descriptor, or NULL if the opcode is not implemented.  The E() macro
 * redefined just above turns each insn-data.h.inc entry into a case
 * returning the corresponding insn_info[] element.
 */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}
6046 
6047 #undef F
6048 #undef E
6049 #undef D
6050 #undef C
6051 
6052 /* Extract a field from the insn.  The INSN should be left-aligned in
6053    the uint64_t so that we can more easily utilize the big-bit-endian
6054    definitions we extract from the Principals of Operation.  */
6055 
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field means the operand is absent from this format.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend from bit (size - 1) via the xor/subtract trick.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The low 8 extracted bits are DH (the signed high part); the
           upper 12 are DL.  Reassemble as (DH << 12) | DL.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* Vector register numbers: the 5th (most significant) bit lives
           in the RXB byte at insn bits 36-39, selected by the position
           of the 4-bit field within the instruction.  */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6112 
6113 /* Lookup the insn at the current PC, extracting the operands into O and
6114    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6115 
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        uint64_t be_insn;

        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;

        /* Register insn bytes with translator so plugins work. */
        be_insn = cpu_to_be64(insn);
        translator_fake_ld(&s->base, &be_insn, get_ilen(op));
    } else {
        /* The primary opcode byte determines the instruction length.  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the full instruction in the 64-bit word.  */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6221 
/*
 * Return true if REG is an additional-floating-point (AFP) register,
 * i.e. anything other than the four basic FP registers 0, 2, 4, 6.
 */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6226 
/*
 * Return true if REG may name a 128-bit FP register pair.
 * Valid pair starts are 0,1,4,5,8,9,12,13: exactly those registers
 * with bit 1 clear.
 */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6232 
/*
 * Decode and translate one instruction at the current PC: look it up,
 * raise the applicable program exceptions (illegal opcode, privileged
 * operation, AFP/vector data exceptions, specification), then invoke
 * the insn's helper callbacks in the fixed in1/in2/prep/op/wout/cout
 * order.  Returns the resulting DisasJumpType.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->base.insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_IFETCH) {
        /* With ifetch set, psw_addr and cc_op are always up-to-date. */
        gen_helper_per_ifetch(tcg_env, tcg_constant_i32(s->ilen));
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            /* dxc selects the data-exception code: 1 for AFP register
               use, 2 for BFP, 3 for DFP, 0xfe for vector insns.  */
            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
        if (ret == DISAS_NORETURN) {
            goto out;
        }
    }
    if (insn->help_wout) {
        insn->help_wout(s, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER_IFETCH) {
        switch (ret) {
        case DISAS_TOO_MANY:
            s->base.is_jmp = DISAS_PC_CC_UPDATED;
            /* fall through */
        case DISAS_NEXT:
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            break;
        default:
            break;
        }
        update_cc_op(s);
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6378 
6379 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6380 {
6381     DisasContext *dc = container_of(dcbase, DisasContext, base);
6382 
6383     /* 31-bit mode */
6384     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6385         dc->base.pc_first &= 0x7fffffff;
6386         dc->base.pc_next = dc->base.pc_first;
6387     }
6388 
6389     dc->cc_op = CC_OP_DYNAMIC;
6390     dc->ex_value = dc->base.tb->cs_base;
6391     dc->exit_to_mainloop = dc->ex_value;
6392 }
6393 
/* No per-TB prologue is needed for s390x. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6397 
6398 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6399 {
6400     DisasContext *dc = container_of(dcbase, DisasContext, base);
6401 
6402     /* Delay the set of ilen until we've read the insn. */
6403     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6404 }
6405 
6406 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6407                                 uint64_t pc)
6408 {
6409     uint64_t insn = translator_lduw(env, &s->base, pc);
6410 
6411     return pc + get_ilen((insn >> 8) & 0xff);
6412 }
6413 
6414 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6415 {
6416     CPUS390XState *env = cpu_env(cs);
6417     DisasContext *dc = container_of(dcbase, DisasContext, base);
6418 
6419     dc->base.is_jmp = translate_one(env, dc);
6420     if (dc->base.is_jmp == DISAS_NEXT) {
6421         if (dc->ex_value ||
6422             !translator_is_same_page(dcbase, dc->base.pc_next) ||
6423             !translator_is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6424             dc->base.is_jmp = DISAS_TOO_MANY;
6425         }
6426     }
6427 }
6428 
/*
 * Emit the TB epilogue.  The cases deliberately fall through so that
 * each exit reason flushes exactly the state that is still stale:
 * TOO_MANY flushes pc then cc, PC_UPDATED flushes only cc, and
 * PC_CC_UPDATED flushes nothing before exiting.
 */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6456 
6457 static bool s390x_tr_disas_log(const DisasContextBase *dcbase,
6458                                CPUState *cs, FILE *logfile)
6459 {
6460     DisasContext *dc = container_of(dcbase, DisasContext, base);
6461 
6462     if (unlikely(dc->ex_value)) {
6463         /* The ex_value has been recorded with translator_fake_ld. */
6464         fprintf(logfile, "IN: EXECUTE\n");
6465         target_disas(logfile, cs, &dc->base);
6466         return true;
6467     }
6468     return false;
6469 }
6470 
/* Hooks consumed by the generic translator_loop(). */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6479 
6480 void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
6481                           int *max_insns, vaddr pc, void *host_pc)
6482 {
6483     DisasContext dc;
6484 
6485     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6486 }
6487 
6488 void s390x_restore_state_to_opc(CPUState *cs,
6489                                 const TranslationBlock *tb,
6490                                 const uint64_t *data)
6491 {
6492     CPUS390XState *env = cpu_env(cs);
6493     int cc_op = data[1];
6494 
6495     env->psw.addr = data[0];
6496 
6497     /* Update the CC opcode if it is not already up-to-date.  */
6498     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6499         env->cc_op = cc_op;
6500     }
6501 
6502     /* Record ILEN.  */
6503     env->int_pgm_ilen = data[2];
6504 }
6505