xref: /qemu/target/i386/tcg/translate.c (revision 6986cf003226ddf7e5af36a9f4f033cb16c8636c)
1 /*
2  *  i386 translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/translation-block.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/translator.h"
28 #include "fpu/softfloat.h"
29 
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 #include "helper-tcg.h"
33 #include "decode-new.h"
34 
35 #include "exec/log.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
41 /* Fixes for Windows namespace pollution.  */
42 #undef IN
43 #undef OUT
44 
45 #define PREFIX_REPZ   0x01
46 #define PREFIX_REPNZ  0x02
47 #define PREFIX_LOCK   0x04
48 #define PREFIX_DATA   0x08
49 #define PREFIX_ADR    0x10
50 #define PREFIX_VEX    0x20
51 #define PREFIX_REX    0x40
52 
53 #ifdef TARGET_X86_64
54 # define ctztl  ctz64
55 # define clztl  clz64
56 #else
57 # define ctztl  ctz32
58 # define clztl  clz32
59 #endif
60 
61 /* For a switch indexed by MODRM, match all memory operands for a given OP.  */
62 #define CASE_MODRM_MEM_OP(OP) \
63     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
64     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
65     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
66 
67 #define CASE_MODRM_OP(OP) \
68     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
69     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
70     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
71     case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
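
/*
 * Illustrative example (not in the original source): for OP = 7,
 * CASE_MODRM_MEM_OP(7) matches the ModRM bytes 0x38-0x3f (mod=00),
 * 0x78-0x7f (mod=01) and 0xb8-0xbf (mod=10), i.e. all memory forms
 * with reg=7, while CASE_MODRM_OP(7) additionally matches the
 * register forms 0xf8-0xff (mod=11).
 */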
72 
73 //#define MACRO_TEST   1
74 
75 /* global register indexes */
76 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
77 static TCGv cpu_eip;
78 static TCGv_i32 cpu_cc_op;
79 static TCGv cpu_regs[CPU_NB_REGS];
80 static TCGv cpu_seg_base[6];
81 static TCGv_i64 cpu_bndl[4];
82 static TCGv_i64 cpu_bndu[4];
83 
84 typedef struct DisasContext {
85     DisasContextBase base;
86 
87     target_ulong pc;       /* pc = eip + cs_base */
88     target_ulong cs_base;  /* base of CS segment */
89     target_ulong pc_save;
90 
91     MemOp aflag;
92     MemOp dflag;
93 
94     int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
95     uint8_t prefix;
96 
97     bool has_modrm;
98     uint8_t modrm;
99 
100 #ifndef CONFIG_USER_ONLY
101     uint8_t cpl;   /* code priv level */
102     uint8_t iopl;  /* i/o priv level */
103 #endif
104     uint8_t vex_l;  /* vex vector length */
105     uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
106     uint8_t popl_esp_hack; /* for correct popl with esp base handling */
107     uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
108 
109 #ifdef TARGET_X86_64
110     uint8_t rex_r;
111     uint8_t rex_x;
112     uint8_t rex_b;
113 #endif
114     bool vex_w; /* used by AVX even on 32-bit processors */
115     bool jmp_opt; /* use direct block chaining for direct jumps */
116     bool cc_op_dirty;
117 
118     CCOp cc_op;  /* current CC operation */
119     int mem_index; /* select memory access functions */
120     uint32_t flags; /* all execution flags */
121     int cpuid_features;
122     int cpuid_ext_features;
123     int cpuid_ext2_features;
124     int cpuid_ext3_features;
125     int cpuid_7_0_ebx_features;
126     int cpuid_7_0_ecx_features;
127     int cpuid_7_1_eax_features;
128     int cpuid_xsave_features;
129 
130     /* TCG local temps */
131     TCGv cc_srcT;
132     TCGv A0;
133     TCGv T0;
134     TCGv T1;
135 
136     /* TCG local register indexes (only used inside old micro ops) */
137     TCGv tmp0;
138     TCGv tmp4;
139     TCGv_i32 tmp2_i32;
140     TCGv_i32 tmp3_i32;
141     TCGv_i64 tmp1_i64;
142 
143     sigjmp_buf jmpbuf;
144     TCGOp *prev_insn_start;
145     TCGOp *prev_insn_end;
146 } DisasContext;
147 
148 /*
149  * Point EIP to next instruction before ending translation.
150  * For instructions that can change hflags.
151  */
152 #define DISAS_EOB_NEXT         DISAS_TARGET_0
153 
154 /*
155  * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
156  * already set.  For instructions that activate interrupt shadow.
157  */
158 #define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1
159 
160 /*
161  * Return to the main loop; EIP might have already been updated
162  * but even in that case do not use lookup_and_goto_ptr().
163  */
164 #define DISAS_EOB_ONLY         DISAS_TARGET_2
165 
166 /*
167  * EIP has already been updated.  For jumps that wish to use
168  * lookup_and_goto_ptr()
169  */
170 #define DISAS_JUMP             DISAS_TARGET_3
171 
172 /*
173  * EIP has already been updated.  Use updated value of
174  * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
175  */
176 #define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4
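
/*
 * A sketch of how these are used (hypothetical decode path, not taken
 * verbatim from this file): an instruction that may change hflags
 * ends the TB with
 *
 *     s->base.is_jmp = DISAS_EOB_NEXT;
 *
 * while something like STI, which opens an interrupt shadow, would
 * use DISAS_EOB_INHIBIT_IRQ instead.
 */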
177 
178 /* The environment in which user-only runs is constrained. */
179 #ifdef CONFIG_USER_ONLY
180 #define PE(S)     true
181 #define CPL(S)    3
182 #define IOPL(S)   0
183 #define SVME(S)   false
184 #define GUEST(S)  false
185 #else
186 #define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
187 #define CPL(S)    ((S)->cpl)
188 #define IOPL(S)   ((S)->iopl)
189 #define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
190 #define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
191 #endif
192 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
193 #define VM86(S)   false
194 #define CODE32(S) true
195 #define SS32(S)   true
196 #define ADDSEG(S) false
197 #else
198 #define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
199 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
200 #define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
201 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
202 #endif
203 #if !defined(TARGET_X86_64)
204 #define CODE64(S) false
205 #elif defined(CONFIG_USER_ONLY)
206 #define CODE64(S) true
207 #else
208 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
209 #endif
210 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
211 #define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
212 #else
213 #define LMA(S)    false
214 #endif
215 
216 #ifdef TARGET_X86_64
217 #define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
218 #define REX_W(S)       ((S)->vex_w)
219 #define REX_R(S)       ((S)->rex_r + 0)
220 #define REX_X(S)       ((S)->rex_x + 0)
221 #define REX_B(S)       ((S)->rex_b + 0)
222 #else
223 #define REX_PREFIX(S)  false
224 #define REX_W(S)       false
225 #define REX_R(S)       0
226 #define REX_X(S)       0
227 #define REX_B(S)       0
228 #endif
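
/*
 * Example (illustrative): "push %r8" is encoded as 0x41 0x50.  The
 * REX prefix 0x41 has B=1, so the 3-bit register field 0 in the
 * opcode is extended by REX_B(s) to select register 8 (R8) rather
 * than RAX.
 */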
229 
230 /*
231  * Many system-only helpers are not reachable for user-only.
232  * Define stub generators here, so that we need not either sprinkle
233  * ifdefs through the translator, nor provide the helper function.
234  */
235 #define STUB_HELPER(NAME, ...) \
236     static inline void gen_helper_##NAME(__VA_ARGS__) \
237     { qemu_build_not_reached(); }
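
/*
 * For example, the first invocation below expands to (sketch):
 *
 *     static inline void gen_helper_clgi(TCGv_env env)
 *     { qemu_build_not_reached(); }
 *
 * so system-only call sites still compile in user-only builds, and
 * the build fails if such a call could actually be reached.
 */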
238 
239 #ifdef CONFIG_USER_ONLY
240 STUB_HELPER(clgi, TCGv_env env)
241 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
242 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
243 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
244 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
245 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
246 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
247 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
248 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
249 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
250 STUB_HELPER(stgi, TCGv_env env)
251 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
252 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
253 STUB_HELPER(vmmcall, TCGv_env env)
254 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
255 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
256 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
257 #endif
258 
259 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
260 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
261 static void gen_exception_gpf(DisasContext *s);
262 
263 /* i386 shift ops */
264 enum {
265     OP_ROL,
266     OP_ROR,
267     OP_RCL,
268     OP_RCR,
269     OP_SHL,
270     OP_SHR,
271     OP_SHL1, /* undocumented */
272     OP_SAR = 7,
273 };
274 
275 enum {
276     JCC_O,
277     JCC_B,
278     JCC_Z,
279     JCC_BE,
280     JCC_S,
281     JCC_P,
282     JCC_L,
283     JCC_LE,
284 };
285 
286 enum {
287     USES_CC_DST  = 1,
288     USES_CC_SRC  = 2,
289     USES_CC_SRC2 = 4,
290     USES_CC_SRCT = 8,
291 };
292 
293 /* Bitmask of the CC globals that are live after setting CC_OP to X.  */
294 static const uint8_t cc_op_live_[] = {
295     [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
296     [CC_OP_EFLAGS] = USES_CC_SRC,
297     [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
298     [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
299     [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
300     [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
301     [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
302     [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
303     [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
304     [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
305     [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
306     [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
307     [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
308     [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
309     [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
310     [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
311     [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
312     [CC_OP_POPCNT] = USES_CC_DST,
313 };
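
/*
 * Worked example (illustrative): if the previous instruction was a
 * SUB (live = DST|SRC|SRCT) and the next sets CC_OP_LOGICB.. (live =
 * DST), then in set_cc_op_1 below dead = SRC|SRCT, so cpu_cc_src and
 * s->cc_srcT are discarded and TCG need not keep them alive.
 */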
314 
315 static uint8_t cc_op_live(CCOp op)
316 {
317     uint8_t result;
318     assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));
319 
320     /*
321      * Check that the array is fully populated.  A zero entry would correspond
322      * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
323      * as well.
324      */
325     result = cc_op_live_[op];
326     assert(result);
327     return result;
328 }
329 
330 static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
331 {
332     int dead;
333 
334     if (s->cc_op == op) {
335         return;
336     }
337 
338     /* Discard CC computation that will no longer be used.  */
339     dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
340     if (dead & USES_CC_DST) {
341         tcg_gen_discard_tl(cpu_cc_dst);
342     }
343     if (dead & USES_CC_SRC) {
344         tcg_gen_discard_tl(cpu_cc_src);
345     }
346     if (dead & USES_CC_SRC2) {
347         tcg_gen_discard_tl(cpu_cc_src2);
348     }
349     if (dead & USES_CC_SRCT) {
350         tcg_gen_discard_tl(s->cc_srcT);
351     }
352 
353     if (dirty && s->cc_op == CC_OP_DYNAMIC) {
354         tcg_gen_discard_i32(cpu_cc_op);
355     }
356     s->cc_op_dirty = dirty;
357     s->cc_op = op;
358 }
359 
360 static void set_cc_op(DisasContext *s, CCOp op)
361 {
362     /*
363      * The DYNAMIC setting is translator only, everything else
364      * will be spilled later.
365      */
366     set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
367 }
368 
369 static void assume_cc_op(DisasContext *s, CCOp op)
370 {
371     set_cc_op_1(s, op, false);
372 }
373 
374 static void gen_update_cc_op(DisasContext *s)
375 {
376     if (s->cc_op_dirty) {
377         tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
378         s->cc_op_dirty = false;
379     }
380 }
381 
382 #ifdef TARGET_X86_64
383 
384 #define NB_OP_SIZES 4
385 
386 #else /* !TARGET_X86_64 */
387 
388 #define NB_OP_SIZES 3
389 
390 #endif /* !TARGET_X86_64 */
391 
392 #if HOST_BIG_ENDIAN
393 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
394 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
395 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
396 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
397 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
398 #else
399 #define REG_B_OFFSET 0
400 #define REG_H_OFFSET 1
401 #define REG_W_OFFSET 0
402 #define REG_L_OFFSET 0
403 #define REG_LH_OFFSET 4
404 #endif
405 
406 /* In instruction encodings for byte register accesses the
407  * register number usually indicates "low 8 bits of register N";
408  * however there are some special cases where N 4..7 indicates
409  * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
410  * true for this special case, false otherwise.
411  */
412 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
413 {
414     /* Any time the REX prefix is present, byte registers are uniform */
415     if (reg < 4 || REX_PREFIX(s)) {
416         return false;
417     }
418     return true;
419 }
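
/*
 * Example (illustrative): with reg = 4 and no REX prefix, the byte
 * register is AH (bits 15..8 of RAX) and this returns true; with any
 * REX prefix present, the same encoding selects SPL (low 8 bits of
 * RSP) and this returns false.
 */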
420 
421 /* Select the size of a push/pop operation.  */
422 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
423 {
424     if (CODE64(s)) {
425         return ot == MO_16 ? MO_16 : MO_64;
426     } else {
427         return ot;
428     }
429 }
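
/*
 * Example (illustrative): in 64-bit mode the default dflag is MO_32,
 * but "push %rax" pushes 8 bytes, so MO_32 is mapped to MO_64 here;
 * only the 0x66 prefix (MO_16) keeps its size, since 32-bit pushes
 * are not encodable in 64-bit mode.
 */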
430 
431 /* Select the size of the stack pointer.  */
432 static inline MemOp mo_stacksize(DisasContext *s)
433 {
434     return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
435 }
436 
437 /* Compute the result of writing t0 to the OT-sized register REG.
438  *
439  * If DEST is NULL, store the result into the register itself;
440  * if DEST is not NULL, store the result into DEST instead.
441  *
442  * In both cases, return the TCGv of the register (not DEST).
443  */
445 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
446 {
447     switch(ot) {
448     case MO_8:
449         if (byte_reg_is_xH(s, reg)) {
450             dest = dest ? dest : cpu_regs[reg - 4];
451             tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
452             return cpu_regs[reg - 4];
453         }
454         dest = dest ? dest : cpu_regs[reg];
455         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
456         break;
457     case MO_16:
458         dest = dest ? dest : cpu_regs[reg];
459         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
460         break;
461     case MO_32:
462         /* For x86_64, this zeroes the upper half of the register.
463            For i386, this is equivalent to a mov. */
464         dest = dest ? dest : cpu_regs[reg];
465         tcg_gen_ext32u_tl(dest, t0);
466         break;
467 #ifdef TARGET_X86_64
468     case MO_64:
469         dest = dest ? dest : cpu_regs[reg];
470         tcg_gen_mov_tl(dest, t0);
471         break;
472 #endif
473     default:
474         g_assert_not_reached();
475     }
476     return cpu_regs[reg];
477 }
478 
479 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
480 {
481     gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
482 }
483 
484 static inline
485 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
486 {
487     if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
488         tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
489     } else {
490         tcg_gen_mov_tl(t0, cpu_regs[reg]);
491     }
492 }
493 
494 static void gen_add_A0_im(DisasContext *s, int val)
495 {
496     tcg_gen_addi_tl(s->A0, s->A0, val);
497     if (!CODE64(s)) {
498         tcg_gen_ext32u_tl(s->A0, s->A0);
499     }
500 }
501 
502 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
503 {
504     tcg_gen_mov_tl(cpu_eip, dest);
505     s->pc_save = -1;
506 }
507 
508 static inline
509 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
510 {
511     tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
512     gen_op_mov_reg_v(s, size, reg, s->tmp0);
513 }
514 
515 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
516 {
517     tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
518     gen_op_mov_reg_v(s, size, reg, s->tmp0);
519 }
520 
521 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
522 {
523     tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
524 }
525 
526 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
527 {
528     tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
529 }
530 
531 static void gen_update_eip_next(DisasContext *s)
532 {
533     assert(s->pc_save != -1);
534     if (tb_cflags(s->base.tb) & CF_PCREL) {
535         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
536     } else if (CODE64(s)) {
537         tcg_gen_movi_tl(cpu_eip, s->pc);
538     } else {
539         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
540     }
541     s->pc_save = s->pc;
542 }
543 
544 static void gen_update_eip_cur(DisasContext *s)
545 {
546     assert(s->pc_save != -1);
547     if (tb_cflags(s->base.tb) & CF_PCREL) {
548         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
549     } else if (CODE64(s)) {
550         tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
551     } else {
552         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
553     }
554     s->pc_save = s->base.pc_next;
555 }
556 
557 static int cur_insn_len(DisasContext *s)
558 {
559     return s->pc - s->base.pc_next;
560 }
561 
562 static TCGv_i32 cur_insn_len_i32(DisasContext *s)
563 {
564     return tcg_constant_i32(cur_insn_len(s));
565 }
566 
567 static TCGv_i32 eip_next_i32(DisasContext *s)
568 {
569     assert(s->pc_save != -1);
570     /*
571      * This function has two users: lcall_real (always 16-bit mode), and
572      * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
573      * when EFLAGS.NT is set; that is illegal in 64-bit mode, so
574      * passing a 32-bit value isn't broken.  To avoid using this where
575      * we shouldn't, return -1 in 64-bit mode so that execution goes into
576      * the weeds quickly.
577      */
578     if (CODE64(s)) {
579         return tcg_constant_i32(-1);
580     }
581     if (tb_cflags(s->base.tb) & CF_PCREL) {
582         TCGv_i32 ret = tcg_temp_new_i32();
583         tcg_gen_trunc_tl_i32(ret, cpu_eip);
584         tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
585         return ret;
586     } else {
587         return tcg_constant_i32(s->pc - s->cs_base);
588     }
589 }
590 
591 static TCGv eip_next_tl(DisasContext *s)
592 {
593     assert(s->pc_save != -1);
594     if (tb_cflags(s->base.tb) & CF_PCREL) {
595         TCGv ret = tcg_temp_new();
596         tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
597         return ret;
598     } else if (CODE64(s)) {
599         return tcg_constant_tl(s->pc);
600     } else {
601         return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
602     }
603 }
604 
605 static TCGv eip_cur_tl(DisasContext *s)
606 {
607     assert(s->pc_save != -1);
608     if (tb_cflags(s->base.tb) & CF_PCREL) {
609         TCGv ret = tcg_temp_new();
610         tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
611         return ret;
612     } else if (CODE64(s)) {
613         return tcg_constant_tl(s->base.pc_next);
614     } else {
615         return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
616     }
617 }
618 
619 /* Compute SEG:A0 into DEST.  SEG is selected from the override segment
620    (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
621    indicate no override.  */
622 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
623                                int def_seg, int ovr_seg)
624 {
625     switch (aflag) {
626 #ifdef TARGET_X86_64
627     case MO_64:
628         if (ovr_seg < 0) {
629             tcg_gen_mov_tl(dest, a0);
630             return;
631         }
632         break;
633 #endif
634     case MO_32:
635         /* 32 bit address */
636         if (ovr_seg < 0 && ADDSEG(s)) {
637             ovr_seg = def_seg;
638         }
639         if (ovr_seg < 0) {
640             tcg_gen_ext32u_tl(dest, a0);
641             return;
642         }
643         break;
644     case MO_16:
645         /* 16 bit address */
646         tcg_gen_ext16u_tl(dest, a0);
647         a0 = dest;
648         if (ovr_seg < 0) {
649             if (ADDSEG(s)) {
650                 ovr_seg = def_seg;
651             } else {
652                 return;
653             }
654         }
655         break;
656     default:
657         g_assert_not_reached();
658     }
659 
660     if (ovr_seg >= 0) {
661         TCGv seg = cpu_seg_base[ovr_seg];
662 
663         if (aflag == MO_64) {
664             tcg_gen_add_tl(dest, a0, seg);
665         } else if (CODE64(s)) {
666             tcg_gen_ext32u_tl(dest, a0);
667             tcg_gen_add_tl(dest, dest, seg);
668         } else {
669             tcg_gen_add_tl(dest, a0, seg);
670             tcg_gen_ext32u_tl(dest, dest);
671         }
672     }
673 }
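
/*
 * Example (illustrative): a 16-bit access to DS:0xFFFF in real mode
 * with a nonzero DS base takes the MO_16 path above: the offset is
 * zero-extended to 0xFFFF, the DS base is added, and the sum is
 * truncated to 32 bits, matching the real-mode linear address
 * computation.
 */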
674 
675 static void gen_lea_v_seg(DisasContext *s, TCGv a0,
676                           int def_seg, int ovr_seg)
677 {
678     gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
679 }
680 
681 static inline void gen_string_movl_A0_ESI(DisasContext *s)
682 {
683     gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
684 }
685 
686 static inline void gen_string_movl_A0_EDI(DisasContext *s)
687 {
688     gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
689 }
690 
691 static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
692 {
693     TCGv dshift = tcg_temp_new();
694     tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
695     tcg_gen_shli_tl(dshift, dshift, ot);
696     return dshift;
697 }
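
/*
 * Example (illustrative): env->df holds 1 or -1 depending on
 * EFLAGS.DF, so for a 32-bit string op (ot == MO_32) the computed
 * dshift is +4 or -4, the amount by which ESI/EDI are advanced per
 * iteration.
 */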
698 
699 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
700 {
701     if (size == MO_TL) {
702         return src;
703     }
704     if (!dst) {
705         dst = tcg_temp_new();
706     }
707     tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
708     return dst;
709 }
710 
711 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
712 {
713     TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
714 
715     tcg_gen_brcondi_tl(cond, tmp, 0, label1);
716 }
717 
718 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
719 {
720     gen_op_j_ecx(s, TCG_COND_EQ, label1);
721 }
722 
723 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
724 {
725     gen_op_j_ecx(s, TCG_COND_NE, label1);
726 }
727 
728 static void gen_set_hflag(DisasContext *s, uint32_t mask)
729 {
730     if ((s->flags & mask) == 0) {
731         TCGv_i32 t = tcg_temp_new_i32();
732         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
733         tcg_gen_ori_i32(t, t, mask);
734         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
735         s->flags |= mask;
736     }
737 }
738 
739 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
740 {
741     if (s->flags & mask) {
742         TCGv_i32 t = tcg_temp_new_i32();
743         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
744         tcg_gen_andi_i32(t, t, ~mask);
745         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
746         s->flags &= ~mask;
747     }
748 }
749 
750 static void gen_set_eflags(DisasContext *s, target_ulong mask)
751 {
752     TCGv t = tcg_temp_new();
753 
754     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
755     tcg_gen_ori_tl(t, t, mask);
756     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
757 }
758 
759 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
760 {
761     TCGv t = tcg_temp_new();
762 
763     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
764     tcg_gen_andi_tl(t, t, ~mask);
765     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
766 }
767 
768 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
769 {
770     switch (ot) {
771     case MO_8:
772         gen_helper_inb(v, tcg_env, n);
773         break;
774     case MO_16:
775         gen_helper_inw(v, tcg_env, n);
776         break;
777     case MO_32:
778         gen_helper_inl(v, tcg_env, n);
779         break;
780     default:
781         g_assert_not_reached();
782     }
783 }
784 
785 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
786 {
787     switch (ot) {
788     case MO_8:
789         gen_helper_outb(tcg_env, v, n);
790         break;
791     case MO_16:
792         gen_helper_outw(tcg_env, v, n);
793         break;
794     case MO_32:
795         gen_helper_outl(tcg_env, v, n);
796         break;
797     default:
798         g_assert_not_reached();
799     }
800 }
801 
802 /*
803  * Validate that access to [port, port + 1<<ot) is allowed.
804  * Raise #GP, or generate a VMM exit, if not.
805  */
806 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
807                          uint32_t svm_flags)
808 {
809 #ifdef CONFIG_USER_ONLY
810     /*
811      * We do not implement the ioperm(2) syscall, so the TSS check
812      * will always fail.
813      */
814     gen_exception_gpf(s);
815     return false;
816 #else
817     if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
818         gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
819     }
820     if (GUEST(s)) {
821         gen_update_cc_op(s);
822         gen_update_eip_cur(s);
823         if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
824             svm_flags |= SVM_IOIO_REP_MASK;
825         }
826         svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
827         gen_helper_svm_check_io(tcg_env, port,
828                                 tcg_constant_i32(svm_flags),
829                                 cur_insn_len_i32(s));
830     }
831     return true;
832 #endif
833 }
834 
835 static void gen_movs(DisasContext *s, MemOp ot)
836 {
837     TCGv dshift;
838 
839     gen_string_movl_A0_ESI(s);
840     gen_op_ld_v(s, ot, s->T0, s->A0);
841     gen_string_movl_A0_EDI(s);
842     gen_op_st_v(s, ot, s->T0, s->A0);
843 
844     dshift = gen_compute_Dshift(s, ot);
845     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
846     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
847 }
848 
849 /* compute all eflags to reg */
850 static void gen_mov_eflags(DisasContext *s, TCGv reg)
851 {
852     TCGv dst, src1, src2;
853     TCGv_i32 cc_op;
854     int live, dead;
855 
856     if (s->cc_op == CC_OP_EFLAGS) {
857         tcg_gen_mov_tl(reg, cpu_cc_src);
858         return;
859     }
860 
861     dst = cpu_cc_dst;
862     src1 = cpu_cc_src;
863     src2 = cpu_cc_src2;
864 
865     /* Take care to not read values that are not live.  */
866     live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
867     dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
868     if (dead) {
869         TCGv zero = tcg_constant_tl(0);
870         if (dead & USES_CC_DST) {
871             dst = zero;
872         }
873         if (dead & USES_CC_SRC) {
874             src1 = zero;
875         }
876         if (dead & USES_CC_SRC2) {
877             src2 = zero;
878         }
879     }
880 
881     if (s->cc_op != CC_OP_DYNAMIC) {
882         cc_op = tcg_constant_i32(s->cc_op);
883     } else {
884         cc_op = cpu_cc_op;
885     }
886     gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
887 }
888 
889 /* compute all eflags to cc_src */
890 static void gen_compute_eflags(DisasContext *s)
891 {
892     gen_mov_eflags(s, cpu_cc_src);
893     set_cc_op(s, CC_OP_EFLAGS);
894 }
895 
896 typedef struct CCPrepare {
897     TCGCond cond;
898     TCGv reg;
899     TCGv reg2;
900     target_ulong imm;
901     bool use_reg2;
902     bool no_setcond;
903 } CCPrepare;
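
/*
 * Example (illustrative): with cc_op == CC_OP_EFLAGS, the condition
 * "ZF is set" is described as
 *
 *     (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
 *                   .imm = CC_Z }
 *
 * which a consumer can lower to either a setcond or a brcond.
 */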
904 
905 static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
906 {
907     if (size == MO_TL) {
908         return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
909     } else {
910         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
911                              .imm = 1ull << ((8 << size) - 1) };
912     }
913 }
914 
915 static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
916 {
917     if (size == MO_TL) {
918         return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
919                              .reg = src };
920     } else {
921         return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
922                              .imm = MAKE_64BIT_MASK(0, 8 << size),
923                              .reg = src };
924     }
925 }
926 
927 /* compute eflags.C, trying to store it in reg if not NULL */
928 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
929 {
930     MemOp size;
931 
932     switch (s->cc_op) {
933     case CC_OP_SUBB ... CC_OP_SUBQ:
934         /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
935         size = cc_op_size(s->cc_op);
936         tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
937         tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
938         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
939                              .reg2 = cpu_cc_src, .use_reg2 = true };
940 
941     case CC_OP_ADDB ... CC_OP_ADDQ:
942         /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
943         size = cc_op_size(s->cc_op);
944         tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
945         tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
946         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
947                              .reg2 = cpu_cc_src, .use_reg2 = true };
948 
949     case CC_OP_LOGICB ... CC_OP_LOGICQ:
950     case CC_OP_POPCNT:
951         return (CCPrepare) { .cond = TCG_COND_NEVER };
952 
953     case CC_OP_INCB ... CC_OP_INCQ:
954     case CC_OP_DECB ... CC_OP_DECQ:
955         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
956                              .no_setcond = true };
957 
958     case CC_OP_SHLB ... CC_OP_SHLQ:
959         /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
960         size = cc_op_size(s->cc_op);
961         return gen_prepare_sign_nz(cpu_cc_src, size);
962 
963     case CC_OP_MULB ... CC_OP_MULQ:
964         return (CCPrepare) { .cond = TCG_COND_NE,
965                              .reg = cpu_cc_src };
966 
967     case CC_OP_BMILGB ... CC_OP_BMILGQ:
968         size = cc_op_size(s->cc_op);
969         return gen_prepare_val_nz(cpu_cc_src, size, true);
970 
971     case CC_OP_BLSIB ... CC_OP_BLSIQ:
972         size = cc_op_size(s->cc_op);
973         return gen_prepare_val_nz(cpu_cc_src, size, false);
974 
975     case CC_OP_ADCX:
976     case CC_OP_ADCOX:
977         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
978                              .no_setcond = true };
979 
980     case CC_OP_EFLAGS:
981     case CC_OP_SARB ... CC_OP_SARQ:
982         /* CC_SRC & 1 */
983         return (CCPrepare) { .cond = TCG_COND_TSTNE,
984                              .reg = cpu_cc_src, .imm = CC_C };
985 
986     default:
987        /* The need to compute only C from CC_OP_DYNAMIC is important
988           in efficiently implementing e.g. INC at the start of a TB.  */
989        gen_update_cc_op(s);
990        if (!reg) {
991            reg = tcg_temp_new();
992        }
993        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
994                                cpu_cc_src2, cpu_cc_op);
995        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
996                             .no_setcond = true };
997     }
998 }
999 
1000 /* compute eflags.P, trying to store it in reg if not NULL */
1001 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1002 {
1003     gen_compute_eflags(s);
1004     return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1005                          .imm = CC_P };
1006 }
1007 
1008 /* compute eflags.S, trying to store it in reg if not NULL */
1009 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1010 {
1011     switch (s->cc_op) {
1012     case CC_OP_DYNAMIC:
1013         gen_compute_eflags(s);
1014         /* FALLTHRU */
1015     case CC_OP_EFLAGS:
1016     case CC_OP_ADCX:
1017     case CC_OP_ADOX:
1018     case CC_OP_ADCOX:
1019         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1020                              .imm = CC_S };
1021     case CC_OP_POPCNT:
1022         return (CCPrepare) { .cond = TCG_COND_NEVER };
1023     default:
1024         return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
1025     }
1026 }
1027 
1028 /* compute eflags.O, trying to store it in reg if not NULL */
1029 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1030 {
1031     switch (s->cc_op) {
1032     case CC_OP_ADOX:
1033     case CC_OP_ADCOX:
1034         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1035                              .no_setcond = true };
1036     case CC_OP_LOGICB ... CC_OP_LOGICQ:
1037     case CC_OP_POPCNT:
1038         return (CCPrepare) { .cond = TCG_COND_NEVER };
1039     case CC_OP_MULB ... CC_OP_MULQ:
1040         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
1041     default:
1042         gen_compute_eflags(s);
1043         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1044                              .imm = CC_O };
1045     }
1046 }
1047 
1048 /* compute eflags.Z, trying to store it in reg if not NULL */
1049 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1050 {
1051     switch (s->cc_op) {
1052     case CC_OP_EFLAGS:
1053     case CC_OP_ADCX:
1054     case CC_OP_ADOX:
1055     case CC_OP_ADCOX:
1056         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1057                              .imm = CC_Z };
1058     case CC_OP_DYNAMIC:
1059         gen_update_cc_op(s);
1060         if (!reg) {
1061             reg = tcg_temp_new();
1062         }
1063         gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
1064         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
1065     case CC_OP_POPCNT:
1066         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
1067     default:
1068         {
1069             MemOp size = cc_op_size(s->cc_op);
1070             return gen_prepare_val_nz(cpu_cc_dst, size, true);
1071         }
1072     }
1073 }
1074 
1075 /* return how to compute jump opcode 'b'.  'reg' can be clobbered
1076  * if needed; it may be used for CCPrepare.reg if that will
1077  * provide more freedom in the translation of a subsequent setcond. */
1078 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1079 {
1080     int inv, jcc_op, cond;
1081     MemOp size;
1082     CCPrepare cc;
1083 
1084     inv = b & 1;
1085     jcc_op = (b >> 1) & 7;
1086 
1087     switch (s->cc_op) {
1088     case CC_OP_SUBB ... CC_OP_SUBQ:
1089         /* We optimize relational operators for the cmp/jcc case.  */
1090         size = cc_op_size(s->cc_op);
1091         switch (jcc_op) {
1092         case JCC_BE:
1093             tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
1094             tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
1095             cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
1096                                .reg2 = cpu_cc_src, .use_reg2 = true };
1097             break;
1098         case JCC_L:
1099             cond = TCG_COND_LT;
1100             goto fast_jcc_l;
1101         case JCC_LE:
1102             cond = TCG_COND_LE;
1103         fast_jcc_l:
1104             tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
1105             tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
1106             cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
1107                                .reg2 = cpu_cc_src, .use_reg2 = true };
1108             break;
1109 
1110         default:
1111             goto slow_jcc;
1112         }
1113         break;
1114 
1115     case CC_OP_LOGICB ... CC_OP_LOGICQ:
1116         /* Mostly used for test+jump */
1117         size = cc_op_size(s->cc_op);
1118         switch (jcc_op) {
1119         case JCC_BE:
1120             /* CF = 0, becomes jz/je */
1121             jcc_op = JCC_Z;
1122             goto slow_jcc;
1123         case JCC_L:
1124             /* OF = 0, becomes js/jns */
1125             jcc_op = JCC_S;
1126             goto slow_jcc;
1127         case JCC_LE:
1128             /* SF or ZF, becomes signed <= 0 */
1129             tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
1130             cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
1131             break;
1132         default:
1133             goto slow_jcc;
1134         }
1135         break;
1136 
1137     default:
1138     slow_jcc:
1139         /* This actually generates good code for JC, JZ and JS.  */
1140         switch (jcc_op) {
1141         case JCC_O:
1142             cc = gen_prepare_eflags_o(s, reg);
1143             break;
1144         case JCC_B:
1145             cc = gen_prepare_eflags_c(s, reg);
1146             break;
1147         case JCC_Z:
1148             cc = gen_prepare_eflags_z(s, reg);
1149             break;
1150         case JCC_BE:
1151             gen_compute_eflags(s);
1152             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1153                                .imm = CC_Z | CC_C };
1154             break;
1155         case JCC_S:
1156             cc = gen_prepare_eflags_s(s, reg);
1157             break;
1158         case JCC_P:
1159             cc = gen_prepare_eflags_p(s, reg);
1160             break;
1161         case JCC_L:
1162             gen_compute_eflags(s);
1163             if (!reg || reg == cpu_cc_src) {
1164                 reg = tcg_temp_new();
1165             }
1166             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1167             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1168                                .imm = CC_O };
1169             break;
1170         default:
1171         case JCC_LE:
1172             gen_compute_eflags(s);
1173             if (!reg || reg == cpu_cc_src) {
1174                 reg = tcg_temp_new();
1175             }
1176             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1177             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1178                                .imm = CC_O | CC_Z };
1179             break;
1180         }
1181         break;
1182     }
1183 
1184     if (inv) {
1185         cc.cond = tcg_invert_cond(cc.cond);
1186     }
1187     return cc;
1188 }
1189 
1190 static void gen_setcc(DisasContext *s, int b, TCGv reg)
1191 {
1192     CCPrepare cc = gen_prepare_cc(s, b, reg);
1193 
1194     if (cc.no_setcond) {
1195         if (cc.cond == TCG_COND_EQ) {
1196             tcg_gen_xori_tl(reg, cc.reg, 1);
1197         } else {
1198             tcg_gen_mov_tl(reg, cc.reg);
1199         }
1200         return;
1201     }
1202 
1203     if (cc.use_reg2) {
1204         tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1205     } else {
1206         tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1207     }
1208 }
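
/*
 * Example (illustrative): for SETZ with cc_op == CC_OP_EFLAGS,
 * gen_prepare_cc yields { TCG_COND_TSTNE, cpu_cc_src, .imm = CC_Z },
 * so gen_setcc emits the single op
 *
 *     tcg_gen_setcondi_tl(TCG_COND_TSTNE, reg, cpu_cc_src, CC_Z);
 *
 * with no branches.
 */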
1209 
1210 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1211 {
1212     gen_setcc(s, JCC_B << 1, reg);
1213 }
1214 
1215 /* generate a conditional jump to label 'l1' according to jump opcode
1216    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1217 static inline void gen_jcc_noeob(DisasContext *s, int b, TCGLabel *l1)
1218 {
1219     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1220 
1221     if (cc.use_reg2) {
1222         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1223     } else {
1224         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1225     }
1226 }
1227 
1228 /* Generate a conditional jump to label 'l1' according to jump opcode
1229    value 'b'. In the fast case, T0 is guaranteed not to be used.
1230    One or both of the branches will call gen_jmp_rel, so ensure
1231    cc_op is clean.  */
1232 static inline void gen_jcc(DisasContext *s, int b, TCGLabel *l1)
1233 {
1234     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1235 
1236     /*
1237      * Note that this must be _after_ gen_prepare_cc, because it can change
1238      * the cc_op to CC_OP_EFLAGS (because it's CC_OP_DYNAMIC or because
1239      * it's cheaper to just compute the flags)!
1240      */
1241     gen_update_cc_op(s);
1242     if (cc.use_reg2) {
1243         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1244     } else {
1245         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1246     }
1247 }
1248 
1249 static void gen_stos(DisasContext *s, MemOp ot)
1250 {
1251     gen_string_movl_A0_EDI(s);
1252     gen_op_st_v(s, ot, s->T0, s->A0);
1253     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1254 }
1255 
1256 static void gen_lods(DisasContext *s, MemOp ot)
1257 {
1258     gen_string_movl_A0_ESI(s);
1259     gen_op_ld_v(s, ot, s->T0, s->A0);
1260     gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1261     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1262 }
1263 
1264 static void gen_scas(DisasContext *s, MemOp ot)
1265 {
1266     gen_string_movl_A0_EDI(s);
1267     gen_op_ld_v(s, ot, s->T1, s->A0);
1268     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1269     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1270     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1271     set_cc_op(s, CC_OP_SUBB + ot);
1272 
1273     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1274 }
1275 
1276 static void gen_cmps(DisasContext *s, MemOp ot)
1277 {
1278     TCGv dshift;
1279 
1280     gen_string_movl_A0_EDI(s);
1281     gen_op_ld_v(s, ot, s->T1, s->A0);
1282     gen_string_movl_A0_ESI(s);
1283     gen_op_ld_v(s, ot, s->T0, s->A0);
1284     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1285     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1286     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1287     set_cc_op(s, CC_OP_SUBB + ot);
1288 
1289     dshift = gen_compute_Dshift(s, ot);
1290     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1291     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1292 }
1293 
1294 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1295 {
1296     if (s->flags & HF_IOBPT_MASK) {
1297 #ifdef CONFIG_USER_ONLY
1298         /* user-mode cpu should not be in IOBPT mode */
1299         g_assert_not_reached();
1300 #else
1301         TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1302         TCGv t_next = eip_next_tl(s);
1303         gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1304 #endif /* CONFIG_USER_ONLY */
1305     }
1306 }
1307 
1308 static void gen_ins(DisasContext *s, MemOp ot)
1309 {
1310     gen_string_movl_A0_EDI(s);
1311     /* Note: we must do this dummy write first to be restartable in
1312        case of a page fault. */
1313     tcg_gen_movi_tl(s->T0, 0);
1314     gen_op_st_v(s, ot, s->T0, s->A0);
1315     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1316     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1317     gen_helper_in_func(ot, s->T0, s->tmp2_i32);
1318     gen_op_st_v(s, ot, s->T0, s->A0);
1319     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1320     gen_bpt_io(s, s->tmp2_i32, ot);
1321 }
1322 
1323 static void gen_outs(DisasContext *s, MemOp ot)
1324 {
1325     gen_string_movl_A0_ESI(s);
1326     gen_op_ld_v(s, ot, s->T0, s->A0);
1327 
1328     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1329     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1330     tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1331     gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
1332     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1333     gen_bpt_io(s, s->tmp2_i32, ot);
1334 }
1335 
1336 static void do_gen_rep(DisasContext *s, MemOp ot,
1337                        void (*fn)(DisasContext *s, MemOp ot),
1338                        bool is_repz_nz)
1339 {
1340     TCGLabel *done = gen_new_label();
1341     bool had_rf = s->flags & HF_RF_MASK;
1342 
1343     /*
1344      * Even if EFLAGS.RF was set on entry (such as if we're on the second or
1345      * later iteration and an exception or interrupt happened), force gen_eob()
1346      * not to clear the flag.  We do that ourselves after the last iteration.
1347      */
1348     s->flags &= ~HF_RF_MASK;
1349 
1350     /*
1351      * For CMPS/SCAS, the CC_OP after a memory fault could come from either
1352      * the previous instruction or the string instruction; but because we
1353      * arrange to keep CC_OP up to date all the time, just mark the whole
1354      * insn as CC_OP_DYNAMIC.
1355      *
1356      * It's not a problem to do this even for instructions that do not
1357      * modify the flags, so do it unconditionally.
1358      */
1359     gen_update_cc_op(s);
1360     tcg_set_insn_start_param(s->base.insn_start, 1, CC_OP_DYNAMIC);
1361 
1362     /* Any iteration at all?  */
1363     gen_op_jz_ecx(s, done);
1364 
1365     fn(s, ot);
1366     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1367     gen_update_cc_op(s);
1368 
1369     /* Leave if REP condition fails.  */
1370     if (is_repz_nz) {
1371         int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
1372         gen_jcc_noeob(s, (JCC_Z << 1) | (nz ^ 1), done);
1373         /* gen_prepare_eflags_z never changes cc_op.  */
1374         assert(!s->cc_op_dirty);
1375     }
1376 
1377     /*
1378      * Traps or interrupts set RF_MASK if they happen after any iteration
1379      * but the last.  Set it here before giving the main loop a chance to
1380      * execute.  (For faults, seg_helper.c sets the flag as usual).
1381      */
1382     if (!had_rf) {
1383         gen_set_eflags(s, RF_MASK);
1384     }
1385 
1386     /* Go to the main loop but reenter the same instruction.  */
1387     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1388 
1389     /* CX/ECX/RCX is zero, or REPZ/REPNZ broke the repetition.  */
1390     gen_set_label(done);
1391     set_cc_op(s, CC_OP_DYNAMIC);
1392     if (had_rf) {
1393         gen_reset_eflags(s, RF_MASK);
1394     }
1395     gen_jmp_rel_csize(s, 0, 1);
1396 }
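
/*
 * Rough shape of the code generated for "rep movsb" (an illustrative
 * sketch, not a literal TCG listing):
 *
 *     if (ECX == 0) goto done;
 *     <one movsb iteration>;  ECX -= 1;
 *     set EFLAGS.RF unless it was already set on entry;
 *     jump back to this instruction (via the main loop);
 *   done:
 *     clear EFLAGS.RF if it was set on entry;
 *     jump to the next instruction;
 */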
1397 
1398 static void gen_repz(DisasContext *s, MemOp ot,
1399                      void (*fn)(DisasContext *s, MemOp ot))
1401 {
1402     if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
1403         do_gen_rep(s, ot, fn, false);
1404     } else {
1405         fn(s, ot);
1406     }
1407 }
1408 
1409 static void gen_repz_nz(DisasContext *s, MemOp ot,
1410                         void (*fn)(DisasContext *s, MemOp ot))
1411 {
1412     if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
1413         do_gen_rep(s, ot, fn, true);
1414     } else {
1415         fn(s, ot);
1416     }
1417 }
1418 
1419 static void gen_helper_fp_arith_ST0_FT0(int op)
1420 {
1421     switch (op) {
1422     case 0:
1423         gen_helper_fadd_ST0_FT0(tcg_env);
1424         break;
1425     case 1:
1426         gen_helper_fmul_ST0_FT0(tcg_env);
1427         break;
1428     case 2:
1429         gen_helper_fcom_ST0_FT0(tcg_env);
1430         break;
1431     case 3:
1432         gen_helper_fcom_ST0_FT0(tcg_env);
1433         break;
1434     case 4:
1435         gen_helper_fsub_ST0_FT0(tcg_env);
1436         break;
1437     case 5:
1438         gen_helper_fsubr_ST0_FT0(tcg_env);
1439         break;
1440     case 6:
1441         gen_helper_fdiv_ST0_FT0(tcg_env);
1442         break;
1443     case 7:
1444         gen_helper_fdivr_ST0_FT0(tcg_env);
1445         break;
1446     }
1447 }
1448 
1449 /* NOTE: the "r" (reversed) ops are swapped here relative to gen_helper_fp_arith_ST0_FT0 */
1450 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1451 {
1452     TCGv_i32 tmp = tcg_constant_i32(opreg);
1453     switch (op) {
1454     case 0:
1455         gen_helper_fadd_STN_ST0(tcg_env, tmp);
1456         break;
1457     case 1:
1458         gen_helper_fmul_STN_ST0(tcg_env, tmp);
1459         break;
1460     case 4:
1461         gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1462         break;
1463     case 5:
1464         gen_helper_fsub_STN_ST0(tcg_env, tmp);
1465         break;
1466     case 6:
1467         gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1468         break;
1469     case 7:
1470         gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1471         break;
1472     }
1473 }
1474 
1475 static void gen_exception(DisasContext *s, int trapno)
1476 {
1477     gen_update_cc_op(s);
1478     gen_update_eip_cur(s);
1479     gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1480     s->base.is_jmp = DISAS_NORETURN;
1481 }
1482 
1483 /* Generate #UD for the current instruction.  The assumption here is that
1484    the instruction is known, but it isn't allowed in the current cpu mode.  */
1485 static void gen_illegal_opcode(DisasContext *s)
1486 {
1487     gen_exception(s, EXCP06_ILLOP);
1488 }
1489 
1490 /* Generate #GP for the current instruction. */
1491 static void gen_exception_gpf(DisasContext *s)
1492 {
1493     gen_exception(s, EXCP0D_GPF);
1494 }
1495 
1496 /* Check for cpl == 0; if not, raise #GP and return false. */
1497 static bool check_cpl0(DisasContext *s)
1498 {
1499     if (CPL(s) == 0) {
1500         return true;
1501     }
1502     gen_exception_gpf(s);
1503     return false;
1504 }
1505 
1506 /* XXX: add faster immediate case */
1507 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
1508                              bool is_right, TCGv count)
1509 {
1510     target_ulong mask = (ot == MO_64 ? 63 : 31);
1511 
1512     switch (ot) {
1513     case MO_16:
1514         /* Note: we implement the Intel behaviour for shift count > 16.
1515            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1516            portion by constructing it as a 32-bit value.  */
1517         if (is_right) {
1518             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1519             tcg_gen_mov_tl(s->T1, s->T0);
1520             tcg_gen_mov_tl(s->T0, s->tmp0);
1521         } else {
1522             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1523         }
1524         /*
1525          * If TARGET_X86_64 is defined, fall through into the MO_32 case;
1526          * otherwise fall through to the default case.
1527          */
1528     case MO_32:
1529 #ifdef TARGET_X86_64
1530         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
1531         tcg_gen_subi_tl(s->tmp0, count, 1);
1532         if (is_right) {
1533             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
1534             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
1535             tcg_gen_shr_i64(s->T0, s->T0, count);
1536         } else {
1537             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
1538             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
1539             tcg_gen_shl_i64(s->T0, s->T0, count);
1540             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
1541             tcg_gen_shri_i64(s->T0, s->T0, 32);
1542         }
1543         break;
1544 #endif
1545     default:
1546         tcg_gen_subi_tl(s->tmp0, count, 1);
1547         if (is_right) {
1548             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1549 
1550             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1551             tcg_gen_shr_tl(s->T0, s->T0, count);
1552             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
1553         } else {
1554             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1555             if (ot == MO_16) {
1556                 /* Only needed if count > 16, for Intel behaviour.  */
1557                 tcg_gen_subfi_tl(s->tmp4, 33, count);
1558                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
1559                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
1560             }
1561 
1562             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1563             tcg_gen_shl_tl(s->T0, s->T0, count);
1564             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
1565         }
1566         tcg_gen_movi_tl(s->tmp4, 0);
1567         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
1568                            s->tmp4, s->T1);
1569         tcg_gen_or_tl(s->T0, s->T0, s->T1);
1570         break;
1571     }
1572 }
1573 
1574 #define X86_MAX_INSN_LENGTH 15
1575 
1576 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
1577 {
1578     uint64_t pc = s->pc;
1579 
1580     /* This is a subsequent insn that crosses a page boundary.  */
1581     if (s->base.num_insns > 1 &&
1582         !translator_is_same_page(&s->base, s->pc + num_bytes - 1)) {
1583         siglongjmp(s->jmpbuf, 2);
1584     }
1585 
1586     s->pc += num_bytes;
1587     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
1588         /* If the instruction's 16th byte is on a different page than the 1st, a
1589          * page fault on the second page wins over the general protection fault
1590          * caused by the instruction being too long.
1591          * This can happen even if the operand is only one byte long!
1592          */
1593         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
1594             (void)translator_ldub(env, &s->base,
1595                                   (s->pc - 1) & TARGET_PAGE_MASK);
1596         }
1597         siglongjmp(s->jmpbuf, 1);
1598     }
1599 
1600     return pc;
1601 }
1602 
1603 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
1604 {
1605     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
1606 }
1607 
1608 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
1609 {
1610     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
1611 }
1612 
1613 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
1614 {
1615     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
1616 }
1617 
1618 #ifdef TARGET_X86_64
1619 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
1620 {
1621     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
1622 }
1623 #endif
1624 
1625 /* Decompose an address.  */
1626 
1627 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1628                                     int modrm, bool is_vsib)
1629 {
1630     int def_seg, base, index, scale, mod, rm;
1631     target_long disp;
1632     bool havesib;
1633 
1634     def_seg = R_DS;
1635     index = -1;
1636     scale = 0;
1637     disp = 0;
1638 
1639     mod = (modrm >> 6) & 3;
1640     rm = modrm & 7;
1641     base = rm | REX_B(s);
1642 
1643     if (mod == 3) {
1644         /* Normally filtered out earlier, but including this path
1645            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
1646         goto done;
1647     }
1648 
1649     switch (s->aflag) {
1650     case MO_64:
1651     case MO_32:
1652         havesib = 0;
1653         if (rm == 4) {
1654             int code = x86_ldub_code(env, s);
1655             scale = (code >> 6) & 3;
1656             index = ((code >> 3) & 7) | REX_X(s);
1657             if (index == 4 && !is_vsib) {
1658                 index = -1;  /* no index */
1659             }
1660             base = (code & 7) | REX_B(s);
1661             havesib = 1;
1662         }
1663 
1664         switch (mod) {
1665         case 0:
1666             if ((base & 7) == 5) {
1667                 base = -1;
1668                 disp = (int32_t)x86_ldl_code(env, s);
1669                 if (CODE64(s) && !havesib) {
1670                     base = -2;
1671                     disp += s->pc + s->rip_offset;
1672                 }
1673             }
1674             break;
1675         case 1:
1676             disp = (int8_t)x86_ldub_code(env, s);
1677             break;
1678         default:
1679         case 2:
1680             disp = (int32_t)x86_ldl_code(env, s);
1681             break;
1682         }
1683 
1684         /* For correct popl handling with esp.  */
1685         if (base == R_ESP && s->popl_esp_hack) {
1686             disp += s->popl_esp_hack;
1687         }
1688         if (base == R_EBP || base == R_ESP) {
1689             def_seg = R_SS;
1690         }
1691         break;
1692 
1693     case MO_16:
1694         if (mod == 0) {
1695             if (rm == 6) {
1696                 base = -1;
1697                 disp = x86_lduw_code(env, s);
1698                 break;
1699             }
1700         } else if (mod == 1) {
1701             disp = (int8_t)x86_ldub_code(env, s);
1702         } else {
1703             disp = (int16_t)x86_lduw_code(env, s);
1704         }
1705 
1706         switch (rm) {
1707         case 0:
1708             base = R_EBX;
1709             index = R_ESI;
1710             break;
1711         case 1:
1712             base = R_EBX;
1713             index = R_EDI;
1714             break;
1715         case 2:
1716             base = R_EBP;
1717             index = R_ESI;
1718             def_seg = R_SS;
1719             break;
1720         case 3:
1721             base = R_EBP;
1722             index = R_EDI;
1723             def_seg = R_SS;
1724             break;
1725         case 4:
1726             base = R_ESI;
1727             break;
1728         case 5:
1729             base = R_EDI;
1730             break;
1731         case 6:
1732             base = R_EBP;
1733             def_seg = R_SS;
1734             break;
1735         default:
1736         case 7:
1737             base = R_EBX;
1738             break;
1739         }
1740         break;
1741 
1742     default:
1743         g_assert_not_reached();
1744     }
1745 
1746  done:
1747     return (AddressParts){ def_seg, base, index, scale, disp };
1748 }
1749 
1750 /* Compute the address, with a minimum number of TCG ops.  */
1751 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1752 {
1753     TCGv ea = NULL;
1754 
1755     if (a.index >= 0 && !is_vsib) {
1756         if (a.scale == 0) {
1757             ea = cpu_regs[a.index];
1758         } else {
1759             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1760             ea = s->A0;
1761         }
1762         if (a.base >= 0) {
1763             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1764             ea = s->A0;
1765         }
1766     } else if (a.base >= 0) {
1767         ea = cpu_regs[a.base];
1768     }
1769     if (!ea) {
1770         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1771             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1772             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1773         } else {
1774             tcg_gen_movi_tl(s->A0, a.disp);
1775         }
1776         ea = s->A0;
1777     } else if (a.disp != 0) {
1778         tcg_gen_addi_tl(s->A0, ea, a.disp);
1779         ea = s->A0;
1780     }
1781 
1782     return ea;
1783 }
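
/*
 * Continuing the example above: base=R_EBX, index=R_ESI, scale=2,
 * disp=0x10 emits three TCG ops:
 *     shli_tl  A0, ESI, 2
 *     add_tl   A0, A0, EBX
 *     addi_tl  A0, A0, 0x10
 * while a bare register operand emits none, since the CPU register
 * itself is returned as the effective address.
 */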
1784 
1785 /* Used for BNDCL, BNDCU, BNDCN.  */
1786 static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
1787                       TCGCond cond, TCGv_i64 bndv)
1788 {
1789     TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
1790 
1791     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1792     if (!CODE64(s)) {
1793         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1794     }
1795     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1796     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1797     gen_helper_bndck(tcg_env, s->tmp2_i32);
1798 }
1799 
/* Generate a ModRM-specified load from memory or a register.  */
1801 static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1802 {
1803     int modrm = s->modrm;
1804     int mod, rm;
1805 
1806     mod = (modrm >> 6) & 3;
1807     rm = (modrm & 7) | REX_B(s);
1808     if (mod == 3) {
1809         gen_op_mov_v_reg(s, ot, s->T0, rm);
1810     } else {
1811         gen_lea_modrm(s, decode);
1812         gen_op_ld_v(s, ot, s->T0, s->A0);
1813     }
1814 }
1815 
/* Generate a ModRM-specified store to memory or a register.  */
1817 static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1818 {
1819     int modrm = s->modrm;
1820     int mod, rm;
1821 
1822     mod = (modrm >> 6) & 3;
1823     rm = (modrm & 7) | REX_B(s);
1824     if (mod == 3) {
1825         gen_op_mov_reg_v(s, ot, rm, s->T0);
1826     } else {
1827         gen_lea_modrm(s, decode);
1828         gen_op_st_v(s, ot, s->T0, s->A0);
1829     }
1830 }
1831 
1832 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1833 {
1834     target_ulong ret;
1835 
1836     switch (ot) {
1837     case MO_8:
1838         ret = x86_ldub_code(env, s);
1839         break;
1840     case MO_16:
1841         ret = x86_lduw_code(env, s);
1842         break;
1843     case MO_32:
1844         ret = x86_ldl_code(env, s);
1845         break;
1846 #ifdef TARGET_X86_64
1847     case MO_64:
1848         ret = x86_ldq_code(env, s);
1849         break;
1850 #endif
1851     default:
1852         g_assert_not_reached();
1853     }
1854     return ret;
1855 }
1856 
1857 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1858 {
1859     uint32_t ret;
1860 
1861     switch (ot) {
1862     case MO_8:
1863         ret = x86_ldub_code(env, s);
1864         break;
1865     case MO_16:
1866         ret = x86_lduw_code(env, s);
1867         break;
1868     case MO_32:
1869 #ifdef TARGET_X86_64
1870     case MO_64:
1871 #endif
1872         ret = x86_ldl_code(env, s);
1873         break;
1874     default:
1875         g_assert_not_reached();
1876     }
1877     return ret;
1878 }
1879 
1880 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1881 {
1882     target_long ret;
1883 
1884     switch (ot) {
1885     case MO_8:
1886         ret = (int8_t) x86_ldub_code(env, s);
1887         break;
1888     case MO_16:
1889         ret = (int16_t) x86_lduw_code(env, s);
1890         break;
1891     case MO_32:
1892         ret = (int32_t) x86_ldl_code(env, s);
1893         break;
1894 #ifdef TARGET_X86_64
1895     case MO_64:
1896         ret = x86_ldq_code(env, s);
1897         break;
1898 #endif
1899     default:
1900         g_assert_not_reached();
1901     }
1902     return ret;
1903 }
1904 
1905 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1906                                         TCGLabel *not_taken, TCGLabel *taken)
1907 {
1908     if (not_taken) {
1909         gen_set_label(not_taken);
1910     }
1911     gen_jmp_rel_csize(s, 0, 1);
1912 
1913     gen_set_label(taken);
1914     gen_jmp_rel(s, s->dflag, diff, 0);
1915 }
1916 
1917 static void gen_cmovcc(DisasContext *s, int b, TCGv dest, TCGv src)
1918 {
1919     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1920 
1921     if (!cc.use_reg2) {
1922         cc.reg2 = tcg_constant_tl(cc.imm);
1923     }
1924 
1925     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1926 }
1927 
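/* Real and VM86 mode segment load: the base is simply the 16-bit
   selector shifted left by four.  */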
1928 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1929 {
1930     TCGv selector = tcg_temp_new();
1931     tcg_gen_ext16u_tl(selector, seg);
1932     tcg_gen_st32_tl(selector, tcg_env,
1933                     offsetof(CPUX86State,segs[seg_reg].selector));
1934     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1935 }
1936 
/* Move SRC to seg_reg and compute whether the CPU state may change.
   Never call this function with seg_reg == R_CS.  */
1939 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
1940 {
1941     if (PE(s) && !VM86(s)) {
1942         tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
1943         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* Abort translation because the addseg value may change or
           because ss32 may change.  For R_SS, translation must always
           stop: special handling is needed to disable hardware
           interrupts for the next instruction.  */
1948         if (seg_reg == R_SS) {
1949             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1950         } else if (CODE32(s) && seg_reg < R_FS) {
1951             s->base.is_jmp = DISAS_EOB_NEXT;
1952         }
1953     } else {
1954         gen_op_movl_seg_real(s, seg_reg, src);
1955         if (seg_reg == R_SS) {
1956             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1957         }
1958     }
1959 }
1960 
1961 static void gen_far_call(DisasContext *s)
1962 {
1963     TCGv_i32 new_cs = tcg_temp_new_i32();
1964     tcg_gen_trunc_tl_i32(new_cs, s->T1);
1965     if (PE(s) && !VM86(s)) {
1966         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
1967                                    tcg_constant_i32(s->dflag - 1),
1968                                    eip_next_tl(s));
1969     } else {
1970         TCGv_i32 new_eip = tcg_temp_new_i32();
1971         tcg_gen_trunc_tl_i32(new_eip, s->T0);
1972         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
1973                               tcg_constant_i32(s->dflag - 1),
1974                               eip_next_i32(s));
1975     }
1976     s->base.is_jmp = DISAS_JUMP;
1977 }
1978 
1979 static void gen_far_jmp(DisasContext *s)
1980 {
1981     if (PE(s) && !VM86(s)) {
1982         TCGv_i32 new_cs = tcg_temp_new_i32();
1983         tcg_gen_trunc_tl_i32(new_cs, s->T1);
1984         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
1985                                   eip_next_tl(s));
1986     } else {
1987         gen_op_movl_seg_real(s, R_CS, s->T1);
1988         gen_op_jmp_v(s, s->T0);
1989     }
1990     s->base.is_jmp = DISAS_JUMP;
1991 }
1992 
1993 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
1994 {
    /* No SVM active: fast path.  */
1996     if (likely(!GUEST(s))) {
1997         return;
1998     }
1999     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2000 }
2001 
2002 static inline void gen_stack_update(DisasContext *s, int addend)
2003 {
2004     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2005 }
2006 
2007 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
2008 {
2009     if (offset) {
2010         tcg_gen_addi_tl(dest, src, offset);
2011         src = dest;
2012     }
2013     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
2014 }
2015 
2016 /* Generate a push. It depends on ss32, addseg and dflag.  */
2017 static void gen_push_v(DisasContext *s, TCGv val)
2018 {
2019     MemOp d_ot = mo_pushpop(s, s->dflag);
2020     MemOp a_ot = mo_stacksize(s);
2021     int size = 1 << d_ot;
2022     TCGv new_esp = tcg_temp_new();
2023 
2024     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
2025 
2026     /* Now reduce the value to the address size and apply SS base.  */
2027     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
2028     gen_op_st_v(s, d_ot, val, s->A0);
2029     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2030 }
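
/*
 * Note that gen_push_v() emits the store before writing ESP back: if
 * the store faults, the guest must still observe the original ESP.
 * The same precise-exception concern is why gen_pop_T0() below is
 * split in two steps.
 */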
2031 
/* A two-step pop is necessary for precise exceptions: the load must
   complete before ESP is updated.  */
2033 static MemOp gen_pop_T0(DisasContext *s)
2034 {
2035     MemOp d_ot = mo_pushpop(s, s->dflag);
2036 
2037     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
2038     gen_op_ld_v(s, d_ot, s->T0, s->T0);
2039 
2040     return d_ot;
2041 }
2042 
2043 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2044 {
2045     gen_stack_update(s, 1 << ot);
2046 }
2047 
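/*
 * PUSHA stores the GPRs in the architectural order EAX, ECX, EDX, EBX,
 * original ESP, EBP, ESI, EDI: iteration i stores cpu_regs[7 - i] at
 * ESP - (8 - i) * size, and ESP itself is only updated afterwards.
 */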
2048 static void gen_pusha(DisasContext *s)
2049 {
2050     MemOp d_ot = s->dflag;
2051     int size = 1 << d_ot;
2052     int i;
2053 
2054     for (i = 0; i < 8; i++) {
2055         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
2056         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2057     }
2058 
2059     gen_stack_update(s, -8 * size);
2060 }
2061 
2062 static void gen_popa(DisasContext *s)
2063 {
2064     MemOp d_ot = s->dflag;
2065     int size = 1 << d_ot;
2066     int i;
2067 
2068     for (i = 0; i < 8; i++) {
2069         /* ESP is not reloaded */
2070         if (7 - i == R_ESP) {
2071             continue;
2072         }
2073         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2074         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2075         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2076     }
2077 
2078     gen_stack_update(s, 8 * size);
2079 }
2080 
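/*
 * A worked example (illustrative): "enter 16, 0" with 32-bit stack and
 * operand size behaves like
 *     push ebp
 *     mov  ebp, esp        ; FrameTemp
 *     sub  esp, 16
 * Nonzero nesting levels also copy level-1 saved frame pointers from
 * the old frame and push FrameTemp itself before ESP is lowered.
 */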
2081 static void gen_enter(DisasContext *s, int esp_addend, int level)
2082 {
2083     MemOp d_ot = mo_pushpop(s, s->dflag);
2084     MemOp a_ot = mo_stacksize(s);
2085     int size = 1 << d_ot;
2086 
2087     /* Push BP; compute FrameTemp into T1.  */
2088     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2089     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2090     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2091 
2092     level &= 31;
2093     if (level != 0) {
2094         int i;
2095 
2096         /* Copy level-1 pointers from the previous frame.  */
2097         for (i = 1; i < level; ++i) {
2098             gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2099             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2100 
2101             gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2102             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2103         }
2104 
2105         /* Push the current FrameTemp as the last level.  */
2106         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2107         gen_op_st_v(s, d_ot, s->T1, s->A0);
2108     }
2109 
2110     /* Copy the FrameTemp value to EBP.  */
2111     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2112 
2113     /* Compute the final value of ESP.  */
2114     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2115     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2116 }
2117 
2118 static void gen_leave(DisasContext *s)
2119 {
2120     MemOp d_ot = mo_pushpop(s, s->dflag);
2121     MemOp a_ot = mo_stacksize(s);
2122 
2123     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2124     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2125 
2126     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2127 
2128     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2129     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2130 }
2131 
2132 /* Similarly, except that the assumption here is that we don't decode
2133    the instruction at all -- either a missing opcode, an unimplemented
2134    feature, or just a bogus instruction stream.  */
2135 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2136 {
2137     gen_illegal_opcode(s);
2138 
2139     if (qemu_loglevel_mask(LOG_UNIMP)) {
2140         FILE *logfile = qemu_log_trylock();
2141         if (logfile) {
2142             target_ulong pc = s->base.pc_next, end = s->pc;
2143 
2144             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2145             for (; pc < end; ++pc) {
2146                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2147             }
2148             fprintf(logfile, "\n");
2149             qemu_log_unlock(logfile);
2150         }
2151     }
2152 }
2153 
/* An interrupt differs from an exception because of the privilege
   checks.  */
2156 static void gen_interrupt(DisasContext *s, uint8_t intno)
2157 {
2158     gen_update_cc_op(s);
2159     gen_update_eip_cur(s);
2160     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2161                                cur_insn_len_i32(s));
2162     s->base.is_jmp = DISAS_NORETURN;
2163 }
2164 
2165 /* Clear BND registers during legacy branches.  */
2166 static void gen_bnd_jmp(DisasContext *s)
2167 {
    /* Clear the registers only if the BND prefix is missing, MPX is
       enabled, and the BNDREGs are known to be in use (non-zero)
       already.  The helper itself will check BNDPRESERVE at runtime.  */
2171     if ((s->prefix & PREFIX_REPNZ) == 0
2172         && (s->flags & HF_MPX_EN_MASK) != 0
2173         && (s->flags & HF_MPX_IU_MASK) != 0) {
2174         gen_helper_bnd_jmp(tcg_env);
2175     }
2176 }
2177 
2178 /*
2179  * Generate an end of block, including common tasks such as generating
2180  * single step traps, resetting the RF flag, and handling the interrupt
2181  * shadow.
2182  */
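/*
 * Within this function MODE only distinguishes DISAS_EOB_INHIBIT_IRQ
 * (enter the interrupt shadow), DISAS_EOB_RECHECK_TF (re-evaluate
 * single-stepping before the next instruction) and DISAS_JUMP (the new
 * EIP is already loaded, so an indirect lookup_and_goto_ptr jump is
 * possible); everything else falls through to a plain exit_tb, with a
 * single-step trap first if TF is set.
 */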
2183 static void
2184 gen_eob(DisasContext *s, int mode)
2185 {
2186     bool inhibit_reset;
2187 
2188     gen_update_cc_op(s);
2189 
2190     /* If several instructions disable interrupts, only the first does it.  */
2191     inhibit_reset = false;
2192     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2193         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2194         inhibit_reset = true;
2195     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2196         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2197     }
2198 
2199     if (s->flags & HF_RF_MASK) {
2200         gen_reset_eflags(s, RF_MASK);
2201     }
2202     if (mode == DISAS_EOB_RECHECK_TF) {
2203         gen_helper_rechecking_single_step(tcg_env);
2204         tcg_gen_exit_tb(NULL, 0);
2205     } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
2206         gen_helper_single_step(tcg_env);
2207     } else if (mode == DISAS_JUMP &&
2208                /* give irqs a chance to happen */
2209                !inhibit_reset) {
2210         tcg_gen_lookup_and_goto_ptr();
2211     } else {
2212         tcg_gen_exit_tb(NULL, 0);
2213     }
2214 
2215     s->base.is_jmp = DISAS_NORETURN;
2216 }
2217 
2218 /* Jump to eip+diff, truncating the result to OT. */
2219 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2220 {
2221     bool use_goto_tb = s->jmp_opt;
2222     target_ulong mask = -1;
2223     target_ulong new_pc = s->pc + diff;
2224     target_ulong new_eip = new_pc - s->cs_base;
2225 
2226     assert(!s->cc_op_dirty);
2227 
2228     /* In 64-bit mode, operand size is fixed at 64 bits. */
2229     if (!CODE64(s)) {
2230         if (ot == MO_16) {
2231             mask = 0xffff;
2232             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2233                 use_goto_tb = false;
2234             }
2235         } else {
2236             mask = 0xffffffff;
2237         }
2238     }
2239     new_eip &= mask;
2240 
2241     if (tb_cflags(s->base.tb) & CF_PCREL) {
2242         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2243         /*
2244          * If we can prove the branch does not leave the page and we have
2245          * no extra masking to apply (data16 branch in code32, see above),
2246          * then we have also proven that the addition does not wrap.
2247          */
2248         if (!use_goto_tb || !translator_is_same_page(&s->base, new_pc)) {
2249             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2250             use_goto_tb = false;
2251         }
2252     } else if (!CODE64(s)) {
2253         new_pc = (uint32_t)(new_eip + s->cs_base);
2254     }
2255 
2256     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2257         /* jump to same page: we can use a direct jump */
2258         tcg_gen_goto_tb(tb_num);
2259         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2260             tcg_gen_movi_tl(cpu_eip, new_eip);
2261         }
2262         tcg_gen_exit_tb(s->base.tb, tb_num);
2263         s->base.is_jmp = DISAS_NORETURN;
2264     } else {
2265         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2266             tcg_gen_movi_tl(cpu_eip, new_eip);
2267         }
2268         if (s->jmp_opt) {
2269             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2270         } else {
2271             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2272         }
2273     }
2274 }
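
/*
 * Example of the masking above (illustrative): a jump taken with a 66h
 * prefix in 32-bit code has ot == MO_16, so the new EIP is truncated
 * to 16 bits; under CF_PCREL that truncation cannot be proven away,
 * which is why use_goto_tb is cleared for that combination.
 */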
2275 
2276 /* Jump to eip+diff, truncating to the current code size. */
2277 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2278 {
    /* In 64-bit code the OT argument is ignored, so we need not consider it. */
2280     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2281 }
2282 
2283 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2284 {
2285     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2286     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2287 }
2288 
2289 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2290 {
2291     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2292     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2293 }
2294 
2295 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2296 {
2297     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2298                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2299     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2300     int mem_index = s->mem_index;
2301     TCGv_i128 t = tcg_temp_new_i128();
2302 
2303     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2304     tcg_gen_st_i128(t, tcg_env, offset);
2305 }
2306 
2307 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2308 {
2309     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2310                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2311     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2312     int mem_index = s->mem_index;
2313     TCGv_i128 t = tcg_temp_new_i128();
2314 
2315     tcg_gen_ld_i128(t, tcg_env, offset);
2316     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2317 }
2318 
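/*
 * 256-bit (YMM) accesses are split into two 16-byte halves: x86 does
 * not guarantee 32-byte single-copy atomicity, and each half itself
 * only promises atomicity of its aligned 8-byte parts
 * (MO_ATOM_IFALIGN_PAIR).
 */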
2319 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2320 {
2321     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2322     int mem_index = s->mem_index;
2323     TCGv_i128 t0 = tcg_temp_new_i128();
2324     TCGv_i128 t1 = tcg_temp_new_i128();
2325 
2326     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2327     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2328     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2329 
2330     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2331     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2332 }
2333 
2334 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2335 {
2336     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2337     int mem_index = s->mem_index;
2338     TCGv_i128 t = tcg_temp_new_i128();
2339 
2340     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2341     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2342     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2343     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2344     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2345 }
2346 
2347 #include "emit.c.inc"
2348 
2349 static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
2350 {
2351     bool update_fip = true;
2352     int b = decode->b;
2353     int modrm = s->modrm;
2354     int mod, rm, op;
2355 
2356     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
        /* If CR0.EM or CR0.TS is set, generate an FPU exception.  */
        /* XXX: what to do if illegal op?  */
2359         gen_exception(s, EXCP07_PREX);
2360         return;
2361     }
2362     mod = (modrm >> 6) & 3;
2363     rm = modrm & 7;
2364     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
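    /*
     * Worked example (illustrative): D9 /0 (FLD m32fp) has b = 0xd9 and
     * reg = 0, so op = (1 << 3) | 0 = 0x08, the "flds" case below;
     * DD /2 (FST m64fp) yields op = 0x2a.
     */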
2365     if (mod != 3) {
2366         /* memory op */
2367         TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2368         TCGv last_addr = tcg_temp_new();
2369         bool update_fdp = true;
2370 
2371         tcg_gen_mov_tl(last_addr, ea);
2372         gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
2373 
2374         switch (op) {
2375         case 0x00 ... 0x07: /* fxxxs */
2376         case 0x10 ... 0x17: /* fixxxl */
2377         case 0x20 ... 0x27: /* fxxxl */
2378         case 0x30 ... 0x37: /* fixxx */
2379             {
2380                 int op1;
2381                 op1 = op & 7;
2382 
2383                 switch (op >> 4) {
2384                 case 0:
2385                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2386                                         s->mem_index, MO_LEUL);
2387                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2388                     break;
2389                 case 1:
2390                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2391                                         s->mem_index, MO_LEUL);
2392                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2393                     break;
2394                 case 2:
2395                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2396                                         s->mem_index, MO_LEUQ);
2397                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2398                     break;
2399                 case 3:
2400                 default:
2401                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2402                                         s->mem_index, MO_LESW);
2403                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2404                     break;
2405                 }
2406 
2407                 gen_helper_fp_arith_ST0_FT0(op1);
2408                 if (op1 == 3) {
2409                     /* fcomp needs pop */
2410                     gen_helper_fpop(tcg_env);
2411                 }
2412             }
2413             break;
2414         case 0x08: /* flds */
2415         case 0x0a: /* fsts */
2416         case 0x0b: /* fstps */
2417         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2418         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2419         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2420             switch (op & 7) {
2421             case 0:
2422                 switch (op >> 4) {
2423                 case 0:
2424                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2425                                         s->mem_index, MO_LEUL);
2426                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2427                     break;
2428                 case 1:
2429                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2430                                         s->mem_index, MO_LEUL);
2431                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2432                     break;
2433                 case 2:
2434                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2435                                         s->mem_index, MO_LEUQ);
2436                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2437                     break;
2438                 case 3:
2439                 default:
2440                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2441                                         s->mem_index, MO_LESW);
2442                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2443                     break;
2444                 }
2445                 break;
                /* XXX: the corresponding CPUID bit must be tested!  */
2447                 /* XXX: the corresponding CPUID bit must be tested ! */
2448                 switch (op >> 4) {
2449                 case 1:
2450                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2451                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2452                                         s->mem_index, MO_LEUL);
2453                     break;
2454                 case 2:
2455                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2456                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2457                                         s->mem_index, MO_LEUQ);
2458                     break;
2459                 case 3:
2460                 default:
2461                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2462                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2463                                         s->mem_index, MO_LEUW);
2464                     break;
2465                 }
2466                 gen_helper_fpop(tcg_env);
2467                 break;
2468             default:
2469                 switch (op >> 4) {
2470                 case 0:
2471                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2472                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2473                                         s->mem_index, MO_LEUL);
2474                     break;
2475                 case 1:
2476                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2477                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2478                                         s->mem_index, MO_LEUL);
2479                     break;
2480                 case 2:
2481                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2482                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2483                                         s->mem_index, MO_LEUQ);
2484                     break;
2485                 case 3:
2486                 default:
2487                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2488                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2489                                         s->mem_index, MO_LEUW);
2490                     break;
2491                 }
2492                 if ((op & 7) == 3) {
2493                     gen_helper_fpop(tcg_env);
2494                 }
2495                 break;
2496             }
2497             break;
2498         case 0x0c: /* fldenv mem */
2499             gen_helper_fldenv(tcg_env, s->A0,
2500                               tcg_constant_i32(s->dflag - 1));
2501             update_fip = update_fdp = false;
2502             break;
2503         case 0x0d: /* fldcw mem */
2504             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2505                                 s->mem_index, MO_LEUW);
2506             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2507             update_fip = update_fdp = false;
2508             break;
2509         case 0x0e: /* fnstenv mem */
2510             gen_helper_fstenv(tcg_env, s->A0,
2511                               tcg_constant_i32(s->dflag - 1));
2512             update_fip = update_fdp = false;
2513             break;
2514         case 0x0f: /* fnstcw mem */
2515             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2516             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2517                                 s->mem_index, MO_LEUW);
2518             update_fip = update_fdp = false;
2519             break;
2520         case 0x1d: /* fldt mem */
2521             gen_helper_fldt_ST0(tcg_env, s->A0);
2522             break;
2523         case 0x1f: /* fstpt mem */
2524             gen_helper_fstt_ST0(tcg_env, s->A0);
2525             gen_helper_fpop(tcg_env);
2526             break;
2527         case 0x2c: /* frstor mem */
2528             gen_helper_frstor(tcg_env, s->A0,
2529                               tcg_constant_i32(s->dflag - 1));
2530             update_fip = update_fdp = false;
2531             break;
2532         case 0x2e: /* fnsave mem */
2533             gen_helper_fsave(tcg_env, s->A0,
2534                              tcg_constant_i32(s->dflag - 1));
2535             update_fip = update_fdp = false;
2536             break;
2537         case 0x2f: /* fnstsw mem */
2538             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2539             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2540                                 s->mem_index, MO_LEUW);
2541             update_fip = update_fdp = false;
2542             break;
2543         case 0x3c: /* fbld */
2544             gen_helper_fbld_ST0(tcg_env, s->A0);
2545             break;
2546         case 0x3e: /* fbstp */
2547             gen_helper_fbst_ST0(tcg_env, s->A0);
2548             gen_helper_fpop(tcg_env);
2549             break;
2550         case 0x3d: /* fildll */
2551             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2552                                 s->mem_index, MO_LEUQ);
2553             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2554             break;
2555         case 0x3f: /* fistpll */
2556             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2557             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2558                                 s->mem_index, MO_LEUQ);
2559             gen_helper_fpop(tcg_env);
2560             break;
2561         default:
2562             goto illegal_op;
2563         }
2564 
2565         if (update_fdp) {
2566             int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
2567 
2568             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2569                            offsetof(CPUX86State,
2570                                     segs[last_seg].selector));
2571             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2572                              offsetof(CPUX86State, fpds));
2573             tcg_gen_st_tl(last_addr, tcg_env,
2574                           offsetof(CPUX86State, fpdp));
2575         }
2576     } else {
2577         /* register float ops */
2578         int opreg = rm;
2579 
2580         switch (op) {
2581         case 0x08: /* fld sti */
2582             gen_helper_fpush(tcg_env);
2583             gen_helper_fmov_ST0_STN(tcg_env,
2584                                     tcg_constant_i32((opreg + 1) & 7));
2585             break;
2586         case 0x09: /* fxchg sti */
2587         case 0x29: /* fxchg4 sti, undocumented op */
2588         case 0x39: /* fxchg7 sti, undocumented op */
2589             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2590             break;
2591         case 0x0a: /* grp d9/2 */
2592             switch (rm) {
2593             case 0: /* fnop */
2594                 /*
2595                  * check exceptions (FreeBSD FPU probe)
2596                  * needs to be treated as I/O because of ferr_irq
2597                  */
2598                 translator_io_start(&s->base);
2599                 gen_helper_fwait(tcg_env);
2600                 update_fip = false;
2601                 break;
2602             default:
2603                 goto illegal_op;
2604             }
2605             break;
2606         case 0x0c: /* grp d9/4 */
2607             switch (rm) {
2608             case 0: /* fchs */
2609                 gen_helper_fchs_ST0(tcg_env);
2610                 break;
2611             case 1: /* fabs */
2612                 gen_helper_fabs_ST0(tcg_env);
2613                 break;
2614             case 4: /* ftst */
2615                 gen_helper_fldz_FT0(tcg_env);
2616                 gen_helper_fcom_ST0_FT0(tcg_env);
2617                 break;
2618             case 5: /* fxam */
2619                 gen_helper_fxam_ST0(tcg_env);
2620                 break;
2621             default:
2622                 goto illegal_op;
2623             }
2624             break;
2625         case 0x0d: /* grp d9/5 */
2626             {
2627                 switch (rm) {
2628                 case 0:
2629                     gen_helper_fpush(tcg_env);
2630                     gen_helper_fld1_ST0(tcg_env);
2631                     break;
2632                 case 1:
2633                     gen_helper_fpush(tcg_env);
2634                     gen_helper_fldl2t_ST0(tcg_env);
2635                     break;
2636                 case 2:
2637                     gen_helper_fpush(tcg_env);
2638                     gen_helper_fldl2e_ST0(tcg_env);
2639                     break;
2640                 case 3:
2641                     gen_helper_fpush(tcg_env);
2642                     gen_helper_fldpi_ST0(tcg_env);
2643                     break;
2644                 case 4:
2645                     gen_helper_fpush(tcg_env);
2646                     gen_helper_fldlg2_ST0(tcg_env);
2647                     break;
2648                 case 5:
2649                     gen_helper_fpush(tcg_env);
2650                     gen_helper_fldln2_ST0(tcg_env);
2651                     break;
2652                 case 6:
2653                     gen_helper_fpush(tcg_env);
2654                     gen_helper_fldz_ST0(tcg_env);
2655                     break;
2656                 default:
2657                     goto illegal_op;
2658                 }
2659             }
2660             break;
2661         case 0x0e: /* grp d9/6 */
2662             switch (rm) {
2663             case 0: /* f2xm1 */
2664                 gen_helper_f2xm1(tcg_env);
2665                 break;
2666             case 1: /* fyl2x */
2667                 gen_helper_fyl2x(tcg_env);
2668                 break;
2669             case 2: /* fptan */
2670                 gen_helper_fptan(tcg_env);
2671                 break;
2672             case 3: /* fpatan */
2673                 gen_helper_fpatan(tcg_env);
2674                 break;
2675             case 4: /* fxtract */
2676                 gen_helper_fxtract(tcg_env);
2677                 break;
2678             case 5: /* fprem1 */
2679                 gen_helper_fprem1(tcg_env);
2680                 break;
2681             case 6: /* fdecstp */
2682                 gen_helper_fdecstp(tcg_env);
2683                 break;
2684             default:
2685             case 7: /* fincstp */
2686                 gen_helper_fincstp(tcg_env);
2687                 break;
2688             }
2689             break;
2690         case 0x0f: /* grp d9/7 */
2691             switch (rm) {
2692             case 0: /* fprem */
2693                 gen_helper_fprem(tcg_env);
2694                 break;
2695             case 1: /* fyl2xp1 */
2696                 gen_helper_fyl2xp1(tcg_env);
2697                 break;
2698             case 2: /* fsqrt */
2699                 gen_helper_fsqrt(tcg_env);
2700                 break;
2701             case 3: /* fsincos */
2702                 gen_helper_fsincos(tcg_env);
2703                 break;
2704             case 5: /* fscale */
2705                 gen_helper_fscale(tcg_env);
2706                 break;
2707             case 4: /* frndint */
2708                 gen_helper_frndint(tcg_env);
2709                 break;
2710             case 6: /* fsin */
2711                 gen_helper_fsin(tcg_env);
2712                 break;
2713             default:
2714             case 7: /* fcos */
2715                 gen_helper_fcos(tcg_env);
2716                 break;
2717             }
2718             break;
2719         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2720         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2721         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2722             {
2723                 int op1;
2724 
2725                 op1 = op & 7;
2726                 if (op >= 0x20) {
2727                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2728                     if (op >= 0x30) {
2729                         gen_helper_fpop(tcg_env);
2730                     }
2731                 } else {
2732                     gen_helper_fmov_FT0_STN(tcg_env,
2733                                             tcg_constant_i32(opreg));
2734                     gen_helper_fp_arith_ST0_FT0(op1);
2735                 }
2736             }
2737             break;
2738         case 0x02: /* fcom */
2739         case 0x22: /* fcom2, undocumented op */
2740             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2741             gen_helper_fcom_ST0_FT0(tcg_env);
2742             break;
2743         case 0x03: /* fcomp */
2744         case 0x23: /* fcomp3, undocumented op */
2745         case 0x32: /* fcomp5, undocumented op */
2746             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2747             gen_helper_fcom_ST0_FT0(tcg_env);
2748             gen_helper_fpop(tcg_env);
2749             break;
2750         case 0x15: /* da/5 */
2751             switch (rm) {
2752             case 1: /* fucompp */
2753                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2754                 gen_helper_fucom_ST0_FT0(tcg_env);
2755                 gen_helper_fpop(tcg_env);
2756                 gen_helper_fpop(tcg_env);
2757                 break;
2758             default:
2759                 goto illegal_op;
2760             }
2761             break;
2762         case 0x1c:
2763             switch (rm) {
2764             case 0: /* feni (287 only, just do nop here) */
2765                 break;
2766             case 1: /* fdisi (287 only, just do nop here) */
2767                 break;
2768             case 2: /* fclex */
2769                 gen_helper_fclex(tcg_env);
2770                 update_fip = false;
2771                 break;
2772             case 3: /* fninit */
2773                 gen_helper_fninit(tcg_env);
2774                 update_fip = false;
2775                 break;
2776             case 4: /* fsetpm (287 only, just do nop here) */
2777                 break;
2778             default:
2779                 goto illegal_op;
2780             }
2781             break;
2782         case 0x1d: /* fucomi */
2783             if (!(s->cpuid_features & CPUID_CMOV)) {
2784                 goto illegal_op;
2785             }
2786             gen_update_cc_op(s);
2787             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2788             gen_helper_fucomi_ST0_FT0(tcg_env);
2789             assume_cc_op(s, CC_OP_EFLAGS);
2790             break;
2791         case 0x1e: /* fcomi */
2792             if (!(s->cpuid_features & CPUID_CMOV)) {
2793                 goto illegal_op;
2794             }
2795             gen_update_cc_op(s);
2796             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2797             gen_helper_fcomi_ST0_FT0(tcg_env);
2798             assume_cc_op(s, CC_OP_EFLAGS);
2799             break;
2800         case 0x28: /* ffree sti */
2801             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2802             break;
2803         case 0x2a: /* fst sti */
2804             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2805             break;
2806         case 0x2b: /* fstp sti */
2807         case 0x0b: /* fstp1 sti, undocumented op */
2808         case 0x3a: /* fstp8 sti, undocumented op */
2809         case 0x3b: /* fstp9 sti, undocumented op */
2810             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2811             gen_helper_fpop(tcg_env);
2812             break;
2813         case 0x2c: /* fucom st(i) */
2814             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2815             gen_helper_fucom_ST0_FT0(tcg_env);
2816             break;
2817         case 0x2d: /* fucomp st(i) */
2818             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2819             gen_helper_fucom_ST0_FT0(tcg_env);
2820             gen_helper_fpop(tcg_env);
2821             break;
2822         case 0x33: /* de/3 */
2823             switch (rm) {
2824             case 1: /* fcompp */
2825                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2826                 gen_helper_fcom_ST0_FT0(tcg_env);
2827                 gen_helper_fpop(tcg_env);
2828                 gen_helper_fpop(tcg_env);
2829                 break;
2830             default:
2831                 goto illegal_op;
2832             }
2833             break;
2834         case 0x38: /* ffreep sti, undocumented op */
2835             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2836             gen_helper_fpop(tcg_env);
2837             break;
2838         case 0x3c: /* df/4 */
2839             switch (rm) {
2840             case 0:
2841                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2842                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2843                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2844                 break;
2845             default:
2846                 goto illegal_op;
2847             }
2848             break;
2849         case 0x3d: /* fucomip */
2850             if (!(s->cpuid_features & CPUID_CMOV)) {
2851                 goto illegal_op;
2852             }
2853             gen_update_cc_op(s);
2854             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2855             gen_helper_fucomi_ST0_FT0(tcg_env);
2856             gen_helper_fpop(tcg_env);
2857             assume_cc_op(s, CC_OP_EFLAGS);
2858             break;
2859         case 0x3e: /* fcomip */
2860             if (!(s->cpuid_features & CPUID_CMOV)) {
2861                 goto illegal_op;
2862             }
2863             gen_update_cc_op(s);
2864             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2865             gen_helper_fcomi_ST0_FT0(tcg_env);
2866             gen_helper_fpop(tcg_env);
2867             assume_cc_op(s, CC_OP_EFLAGS);
2868             break;
2869         case 0x10 ... 0x13: /* fcmovxx */
2870         case 0x18 ... 0x1b:
2871             {
2872                 int op1;
2873                 TCGLabel *l1;
2874                 static const uint8_t fcmov_cc[8] = {
2875                     (JCC_B << 1),
2876                     (JCC_Z << 1),
2877                     (JCC_BE << 1),
2878                     (JCC_P << 1),
2879                 };
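                /*
                 * Conditions are inverted (the ^1 below) so that the
                 * branch skips the fmov when the fcmov condition fails;
                 * DB-opcode forms (op 0x18..0x1b) have bit 3 of op set,
                 * which cancels the inversion for the FCMOVNcc variants.
                 */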
2880 
2881                 if (!(s->cpuid_features & CPUID_CMOV)) {
2882                     goto illegal_op;
2883                 }
2884                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2885                 l1 = gen_new_label();
2886                 gen_jcc_noeob(s, op1, l1);
2887                 gen_helper_fmov_ST0_STN(tcg_env,
2888                                         tcg_constant_i32(opreg));
2889                 gen_set_label(l1);
2890             }
2891             break;
2892         default:
2893             goto illegal_op;
2894         }
2895     }
2896 
2897     if (update_fip) {
2898         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2899                        offsetof(CPUX86State, segs[R_CS].selector));
2900         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2901                          offsetof(CPUX86State, fpcs));
2902         tcg_gen_st_tl(eip_cur_tl(s),
2903                       tcg_env, offsetof(CPUX86State, fpip));
2904     }
2905     return;
2906 
2907  illegal_op:
2908     gen_illegal_opcode(s);
2909 }
2910 
2911 static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
2912 {
2913     int prefixes = s->prefix;
2914     MemOp dflag = s->dflag;
2915     int b = decode->b + 0x100;
2916     int modrm = s->modrm;
2917     MemOp ot;
2918     int reg, rm, mod, op;
2919 
    /* Now check the opcode.  */
2921     switch (b) {
2922     case 0x1c7: /* RDSEED, RDPID with f3 prefix */
2923         mod = (modrm >> 6) & 3;
2924         switch ((modrm >> 3) & 7) {
2925         case 7:
2926             if (mod != 3 ||
2927                 (s->prefix & PREFIX_REPNZ)) {
2928                 goto illegal_op;
2929             }
2930             if (s->prefix & PREFIX_REPZ) {
2931                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
2932                     goto illegal_op;
2933                 }
2934                 gen_helper_rdpid(s->T0, tcg_env);
2935                 rm = (modrm & 7) | REX_B(s);
2936                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
2937                 break;
2938             } else {
2939                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
2940                     goto illegal_op;
2941                 }
2942                 goto do_rdrand;
2943             }
2944 
2945         case 6: /* RDRAND */
2946             if (mod != 3 ||
2947                 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
2948                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
2949                 goto illegal_op;
2950             }
2951         do_rdrand:
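            /* The helper also computes EFLAGS: architecturally RDRAND
               and RDSEED set CF on success and clear the other
               arithmetic flags, hence CC_OP_EFLAGS below.  */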
2952             translator_io_start(&s->base);
2953             gen_helper_rdrand(s->T0, tcg_env);
2954             rm = (modrm & 7) | REX_B(s);
2955             gen_op_mov_reg_v(s, dflag, rm, s->T0);
2956             assume_cc_op(s, CC_OP_EFLAGS);
2957             break;
2958 
2959         default:
2960             goto illegal_op;
2961         }
2962         break;
2963 
2964     case 0x100:
2965         mod = (modrm >> 6) & 3;
2966         op = (modrm >> 3) & 7;
        switch (op) {
2968         case 0: /* sldt */
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
2971             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2972                 break;
2973             }
2974             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
2975             tcg_gen_ld32u_tl(s->T0, tcg_env,
2976                              offsetof(CPUX86State, ldt.selector));
2977             ot = mod == 3 ? dflag : MO_16;
2978             gen_st_modrm(s, decode, ot);
2979             break;
2980         case 2: /* lldt */
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
2983             if (check_cpl0(s)) {
2984                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
2985                 gen_ld_modrm(s, decode, MO_16);
2986                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2987                 gen_helper_lldt(tcg_env, s->tmp2_i32);
2988             }
2989             break;
2990         case 1: /* str */
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
2993             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2994                 break;
2995             }
2996             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
2997             tcg_gen_ld32u_tl(s->T0, tcg_env,
2998                              offsetof(CPUX86State, tr.selector));
2999             ot = mod == 3 ? dflag : MO_16;
3000             gen_st_modrm(s, decode, ot);
3001             break;
3002         case 3: /* ltr */
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
3005             if (check_cpl0(s)) {
3006                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3007                 gen_ld_modrm(s, decode, MO_16);
3008                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3009                 gen_helper_ltr(tcg_env, s->tmp2_i32);
3010             }
3011             break;
3012         case 4: /* verr */
3013         case 5: /* verw */
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
3016             gen_ld_modrm(s, decode, MO_16);
3017             gen_update_cc_op(s);
3018             if (op == 4) {
3019                 gen_helper_verr(tcg_env, s->T0);
3020             } else {
3021                 gen_helper_verw(tcg_env, s->T0);
3022             }
3023             assume_cc_op(s, CC_OP_EFLAGS);
3024             break;
3025         default:
3026             goto illegal_op;
3027         }
3028         break;
3029 
3030     case 0x101:
3031         switch (modrm) {
3032         CASE_MODRM_MEM_OP(0): /* sgdt */
3033             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3034                 break;
3035             }
3036             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3037             gen_lea_modrm(s, decode);
3038             tcg_gen_ld32u_tl(s->T0,
3039                              tcg_env, offsetof(CPUX86State, gdt.limit));
3040             gen_op_st_v(s, MO_16, s->T0, s->A0);
3041             gen_add_A0_im(s, 2);
3042             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3043             /*
3044              * NB: Despite a confusing description in Intel CPU documentation,
             *     all 32 bits are written regardless of operand size.
3046              */
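            /* CODE64(s) + MO_32 evaluates to MO_64 in 64-bit mode.  */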
3047             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3048             break;
3049 
3050         case 0xc8: /* monitor */
3051             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3052                 goto illegal_op;
3053             }
3054             gen_update_cc_op(s);
3055             gen_update_eip_cur(s);
3056             gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3057             gen_helper_monitor(tcg_env, s->A0);
3058             break;
3059 
3060         case 0xc9: /* mwait */
3061             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3062                 goto illegal_op;
3063             }
3064             gen_update_cc_op(s);
3065             gen_update_eip_cur(s);
3066             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3067             s->base.is_jmp = DISAS_NORETURN;
3068             break;
3069 
3070         case 0xca: /* clac */
3071             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3072                 || CPL(s) != 0) {
3073                 goto illegal_op;
3074             }
3075             gen_reset_eflags(s, AC_MASK);
3076             s->base.is_jmp = DISAS_EOB_NEXT;
3077             break;
3078 
3079         case 0xcb: /* stac */
3080             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3081                 || CPL(s) != 0) {
3082                 goto illegal_op;
3083             }
3084             gen_set_eflags(s, AC_MASK);
3085             s->base.is_jmp = DISAS_EOB_NEXT;
3086             break;
3087 
3088         CASE_MODRM_MEM_OP(1): /* sidt */
3089             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3090                 break;
3091             }
3092             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3093             gen_lea_modrm(s, decode);
3094             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3095             gen_op_st_v(s, MO_16, s->T0, s->A0);
3096             gen_add_A0_im(s, 2);
3097             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3098             /*
3099              * NB: Despite a confusing description in Intel CPU documentation,
             *     all 32 bits are written regardless of operand size.
3101              */
3102             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3103             break;
3104 
3105         case 0xd0: /* xgetbv */
3106             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3107                 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3108                 goto illegal_op;
3109             }
3110             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3111             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3112             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3113             break;
3114 
3115         case 0xd1: /* xsetbv */
3116             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3117                 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3118                 goto illegal_op;
3119             }
3120             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3121             if (!check_cpl0(s)) {
3122                 break;
3123             }
3124             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3125                                   cpu_regs[R_EDX]);
3126             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3127             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3128             /* End TB because translation flags may change.  */
3129             s->base.is_jmp = DISAS_EOB_NEXT;
3130             break;
3131 
3132         case 0xd8: /* VMRUN */
3133             if (!SVME(s) || !PE(s)) {
3134                 goto illegal_op;
3135             }
3136             if (!check_cpl0(s)) {
3137                 break;
3138             }
3139             gen_update_cc_op(s);
3140             gen_update_eip_cur(s);
3141             /*
3142              * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
3143              * The usual gen_eob() handling is performed on vmexit after
3144              * host state is reloaded.
3145              */
3146             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3147                              cur_insn_len_i32(s));
3148             tcg_gen_exit_tb(NULL, 0);
3149             s->base.is_jmp = DISAS_NORETURN;
3150             break;
3151 
3152         case 0xd9: /* VMMCALL */
3153             if (!SVME(s)) {
3154                 goto illegal_op;
3155             }
3156             gen_update_cc_op(s);
3157             gen_update_eip_cur(s);
3158             gen_helper_vmmcall(tcg_env);
3159             break;
3160 
3161         case 0xda: /* VMLOAD */
3162             if (!SVME(s) || !PE(s)) {
3163                 goto illegal_op;
3164             }
3165             if (!check_cpl0(s)) {
3166                 break;
3167             }
3168             gen_update_cc_op(s);
3169             gen_update_eip_cur(s);
3170             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3171             break;
3172 
3173         case 0xdb: /* VMSAVE */
3174             if (!SVME(s) || !PE(s)) {
3175                 goto illegal_op;
3176             }
3177             if (!check_cpl0(s)) {
3178                 break;
3179             }
3180             gen_update_cc_op(s);
3181             gen_update_eip_cur(s);
3182             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3183             break;
3184 
3185         case 0xdc: /* STGI */
3186             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3187                 || !PE(s)) {
3188                 goto illegal_op;
3189             }
3190             if (!check_cpl0(s)) {
3191                 break;
3192             }
3193             gen_update_cc_op(s);
3194             gen_helper_stgi(tcg_env);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xdd: /* CLGI */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_clgi(tcg_env);
            break;

        case 0xde: /* SKINIT */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
            /* If not intercepted, SKINIT is not implemented -- raise #UD. */
            goto illegal_op;

        case 0xdf: /* INVLPGA */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
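            /*
             * The address to invalidate comes from rAX, zero-extended
             * when not in 64-bit mode.
             */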
            if (s->aflag == MO_64) {
                tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            } else {
                tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
            }
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(s, decode);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
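            /* With a 16-bit operand size, only a 24-bit base is loaded. */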
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(s, decode);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
            break;

        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_st_modrm(s, decode, ot);
            break;
        case 0xee: /* rdpkru */
            if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
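            /*
             * ECX selects the PKRU register (must be 0, checked by the
             * helper); the 64-bit result is split into EDX:EAX.
             */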
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
            break;

        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ld_modrm(s, decode, MO_16);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
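            /*
             * The ~0xe mask keeps the old PE bit, so OR-ing in the new
             * low nibble can set PE but never clear it.
             */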
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(s, decode);
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
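                    /* Exchange the GS base with MSR_KERNEL_GS_BASE. */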
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
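            /*
             * RDTSCP is RDTSC plus a read of IA32_TSC_AUX (via the rdpid
             * helper) into ECX.
             */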
            gen_helper_rdtsc(tcg_env);
            gen_helper_rdpid(s->T0, tcg_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;

        default:
            goto illegal_op;
        }
        break;

    case 0x11a:
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(s, decode, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(s, decode);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = decode->mem;
                if (reg >= 4
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
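                /*
                 * The 64-bit helper returns the lower bound and passes the
                 * upper bound back through the mmx_t0 scratch slot; the
                 * 32-bit helper packs both 32-bit bounds into a single i64.
                 */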
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        break;
    case 0x11b:
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = decode->mem;
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register: the lower bound is 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
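                /*
                 * The upper bound is kept in one's-complement form, as in
                 * the architectural BND registers.
                 */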
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(s, decode);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = decode->mem;
                if (reg >= 4
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        break;
    default:
        g_assert_not_reached();
    }
    return;
 illegal_op:
    gen_illegal_opcode(s);
    return;
}

#include "decode-new.c.inc"

void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

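    /*
     * Expose guest CPU state fields as named TCG globals so that
     * generated code can refer to them directly.
     */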
    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(cpu, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}

static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
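        /*
         * With CF_PCREL the TB may execute at any virtual address, so
         * record only the offset within the page.
         */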
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}

static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        disas_insn(dc, cpu);
        break;
    case 1:
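        /* The instruction exceeded the 15-byte limit: raise #GP. */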
        gen_exception_gpf(dc);
        break;
    case 2:
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        assert(dc->cc_op_dirty == orig_cc_op_dirty);
        assert(dc->cc_op == orig_cc_op);
        assert(dc->pc_save == orig_pc_save);
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte boundary was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * In single-step mode, we generate only one instruction and
             * then raise an exception.
             * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
             * the flag and abort the translation to give the IRQs a
             * chance to happen.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!translator_is_same_page(&dc->base, dc->base.pc_next)) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /*
         * Most instructions should not use DISAS_NORETURN, as that suppresses
         * the handling of hflags normally done by gen_eob().  We can
         * get here:
         * - for exceptions and interrupts
         * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
         * - for VMRUN, because RF/TF handling for the host is done after
         *   vmexit and INHIBIT_IRQ is loaded from the VMCB
         * - for HLT/PAUSE/MWAIT, to exit the main loop with specific EXCP_*
         *   values; the helpers themselves handle the tasks normally done
         *   by gen_eob().
         */
        break;
    case DISAS_TOO_MANY:
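        /* End the TB early and chain to the next instruction (relative jump of 0). */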
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
    case DISAS_EOB_INHIBIT_IRQ:
        assert(dc->base.pc_next == dc->pc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
    case DISAS_EOB_RECHECK_TF:
    case DISAS_JUMP:
        gen_eob(dc, dc->base.is_jmp);
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
};

void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}