/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"
#include "decode-new.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Fixes for Windows namespace pollution.  */
#undef IN
#undef OUT

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
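
/*
 * Illustrative sketch (not in the original source): for OP = 7,
 * CASE_MODRM_MEM_OP(7) expands to the case-range labels
 *
 *     case 0x38 ... 0x3f:
 *     case 0x78 ... 0x7f:
 *     case 0xb8 ... 0xbf:
 *
 * i.e. every ModRM byte whose reg field is 7 and whose mod field is
 * 0, 1 or 2 (a memory operand).  CASE_MODRM_OP(7) adds the mod == 3
 * range 0xf8 ... 0xff, so register operands match as well.
 */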

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;

/*
 * Point EIP to next instruction before ending translation.
 * For instructions that can change hflags.
 */
#define DISAS_EOB_NEXT         DISAS_TARGET_0

/*
 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
 * already set.  For instructions that activate interrupt shadow.
 */
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1

/*
 * Return to the main loop; EIP might have already been updated
 * but even in that case do not use lookup_and_goto_ptr().
 */
#define DISAS_EOB_ONLY         DISAS_TARGET_2

/*
 * EIP has already been updated.  For jumps that wish to use
 * lookup_and_goto_ptr()
 */
#define DISAS_JUMP             DISAS_TARGET_3

/*
 * EIP has already been updated.  Use updated value of
 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
 */
#define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many system-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
#endif

static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_exception_gpf(DisasContext *s);

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live_[] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_POPCNT] = USES_CC_DST,
};

static uint8_t cc_op_live(CCOp op)
{
    uint8_t result;
    assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));

    /*
     * Check that the array is fully populated.  A zero entry would correspond
     * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
     * as well.
     */
    result = cc_op_live_[op];
    assert(result);
    return result;
}

static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (dirty && s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_discard_i32(cpu_cc_op);
    }
    s->cc_op_dirty = dirty;
    s->cc_op = op;
}
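
/*
 * Illustrative sketch (not in the original source): when a "cmp" is
 * followed by an "and", the translator switches from CC_OP_SUBB + ot
 * to CC_OP_LOGICB + ot.  The liveness masks are
 *
 *     cc_op_live(CC_OP_SUBB)   = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT
 *     cc_op_live(CC_OP_LOGICB) = USES_CC_DST
 *
 * so dead = USES_CC_SRC | USES_CC_SRCT, and set_cc_op_1() emits
 * tcg_gen_discard_tl() for cpu_cc_src and s->cc_srcT, letting the TCG
 * optimizer drop any computation that only fed those globals.
 */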

static void set_cc_op(DisasContext *s, CCOp op)
{
    /*
     * The DYNAMIC setting is translator only, everything else
     * will be spilled later.
     */
    set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
}

static void assume_cc_op(DisasContext *s, CCOp op)
{
    set_cc_op_1(s, op, false);
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
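
/*
 * Illustrative sketch (not in the original source): for "mov %ah, ..."
 * (reg = 4, no REX prefix) byte_reg_is_xH() returns true and the value
 * is taken from bits 15..8 of EAX, whereas the same reg field in
 * "mov %spl, ..." (REX prefix present) returns false and selects
 * bits 7..0 of RSP.
 */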

/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}
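
/*
 * Illustrative sketch (not in the original source): writing AH
 * (ot = MO_8, reg = 4 without REX) reduces to
 *
 *     tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], t0, 8, 8);
 *
 * while a write to a 32-bit destination becomes tcg_gen_ext32u_tl(),
 * which also implements the x86-64 rule that a 32-bit destination
 * zeroes the upper half of the register.
 */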

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}
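
/*
 * Illustrative sketch (not in the original source): with CF_PCREL the
 * translator never knows the absolute EIP, only the distance from the
 * last point where cpu_eip was known to be in sync (s->pc_save).  A
 * PCREL TB therefore updates the register with
 *
 *     tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
 *
 * whereas a non-PCREL TB can simply store the known constant.
 */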

/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}
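
/*
 * Illustrative sketch (not in the original source): a 16-bit access
 * such as "mov (%bx), %ax" with an offset computation that wraps past
 * 0xffff first truncates the offset with tcg_gen_ext16u_tl() and only
 * then adds the DS base, matching real-address-mode wrap semantics;
 * the segment base itself is never masked to 16 bits.
 */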

static void gen_lea_v_seg(DisasContext *s, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
}

static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP or trigger a VMM exit if it is not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
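
/*
 * Illustrative sketch (not in the original source): for MOVSB the
 * sequence above loads one byte from DS:ESI (honouring a segment
 * override), stores it to ES:EDI, then adds dshift = df << MO_8 to
 * both ESI and EDI; since CPUX86State.df holds 1 or -1, the registers
 * are incremented when EFLAGS.DF is clear and decremented when it is
 * set.
 */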

/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
    } else {
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
                             .imm = 1ull << ((8 << size) - 1) };
    }
}

static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
                             .reg = src };
    } else {
        return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                             .imm = MAKE_64BIT_MASK(0, 8 << size),
                             .reg = src };
    }
}
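
/*
 * Illustrative sketch (not in the original source): for an 8-bit ZF
 * test, gen_prepare_val_nz(src, MO_8, true) yields
 *
 *     (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = src, .imm = 0xff }
 *
 * i.e. "ZF is set iff (src & 0xff) == 0", avoiding a separate
 * zero-extension of the low byte.
 */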

/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
        tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = cc_op_size(s->cc_op);
        tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
        tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = cc_op_size(s->cc_op);
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = cc_op_size(s->cc_op);
        return gen_prepare_val_nz(cpu_cc_src, size, true);

    case CC_OP_BLSIB ... CC_OP_BLSIQ:
        size = cc_op_size(s->cc_op);
        return gen_prepare_val_nz(cpu_cc_src, size, false);

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       if (!reg) {
           reg = tcg_temp_new();
       }
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .no_setcond = true };
    }
}

/* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}

/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
    }
}

/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}

/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_DYNAMIC:
        gen_update_cc_op(s);
        if (!reg) {
            reg = tcg_temp_new();
        }
        gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
    default:
        {
            MemOp size = cc_op_size(s->cc_op);
            return gen_prepare_val_nz(cpu_cc_dst, size, true);
        }
    }
}

/* return how to compute jump opcode 'b'.  'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = cc_op_size(s->cc_op);
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
            tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
            tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
        /* Mostly used for test+jump */
        size = s->cc_op - CC_OP_LOGICB;
        switch (jcc_op) {
        case JCC_BE:
            /* CF = 0, becomes jz/je */
            jcc_op = JCC_Z;
            goto slow_jcc;
        case JCC_L:
            /* OF = 0, becomes js/jns */
            jcc_op = JCC_S;
            goto slow_jcc;
        case JCC_LE:
            /* SF or ZF, becomes signed <= 0 */
            tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
            cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
            break;
        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
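
/*
 * Illustrative sketch (not in the original source): for "jae" the
 * opcode's condition nibble gives b = 3, so inv = 1 and
 * jcc_op = JCC_B.  After a preceding "cmp" (CC_OP_SUB*),
 * gen_prepare_eflags_c() returns TCG_COND_LTU on (cc_srcT, cc_src),
 * and the final tcg_invert_cond() turns it into TCG_COND_GEU: a
 * single unsigned comparison with no materialized EFLAGS.
 */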

static void gen_setcc(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean.  */
static inline void gen_jcc(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    /*
     * Note that this must be _after_ gen_prepare_cc, because it
     * can change the cc_op from CC_OP_DYNAMIC to CC_OP_EFLAGS!
     */
    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void do_gen_rep(DisasContext *s, MemOp ot,
                       void (*fn)(DisasContext *s, MemOp ot),
                       bool is_repz_nz)
{
    TCGLabel *done = gen_new_label();

    gen_update_cc_op(s);
    gen_op_jz_ecx(s, done);

    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    if (is_repz_nz) {
        int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
        gen_jcc(s, (JCC_Z << 1) | (nz ^ 1), done);
    }

    /* Go to the main loop but reenter the same instruction.  */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);

    /* CX/ECX/RCX is zero, or REPZ/REPNZ broke the repetition.  */
    gen_set_label(done);
    set_cc_op(s, CC_OP_DYNAMIC);
    gen_jmp_rel_csize(s, 0, 1);
}
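
/*
 * Illustrative sketch (not in the original source): "rep movsb"
 * therefore translates, per iteration, to roughly
 *
 *     if (ECX == 0) goto done;
 *     <one gen_movs() body>
 *     ECX -= 1;
 *     goto start of this instruction;   // re-enter via the main loop
 *  done:
 *     goto next instruction;
 *
 * Executing one iteration per translation keeps the instruction
 * restartable after a page fault and lets interrupts be recognized
 * between iterations.
 */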

static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
        do_gen_rep(s, ot, fn, false);
    } else {
        fn(s, ot);
    }
}

static void gen_repz_nz(DisasContext *s, MemOp ot,
                        void (*fn)(DisasContext *s, MemOp ot))
{
    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
        do_gen_rep(s, ot, fn, true);
    } else {
        fn(s, ot);
    }
}

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
                             bool is_right, TCGv count)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 is defined, fall through into the MO_32
         * case; otherwise fall through to the default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }
}
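
/*
 * Illustrative sketch (not in the original source): on a 64-bit host,
 * a 32-bit "shrd %cl, %ebx, %eax" concatenates EBX:EAX into a single
 * 64-bit value and performs one tcg_gen_shr_i64(); bits shifted out of
 * EAX are thus refilled from EBX exactly as the architecture
 * specifies, while tmp0 (the value shifted by count - 1) can be used
 * by the caller to derive CF.
 */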

#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !translator_is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            (void)translator_ldub(env, &s->base,
                                  (s->pc - 1) & TARGET_PAGE_MASK);
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
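
/*
 * Illustrative sketch (not in the original source): suppose an
 * instruction starts at page offset 0xff2 with a 4 KiB page size; its
 * 16th byte would sit at offset 0x001 of the next page.  If that next
 * page is unmapped, the translator_ldub() probe above faults (#PF)
 * before the length check raises #GP for exceeding
 * X86_MAX_INSN_LENGTH, giving the architecturally required ordering.
 */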

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif

/* Decompose an address.  */

static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm, bool is_vsib)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4 && !is_vsib) {
                index = -1;  /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
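
/*
 * Illustrative sketch (not in the original source): for the 32-bit
 * operand "0x10(%eax,%ecx,4)", the decoder sees mod = 2 and rm = 4
 * (a SIB byte follows), then scale = 2, index = R_ECX, base = R_EAX,
 * and a 32-bit displacement of 0x10, yielding
 *
 *     (AddressParts){ R_DS, R_EAX, R_ECX, 2, 0x10 }
 */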
1671 
1672 /* Compute the address, with a minimum number of TCG ops.  */
1673 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1674 {
1675     TCGv ea = NULL;
1676 
1677     if (a.index >= 0 && !is_vsib) {
1678         if (a.scale == 0) {
1679             ea = cpu_regs[a.index];
1680         } else {
1681             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1682             ea = s->A0;
1683         }
1684         if (a.base >= 0) {
1685             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1686             ea = s->A0;
1687         }
1688     } else if (a.base >= 0) {
1689         ea = cpu_regs[a.base];
1690     }
1691     if (!ea) {
1692         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1693             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1694             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1695         } else {
1696             tcg_gen_movi_tl(s->A0, a.disp);
1697         }
1698         ea = s->A0;
1699     } else if (a.disp != 0) {
1700         tcg_gen_addi_tl(s->A0, ea, a.disp);
1701         ea = s->A0;
1702     }
1703 
1704     return ea;
1705 }
1706 
1707 /* Used for BNDCL, BNDCU, BNDCN.  */
1708 static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
1709                       TCGCond cond, TCGv_i64 bndv)
1710 {
1711     TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
1712 
1713     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1714     if (!CODE64(s)) {
1715         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1716     }
1717     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1718     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1719     gen_helper_bndck(tcg_env, s->tmp2_i32);
1720 }
1721 
1722 /* generate modrm load of memory or register. */
1723 static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1724 {
1725     int modrm = s->modrm;
1726     int mod, rm;
1727 
1728     mod = (modrm >> 6) & 3;
1729     rm = (modrm & 7) | REX_B(s);
1730     if (mod == 3) {
1731         gen_op_mov_v_reg(s, ot, s->T0, rm);
1732     } else {
1733         gen_lea_modrm(s, decode);
1734         gen_op_ld_v(s, ot, s->T0, s->A0);
1735     }
1736 }
1737 
1738 /* Generate a modrm store of T0 to memory or a register. */
1739 static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1740 {
1741     int modrm = s->modrm;
1742     int mod, rm;
1743 
1744     mod = (modrm >> 6) & 3;
1745     rm = (modrm & 7) | REX_B(s);
1746     if (mod == 3) {
1747         gen_op_mov_reg_v(s, ot, rm, s->T0);
1748     } else {
1749         gen_lea_modrm(s, decode);
1750         gen_op_st_v(s, ot, s->T0, s->A0);
1751     }
1752 }
1753 
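     /* Fetch an immediate of width OT from the instruction stream,
        zero-extended to target_ulong.  */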
1754 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1755 {
1756     target_ulong ret;
1757 
1758     switch (ot) {
1759     case MO_8:
1760         ret = x86_ldub_code(env, s);
1761         break;
1762     case MO_16:
1763         ret = x86_lduw_code(env, s);
1764         break;
1765     case MO_32:
1766         ret = x86_ldl_code(env, s);
1767         break;
1768 #ifdef TARGET_X86_64
1769     case MO_64:
1770         ret = x86_ldq_code(env, s);
1771         break;
1772 #endif
1773     default:
1774         g_assert_not_reached();
1775     }
1776     return ret;
1777 }
1778 
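     /* Fetch an immediate of width OT as an unsigned 32-bit value;
        MO_64 is fetched like MO_32 (only the low 32 bits are read).  */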
1779 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1780 {
1781     uint32_t ret;
1782 
1783     switch (ot) {
1784     case MO_8:
1785         ret = x86_ldub_code(env, s);
1786         break;
1787     case MO_16:
1788         ret = x86_lduw_code(env, s);
1789         break;
1790     case MO_32:
1791 #ifdef TARGET_X86_64
1792     case MO_64:
1793 #endif
1794         ret = x86_ldl_code(env, s);
1795         break;
1796     default:
1797         g_assert_not_reached();
1798     }
1799     return ret;
1800 }
1801 
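     /* Fetch an immediate of width OT, sign-extended to target_long.  */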
1802 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1803 {
1804     target_long ret;
1805 
1806     switch (ot) {
1807     case MO_8:
1808         ret = (int8_t) x86_ldub_code(env, s);
1809         break;
1810     case MO_16:
1811         ret = (int16_t) x86_lduw_code(env, s);
1812         break;
1813     case MO_32:
1814         ret = (int32_t) x86_ldl_code(env, s);
1815         break;
1816 #ifdef TARGET_X86_64
1817     case MO_64:
1818         ret = x86_ldq_code(env, s);
1819         break;
1820 #endif
1821     default:
1822         g_assert_not_reached();
1823     }
1824     return ret;
1825 }
1826 
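     /* Emit both exits of a conditional jump: fall through to the next
        instruction when not taken, or jump to eip+DIFF when taken.  */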
1827 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1828                                         TCGLabel *not_taken, TCGLabel *taken)
1829 {
1830     if (not_taken) {
1831         gen_set_label(not_taken);
1832     }
1833     gen_jmp_rel_csize(s, 0, 1);
1834 
1835     gen_set_label(taken);
1836     gen_jmp_rel(s, s->dflag, diff, 0);
1837 }
1838 
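     /* Move SRC into DEST if condition B holds, else leave DEST alone.  */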
1839 static void gen_cmovcc(DisasContext *s, int b, TCGv dest, TCGv src)
1840 {
1841     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1842 
1843     if (!cc.use_reg2) {
1844         cc.reg2 = tcg_constant_tl(cc.imm);
1845     }
1846 
1847     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1848 }
1849 
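     /* Load a segment register in real or VM86 mode: store the 16-bit
        selector and compute the base as selector << 4.  */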
1850 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1851 {
1852     TCGv selector = tcg_temp_new();
1853     tcg_gen_ext16u_tl(selector, seg);
1854     tcg_gen_st32_tl(selector, tcg_env,
1855                     offsetof(CPUX86State,segs[seg_reg].selector));
1856     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1857 }
1858 
1859 /* Move SRC to seg_reg and compute whether the CPU state may change.
1860    Never call this function with seg_reg == R_CS.  */
1861 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
1862 {
1863     if (PE(s) && !VM86(s)) {
1864         tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
1865         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
1866         /* Abort translation because the addseg value may change or
1867            because ss32 may change.  For R_SS, translation must always
1868            stop, since special handling is needed to inhibit hardware
1869            interrupts for the next instruction.  */
1870         if (seg_reg == R_SS) {
1871             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1872         } else if (CODE32(s) && seg_reg < R_FS) {
1873             s->base.is_jmp = DISAS_EOB_NEXT;
1874         }
1875     } else {
1876         gen_op_movl_seg_real(s, seg_reg, src);
1877         if (seg_reg == R_SS) {
1878             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1879         }
1880     }
1881 }
1882 
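     /* Far call with the new CS in T1 and the new EIP in T0; protected
        mode goes through a helper that performs the privilege checks.  */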
1883 static void gen_far_call(DisasContext *s)
1884 {
1885     TCGv_i32 new_cs = tcg_temp_new_i32();
1886     tcg_gen_trunc_tl_i32(new_cs, s->T1);
1887     if (PE(s) && !VM86(s)) {
1888         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
1889                                    tcg_constant_i32(s->dflag - 1),
1890                                    eip_next_tl(s));
1891     } else {
1892         TCGv_i32 new_eip = tcg_temp_new_i32();
1893         tcg_gen_trunc_tl_i32(new_eip, s->T0);
1894         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
1895                               tcg_constant_i32(s->dflag - 1),
1896                               eip_next_i32(s));
1897     }
1898     s->base.is_jmp = DISAS_JUMP;
1899 }
1900 
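     /* Far jump with the new CS in T1 and the new EIP in T0.  */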
1901 static void gen_far_jmp(DisasContext *s)
1902 {
1903     if (PE(s) && !VM86(s)) {
1904         TCGv_i32 new_cs = tcg_temp_new_i32();
1905         tcg_gen_trunc_tl_i32(new_cs, s->T1);
1906         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
1907                                   eip_next_tl(s));
1908     } else {
1909         gen_op_movl_seg_real(s, R_CS, s->T1);
1910         gen_op_jmp_v(s, s->T0);
1911     }
1912     s->base.is_jmp = DISAS_JUMP;
1913 }
1914 
1915 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
1916 {
1917     /* Fast path: no SVM guest is active.  */
1918     if (likely(!GUEST(s))) {
1919         return;
1920     }
1921     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
1922 }
1923 
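     /* Adjust ESP by ADDEND, using the current stack-address size.  */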
1924 static inline void gen_stack_update(DisasContext *s, int addend)
1925 {
1926     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
1927 }
1928 
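     /* Compute SRC + OFFSET into DEST and apply the SS segment base
        as needed.  */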
1929 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
1930 {
1931     if (offset) {
1932         tcg_gen_addi_tl(dest, src, offset);
1933         src = dest;
1934     }
1935     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
1936 }
1937 
1938 /* Generate a push. It depends on ss32, addseg and dflag.  */
1939 static void gen_push_v(DisasContext *s, TCGv val)
1940 {
1941     MemOp d_ot = mo_pushpop(s, s->dflag);
1942     MemOp a_ot = mo_stacksize(s);
1943     int size = 1 << d_ot;
1944     TCGv new_esp = tcg_temp_new();
1945 
1946     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
1947 
1948     /* Now reduce the value to the address size and apply SS base.  */
1949     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
1950     gen_op_st_v(s, d_ot, val, s->A0);
1951     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
1952 }
1953 
1954 /* A two-step pop is necessary for precise exceptions. */
1955 static MemOp gen_pop_T0(DisasContext *s)
1956 {
1957     MemOp d_ot = mo_pushpop(s, s->dflag);
1958 
1959     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
1960     gen_op_ld_v(s, d_ot, s->T0, s->T0);
1961 
1962     return d_ot;
1963 }
1964 
1965 static inline void gen_pop_update(DisasContext *s, MemOp ot)
1966 {
1967     gen_stack_update(s, 1 << ot);
1968 }
1969 
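     /* PUSHA: store all eight general registers below ESP, including the
        pre-push value of ESP itself, then update ESP once.  */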
1970 static void gen_pusha(DisasContext *s)
1971 {
1972     MemOp d_ot = s->dflag;
1973     int size = 1 << d_ot;
1974     int i;
1975 
1976     for (i = 0; i < 8; i++) {
1977         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
1978         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
1979     }
1980 
1981     gen_stack_update(s, -8 * size);
1982 }
1983 
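     /* POPA: reload all general registers except ESP, then update ESP
        once.  */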
1984 static void gen_popa(DisasContext *s)
1985 {
1986     MemOp d_ot = s->dflag;
1987     int size = 1 << d_ot;
1988     int i;
1989 
1990     for (i = 0; i < 8; i++) {
1991         /* ESP is not reloaded */
1992         if (7 - i == R_ESP) {
1993             continue;
1994         }
1995         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
1996         gen_op_ld_v(s, d_ot, s->T0, s->A0);
1997         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
1998     }
1999 
2000     gen_stack_update(s, 8 * size);
2001 }
2002 
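     /* ENTER: push EBP, optionally copy LEVEL-1 frame pointers plus the
        new frame pointer, and reserve ESP_ADDEND bytes for locals.  */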
2003 static void gen_enter(DisasContext *s, int esp_addend, int level)
2004 {
2005     MemOp d_ot = mo_pushpop(s, s->dflag);
2006     MemOp a_ot = mo_stacksize(s);
2007     int size = 1 << d_ot;
2008 
2009     /* Push BP; compute FrameTemp into T1.  */
2010     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2011     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2012     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2013 
2014     level &= 31;
2015     if (level != 0) {
2016         int i;
2017 
2018         /* Copy level-1 pointers from the previous frame.  */
2019         for (i = 1; i < level; ++i) {
2020             gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2021             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2022 
2023             gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2024             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2025         }
2026 
2027         /* Push the current FrameTemp as the last level.  */
2028         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2029         gen_op_st_v(s, d_ot, s->T1, s->A0);
2030     }
2031 
2032     /* Copy the FrameTemp value to EBP.  */
2033     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2034 
2035     /* Compute the final value of ESP.  */
2036     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2037     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2038 }
2039 
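     /* LEAVE: set ESP to EBP, then pop EBP.  */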
2040 static void gen_leave(DisasContext *s)
2041 {
2042     MemOp d_ot = mo_pushpop(s, s->dflag);
2043     MemOp a_ot = mo_stacksize(s);
2044 
2045     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2046     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2047 
2048     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2049 
2050     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2051     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2052 }
2053 
2054 /* Similarly, except that the assumption here is that we don't decode
2055    the instruction at all -- either a missing opcode, an unimplemented
2056    feature, or just a bogus instruction stream.  */
2057 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2058 {
2059     gen_illegal_opcode(s);
2060 
2061     if (qemu_loglevel_mask(LOG_UNIMP)) {
2062         FILE *logfile = qemu_log_trylock();
2063         if (logfile) {
2064             target_ulong pc = s->base.pc_next, end = s->pc;
2065 
2066             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2067             for (; pc < end; ++pc) {
2068                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2069             }
2070             fprintf(logfile, "\n");
2071             qemu_log_unlock(logfile);
2072         }
2073     }
2074 }
2075 
2076 /* An interrupt differs from an exception because of the
2077    privilege checks.  */
2078 static void gen_interrupt(DisasContext *s, uint8_t intno)
2079 {
2080     gen_update_cc_op(s);
2081     gen_update_eip_cur(s);
2082     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2083                                cur_insn_len_i32(s));
2084     s->base.is_jmp = DISAS_NORETURN;
2085 }
2086 
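     /* Set MASK in env->hflags, mirroring the change in s->flags so that
        the rest of translation sees the updated value.  */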
2087 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2088 {
2089     if ((s->flags & mask) == 0) {
2090         TCGv_i32 t = tcg_temp_new_i32();
2091         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2092         tcg_gen_ori_i32(t, t, mask);
2093         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2094         s->flags |= mask;
2095     }
2096 }
2097 
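     /* Clear MASK in env->hflags, mirroring the change in s->flags.  */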
2098 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2099 {
2100     if (s->flags & mask) {
2101         TCGv_i32 t = tcg_temp_new_i32();
2102         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2103         tcg_gen_andi_i32(t, t, ~mask);
2104         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2105         s->flags &= ~mask;
2106     }
2107 }
2108 
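     /* Set MASK in env->eflags.  */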
2109 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2110 {
2111     TCGv t = tcg_temp_new();
2112 
2113     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2114     tcg_gen_ori_tl(t, t, mask);
2115     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2116 }
2117 
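     /* Clear MASK in env->eflags.  */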
2118 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2119 {
2120     TCGv t = tcg_temp_new();
2121 
2122     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2123     tcg_gen_andi_tl(t, t, ~mask);
2124     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2125 }
2126 
2127 /* Clear BND registers during legacy branches.  */
2128 static void gen_bnd_jmp(DisasContext *s)
2129 {
2130     /* Clear the registers only if the BND prefix is missing, MPX is
2131        enabled, and the BNDREGs are already known to be in use (non-zero).
2132        The helper itself will check BNDPRESERVE at runtime.  */
2133     if ((s->prefix & PREFIX_REPNZ) == 0
2134         && (s->flags & HF_MPX_EN_MASK) != 0
2135         && (s->flags & HF_MPX_IU_MASK) != 0) {
2136         gen_helper_bnd_jmp(tcg_env);
2137     }
2138 }
2139 
2140 /*
2141  * Generate an end of block, including common tasks such as generating
2142  * single step traps, resetting the RF flag, and handling the interrupt
2143  * shadow.
2144  */
2145 static void
2146 gen_eob(DisasContext *s, int mode)
2147 {
2148     bool inhibit_reset;
2149 
2150     gen_update_cc_op(s);
2151 
2152     /* If several instructions disable interrupts, only the first takes effect.  */
2153     inhibit_reset = false;
2154     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2155         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2156         inhibit_reset = true;
2157     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2158         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2159     }
2160 
2161     if (s->base.tb->flags & HF_RF_MASK) {
2162         gen_reset_eflags(s, RF_MASK);
2163     }
2164     if (mode == DISAS_EOB_RECHECK_TF) {
2165         gen_helper_rechecking_single_step(tcg_env);
2166         tcg_gen_exit_tb(NULL, 0);
2167     } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
2168         gen_helper_single_step(tcg_env);
2169     } else if (mode == DISAS_JUMP &&
2170                /* give irqs a chance to happen */
2171                !inhibit_reset) {
2172         tcg_gen_lookup_and_goto_ptr();
2173     } else {
2174         tcg_gen_exit_tb(NULL, 0);
2175     }
2176 
2177     s->base.is_jmp = DISAS_NORETURN;
2178 }
2179 
2180 /* Jump to eip+diff, truncating the result to OT. */
2181 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2182 {
2183     bool use_goto_tb = s->jmp_opt;
2184     target_ulong mask = -1;
2185     target_ulong new_pc = s->pc + diff;
2186     target_ulong new_eip = new_pc - s->cs_base;
2187 
2188     assert(!s->cc_op_dirty);
2189 
2190     /* In 64-bit mode, operand size is fixed at 64 bits. */
2191     if (!CODE64(s)) {
2192         if (ot == MO_16) {
2193             mask = 0xffff;
2194             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2195                 use_goto_tb = false;
2196             }
2197         } else {
2198             mask = 0xffffffff;
2199         }
2200     }
2201     new_eip &= mask;
2202 
2203     if (tb_cflags(s->base.tb) & CF_PCREL) {
2204         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2205         /*
2206          * If we can prove the branch does not leave the page and we have
2207          * no extra masking to apply (data16 branch in code32, see above),
2208          * then we have also proven that the addition does not wrap.
2209          */
2210         if (!use_goto_tb || !translator_is_same_page(&s->base, new_pc)) {
2211             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2212             use_goto_tb = false;
2213         }
2214     } else if (!CODE64(s)) {
2215         new_pc = (uint32_t)(new_eip + s->cs_base);
2216     }
2217 
2218     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2219         /* jump to same page: we can use a direct jump */
2220         tcg_gen_goto_tb(tb_num);
2221         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2222             tcg_gen_movi_tl(cpu_eip, new_eip);
2223         }
2224         tcg_gen_exit_tb(s->base.tb, tb_num);
2225         s->base.is_jmp = DISAS_NORETURN;
2226     } else {
2227         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2228             tcg_gen_movi_tl(cpu_eip, new_eip);
2229         }
2230         if (s->jmp_opt) {
2231             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2232         } else {
2233             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2234         }
2235     }
2236 }
2237 
2238 /* Jump to eip+diff, truncating to the current code size. */
2239 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2240 {
2241     /* CODE64 ignores the OT argument, so we need not consider it. */
2242     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2243 }
2244 
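     /* Load 64 bits from the address in A0 into the CPUX86State field
        at OFFSET; gen_stq_env_A0 below does the reverse.  */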
2245 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2246 {
2247     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2248     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2249 }
2250 
2251 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2252 {
2253     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2254     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2255 }
2256 
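     /* 128-bit load from the address in A0.  With AVX, an aligned 16-byte
        access is treated as atomic (MO_ATOM_IFALIGN); without it, only the
        two 8-byte halves need be atomic.  */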
2257 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2258 {
2259     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2260                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2261     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2262     int mem_index = s->mem_index;
2263     TCGv_i128 t = tcg_temp_new_i128();
2264 
2265     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2266     tcg_gen_st_i128(t, tcg_env, offset);
2267 }
2268 
2269 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2270 {
2271     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2272                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2273     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2274     int mem_index = s->mem_index;
2275     TCGv_i128 t = tcg_temp_new_i128();
2276 
2277     tcg_gen_ld_i128(t, tcg_env, offset);
2278     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2279 }
2280 
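     /* 256-bit load from the address in A0, performed as two 16-byte
        accesses; only the 8-byte halves are required to be atomic.  */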
2281 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2282 {
2283     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2284     int mem_index = s->mem_index;
2285     TCGv_i128 t0 = tcg_temp_new_i128();
2286     TCGv_i128 t1 = tcg_temp_new_i128();
2287 
2288     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2289     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2290     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2291 
2292     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2293     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2294 }
2295 
2296 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2297 {
2298     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2299     int mem_index = s->mem_index;
2300     TCGv_i128 t = tcg_temp_new_i128();
2301 
2302     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2303     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2304     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2305     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2306     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2307 }
2308 
2309 #include "emit.c.inc"
2310 
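     /* Translate one x87 instruction: B is the first opcode byte
        (0xd8..0xdf) and the modrm byte selects the operation.  Unless
        the op is a control instruction, the FPU instruction pointer
        (and, for memory ops, the data pointer) is updated at the end.  */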
2311 static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
2312 {
2313     bool update_fip = true;
2314     int b = decode->b;
2315     int modrm = s->modrm;
2316     int mod, rm, op;
2317 
2318     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2319         /* If CR0.EM or CR0.TS is set, generate an FPU exception.  */
2320         /* XXX: what to do for an illegal op?  */
2321         gen_exception(s, EXCP07_PREX);
2322         return;
2323     }
2324     mod = (modrm >> 6) & 3;
2325     rm = modrm & 7;
2326     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2327     if (mod != 3) {
2328         /* memory op */
2329         TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2330         TCGv last_addr = tcg_temp_new();
2331         bool update_fdp = true;
2332 
2333         tcg_gen_mov_tl(last_addr, ea);
2334         gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
2335 
2336         switch (op) {
2337         case 0x00 ... 0x07: /* fxxxs */
2338         case 0x10 ... 0x17: /* fixxxl */
2339         case 0x20 ... 0x27: /* fxxxl */
2340         case 0x30 ... 0x37: /* fixxx */
2341             {
2342                 int op1;
2343                 op1 = op & 7;
2344 
2345                 switch (op >> 4) {
2346                 case 0:
2347                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2348                                         s->mem_index, MO_LEUL);
2349                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2350                     break;
2351                 case 1:
2352                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2353                                         s->mem_index, MO_LEUL);
2354                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2355                     break;
2356                 case 2:
2357                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2358                                         s->mem_index, MO_LEUQ);
2359                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2360                     break;
2361                 case 3:
2362                 default:
2363                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2364                                         s->mem_index, MO_LESW);
2365                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2366                     break;
2367                 }
2368 
2369                 gen_helper_fp_arith_ST0_FT0(op1);
2370                 if (op1 == 3) {
2371                     /* fcomp needs pop */
2372                     gen_helper_fpop(tcg_env);
2373                 }
2374             }
2375             break;
2376         case 0x08: /* flds */
2377         case 0x0a: /* fsts */
2378         case 0x0b: /* fstps */
2379         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2380         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2381         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2382             switch (op & 7) {
2383             case 0:
2384                 switch (op >> 4) {
2385                 case 0:
2386                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2387                                         s->mem_index, MO_LEUL);
2388                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2389                     break;
2390                 case 1:
2391                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2392                                         s->mem_index, MO_LEUL);
2393                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2394                     break;
2395                 case 2:
2396                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2397                                         s->mem_index, MO_LEUQ);
2398                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2399                     break;
2400                 case 3:
2401                 default:
2402                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2403                                         s->mem_index, MO_LESW);
2404                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2405                     break;
2406                 }
2407                 break;
2408             case 1:
2409                 /* XXX: the corresponding CPUID bit must be tested! */
2410                 switch (op >> 4) {
2411                 case 1:
2412                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2413                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2414                                         s->mem_index, MO_LEUL);
2415                     break;
2416                 case 2:
2417                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2418                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2419                                         s->mem_index, MO_LEUQ);
2420                     break;
2421                 case 3:
2422                 default:
2423                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2424                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2425                                         s->mem_index, MO_LEUW);
2426                     break;
2427                 }
2428                 gen_helper_fpop(tcg_env);
2429                 break;
2430             default:
2431                 switch (op >> 4) {
2432                 case 0:
2433                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2434                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2435                                         s->mem_index, MO_LEUL);
2436                     break;
2437                 case 1:
2438                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2439                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2440                                         s->mem_index, MO_LEUL);
2441                     break;
2442                 case 2:
2443                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2444                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2445                                         s->mem_index, MO_LEUQ);
2446                     break;
2447                 case 3:
2448                 default:
2449                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2450                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2451                                         s->mem_index, MO_LEUW);
2452                     break;
2453                 }
2454                 if ((op & 7) == 3) {
2455                     gen_helper_fpop(tcg_env);
2456                 }
2457                 break;
2458             }
2459             break;
2460         case 0x0c: /* fldenv mem */
2461             gen_helper_fldenv(tcg_env, s->A0,
2462                               tcg_constant_i32(s->dflag - 1));
2463             update_fip = update_fdp = false;
2464             break;
2465         case 0x0d: /* fldcw mem */
2466             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2467                                 s->mem_index, MO_LEUW);
2468             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2469             update_fip = update_fdp = false;
2470             break;
2471         case 0x0e: /* fnstenv mem */
2472             gen_helper_fstenv(tcg_env, s->A0,
2473                               tcg_constant_i32(s->dflag - 1));
2474             update_fip = update_fdp = false;
2475             break;
2476         case 0x0f: /* fnstcw mem */
2477             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2478             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2479                                 s->mem_index, MO_LEUW);
2480             update_fip = update_fdp = false;
2481             break;
2482         case 0x1d: /* fldt mem */
2483             gen_helper_fldt_ST0(tcg_env, s->A0);
2484             break;
2485         case 0x1f: /* fstpt mem */
2486             gen_helper_fstt_ST0(tcg_env, s->A0);
2487             gen_helper_fpop(tcg_env);
2488             break;
2489         case 0x2c: /* frstor mem */
2490             gen_helper_frstor(tcg_env, s->A0,
2491                               tcg_constant_i32(s->dflag - 1));
2492             update_fip = update_fdp = false;
2493             break;
2494         case 0x2e: /* fnsave mem */
2495             gen_helper_fsave(tcg_env, s->A0,
2496                              tcg_constant_i32(s->dflag - 1));
2497             update_fip = update_fdp = false;
2498             break;
2499         case 0x2f: /* fnstsw mem */
2500             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2501             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2502                                 s->mem_index, MO_LEUW);
2503             update_fip = update_fdp = false;
2504             break;
2505         case 0x3c: /* fbld */
2506             gen_helper_fbld_ST0(tcg_env, s->A0);
2507             break;
2508         case 0x3e: /* fbstp */
2509             gen_helper_fbst_ST0(tcg_env, s->A0);
2510             gen_helper_fpop(tcg_env);
2511             break;
2512         case 0x3d: /* fildll */
2513             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2514                                 s->mem_index, MO_LEUQ);
2515             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2516             break;
2517         case 0x3f: /* fistpll */
2518             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2519             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2520                                 s->mem_index, MO_LEUQ);
2521             gen_helper_fpop(tcg_env);
2522             break;
2523         default:
2524             goto illegal_op;
2525         }
2526 
2527         if (update_fdp) {
2528             int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
2529 
2530             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2531                            offsetof(CPUX86State,
2532                                     segs[last_seg].selector));
2533             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2534                              offsetof(CPUX86State, fpds));
2535             tcg_gen_st_tl(last_addr, tcg_env,
2536                           offsetof(CPUX86State, fpdp));
2537         }
2538     } else {
2539         /* register float ops */
2540         int opreg = rm;
2541 
2542         switch (op) {
2543         case 0x08: /* fld sti */
2544             gen_helper_fpush(tcg_env);
2545             gen_helper_fmov_ST0_STN(tcg_env,
2546                                     tcg_constant_i32((opreg + 1) & 7));
2547             break;
2548         case 0x09: /* fxchg sti */
2549         case 0x29: /* fxchg4 sti, undocumented op */
2550         case 0x39: /* fxchg7 sti, undocumented op */
2551             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2552             break;
2553         case 0x0a: /* grp d9/2 */
2554             switch (rm) {
2555             case 0: /* fnop */
2556                 /*
2557                  * Check exceptions (FreeBSD FPU probe); this needs to
2558                  * be treated as I/O because of ferr_irq.
2559                  */
2560                 translator_io_start(&s->base);
2561                 gen_helper_fwait(tcg_env);
2562                 update_fip = false;
2563                 break;
2564             default:
2565                 goto illegal_op;
2566             }
2567             break;
2568         case 0x0c: /* grp d9/4 */
2569             switch (rm) {
2570             case 0: /* fchs */
2571                 gen_helper_fchs_ST0(tcg_env);
2572                 break;
2573             case 1: /* fabs */
2574                 gen_helper_fabs_ST0(tcg_env);
2575                 break;
2576             case 4: /* ftst */
2577                 gen_helper_fldz_FT0(tcg_env);
2578                 gen_helper_fcom_ST0_FT0(tcg_env);
2579                 break;
2580             case 5: /* fxam */
2581                 gen_helper_fxam_ST0(tcg_env);
2582                 break;
2583             default:
2584                 goto illegal_op;
2585             }
2586             break;
2587         case 0x0d: /* grp d9/5 */
2588             {
2589                 switch (rm) {
2590                 case 0:
2591                     gen_helper_fpush(tcg_env);
2592                     gen_helper_fld1_ST0(tcg_env);
2593                     break;
2594                 case 1:
2595                     gen_helper_fpush(tcg_env);
2596                     gen_helper_fldl2t_ST0(tcg_env);
2597                     break;
2598                 case 2:
2599                     gen_helper_fpush(tcg_env);
2600                     gen_helper_fldl2e_ST0(tcg_env);
2601                     break;
2602                 case 3:
2603                     gen_helper_fpush(tcg_env);
2604                     gen_helper_fldpi_ST0(tcg_env);
2605                     break;
2606                 case 4:
2607                     gen_helper_fpush(tcg_env);
2608                     gen_helper_fldlg2_ST0(tcg_env);
2609                     break;
2610                 case 5:
2611                     gen_helper_fpush(tcg_env);
2612                     gen_helper_fldln2_ST0(tcg_env);
2613                     break;
2614                 case 6:
2615                     gen_helper_fpush(tcg_env);
2616                     gen_helper_fldz_ST0(tcg_env);
2617                     break;
2618                 default:
2619                     goto illegal_op;
2620                 }
2621             }
2622             break;
2623         case 0x0e: /* grp d9/6 */
2624             switch (rm) {
2625             case 0: /* f2xm1 */
2626                 gen_helper_f2xm1(tcg_env);
2627                 break;
2628             case 1: /* fyl2x */
2629                 gen_helper_fyl2x(tcg_env);
2630                 break;
2631             case 2: /* fptan */
2632                 gen_helper_fptan(tcg_env);
2633                 break;
2634             case 3: /* fpatan */
2635                 gen_helper_fpatan(tcg_env);
2636                 break;
2637             case 4: /* fxtract */
2638                 gen_helper_fxtract(tcg_env);
2639                 break;
2640             case 5: /* fprem1 */
2641                 gen_helper_fprem1(tcg_env);
2642                 break;
2643             case 6: /* fdecstp */
2644                 gen_helper_fdecstp(tcg_env);
2645                 break;
2646             default:
2647             case 7: /* fincstp */
2648                 gen_helper_fincstp(tcg_env);
2649                 break;
2650             }
2651             break;
2652         case 0x0f: /* grp d9/7 */
2653             switch (rm) {
2654             case 0: /* fprem */
2655                 gen_helper_fprem(tcg_env);
2656                 break;
2657             case 1: /* fyl2xp1 */
2658                 gen_helper_fyl2xp1(tcg_env);
2659                 break;
2660             case 2: /* fsqrt */
2661                 gen_helper_fsqrt(tcg_env);
2662                 break;
2663             case 3: /* fsincos */
2664                 gen_helper_fsincos(tcg_env);
2665                 break;
2666             case 5: /* fscale */
2667                 gen_helper_fscale(tcg_env);
2668                 break;
2669             case 4: /* frndint */
2670                 gen_helper_frndint(tcg_env);
2671                 break;
2672             case 6: /* fsin */
2673                 gen_helper_fsin(tcg_env);
2674                 break;
2675             default:
2676             case 7: /* fcos */
2677                 gen_helper_fcos(tcg_env);
2678                 break;
2679             }
2680             break;
2681         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2682         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2683         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2684             {
2685                 int op1;
2686 
2687                 op1 = op & 7;
2688                 if (op >= 0x20) {
2689                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2690                     if (op >= 0x30) {
2691                         gen_helper_fpop(tcg_env);
2692                     }
2693                 } else {
2694                     gen_helper_fmov_FT0_STN(tcg_env,
2695                                             tcg_constant_i32(opreg));
2696                     gen_helper_fp_arith_ST0_FT0(op1);
2697                 }
2698             }
2699             break;
2700         case 0x02: /* fcom */
2701         case 0x22: /* fcom2, undocumented op */
2702             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2703             gen_helper_fcom_ST0_FT0(tcg_env);
2704             break;
2705         case 0x03: /* fcomp */
2706         case 0x23: /* fcomp3, undocumented op */
2707         case 0x32: /* fcomp5, undocumented op */
2708             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2709             gen_helper_fcom_ST0_FT0(tcg_env);
2710             gen_helper_fpop(tcg_env);
2711             break;
2712         case 0x15: /* da/5 */
2713             switch (rm) {
2714             case 1: /* fucompp */
2715                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2716                 gen_helper_fucom_ST0_FT0(tcg_env);
2717                 gen_helper_fpop(tcg_env);
2718                 gen_helper_fpop(tcg_env);
2719                 break;
2720             default:
2721                 goto illegal_op;
2722             }
2723             break;
2724         case 0x1c:
2725             switch (rm) {
2726             case 0: /* feni (287 only, just do nop here) */
2727                 break;
2728             case 1: /* fdisi (287 only, just do nop here) */
2729                 break;
2730             case 2: /* fclex */
2731                 gen_helper_fclex(tcg_env);
2732                 update_fip = false;
2733                 break;
2734             case 3: /* fninit */
2735                 gen_helper_fninit(tcg_env);
2736                 update_fip = false;
2737                 break;
2738             case 4: /* fsetpm (287 only, just do nop here) */
2739                 break;
2740             default:
2741                 goto illegal_op;
2742             }
2743             break;
2744         case 0x1d: /* fucomi */
2745             if (!(s->cpuid_features & CPUID_CMOV)) {
2746                 goto illegal_op;
2747             }
2748             gen_update_cc_op(s);
2749             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2750             gen_helper_fucomi_ST0_FT0(tcg_env);
2751             assume_cc_op(s, CC_OP_EFLAGS);
2752             break;
2753         case 0x1e: /* fcomi */
2754             if (!(s->cpuid_features & CPUID_CMOV)) {
2755                 goto illegal_op;
2756             }
2757             gen_update_cc_op(s);
2758             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2759             gen_helper_fcomi_ST0_FT0(tcg_env);
2760             assume_cc_op(s, CC_OP_EFLAGS);
2761             break;
2762         case 0x28: /* ffree sti */
2763             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2764             break;
2765         case 0x2a: /* fst sti */
2766             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2767             break;
2768         case 0x2b: /* fstp sti */
2769         case 0x0b: /* fstp1 sti, undocumented op */
2770         case 0x3a: /* fstp8 sti, undocumented op */
2771         case 0x3b: /* fstp9 sti, undocumented op */
2772             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2773             gen_helper_fpop(tcg_env);
2774             break;
2775         case 0x2c: /* fucom st(i) */
2776             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2777             gen_helper_fucom_ST0_FT0(tcg_env);
2778             break;
2779         case 0x2d: /* fucomp st(i) */
2780             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2781             gen_helper_fucom_ST0_FT0(tcg_env);
2782             gen_helper_fpop(tcg_env);
2783             break;
2784         case 0x33: /* de/3 */
2785             switch (rm) {
2786             case 1: /* fcompp */
2787                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2788                 gen_helper_fcom_ST0_FT0(tcg_env);
2789                 gen_helper_fpop(tcg_env);
2790                 gen_helper_fpop(tcg_env);
2791                 break;
2792             default:
2793                 goto illegal_op;
2794             }
2795             break;
2796         case 0x38: /* ffreep sti, undocumented op */
2797             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2798             gen_helper_fpop(tcg_env);
2799             break;
2800         case 0x3c: /* df/4 */
2801             switch (rm) {
2802             case 0:
2803                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2804                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2805                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2806                 break;
2807             default:
2808                 goto illegal_op;
2809             }
2810             break;
2811         case 0x3d: /* fucomip */
2812             if (!(s->cpuid_features & CPUID_CMOV)) {
2813                 goto illegal_op;
2814             }
2815             gen_update_cc_op(s);
2816             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2817             gen_helper_fucomi_ST0_FT0(tcg_env);
2818             gen_helper_fpop(tcg_env);
2819             assume_cc_op(s, CC_OP_EFLAGS);
2820             break;
2821         case 0x3e: /* fcomip */
2822             if (!(s->cpuid_features & CPUID_CMOV)) {
2823                 goto illegal_op;
2824             }
2825             gen_update_cc_op(s);
2826             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2827             gen_helper_fcomi_ST0_FT0(tcg_env);
2828             gen_helper_fpop(tcg_env);
2829             assume_cc_op(s, CC_OP_EFLAGS);
2830             break;
2831         case 0x10 ... 0x13: /* fcmovxx */
2832         case 0x18 ... 0x1b:
2833             {
2834                 int op1;
2835                 TCGLabel *l1;
2836                 static const uint8_t fcmov_cc[8] = {
2837                     (JCC_B << 1),
2838                     (JCC_Z << 1),
2839                     (JCC_BE << 1),
2840                     (JCC_P << 1),
2841                 };
2842 
2843                 if (!(s->cpuid_features & CPUID_CMOV)) {
2844                     goto illegal_op;
2845                 }
2846                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2847                 l1 = gen_new_label();
2848                 gen_jcc_noeob(s, op1, l1);
2849                 gen_helper_fmov_ST0_STN(tcg_env,
2850                                         tcg_constant_i32(opreg));
2851                 gen_set_label(l1);
2852             }
2853             break;
2854         default:
2855             goto illegal_op;
2856         }
2857     }
2858 
2859     if (update_fip) {
2860         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2861                        offsetof(CPUX86State, segs[R_CS].selector));
2862         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2863                          offsetof(CPUX86State, fpcs));
2864         tcg_gen_st_tl(eip_cur_tl(s),
2865                       tcg_env, offsetof(CPUX86State, fpip));
2866     }
2867     return;
2868 
2869  illegal_op:
2870     gen_illegal_opcode(s);
2871 }
2872 
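     /* Translate two-byte (0x0F-prefixed) system opcodes: groups 6 and 7,
        RDRAND/RDSEED/RDPID, SWAPGS, etc.  */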
2873 static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
2874 {
2875     int prefixes = s->prefix;
2876     MemOp dflag = s->dflag;
2877     int b = decode->b + 0x100;
2878     int modrm = s->modrm;
2879     MemOp ot;
2880     int reg, rm, mod, op;
2881 
2882     /* now check op code */
2883     switch (b) {
2884     case 0x1c7: /* RDSEED, RDPID with f3 prefix */
2885         mod = (modrm >> 6) & 3;
2886         switch ((modrm >> 3) & 7) {
2887         case 7:
2888             if (mod != 3 ||
2889                 (s->prefix & PREFIX_REPNZ)) {
2890                 goto illegal_op;
2891             }
2892             if (s->prefix & PREFIX_REPZ) {
2893                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
2894                     goto illegal_op;
2895                 }
2896                 gen_helper_rdpid(s->T0, tcg_env);
2897                 rm = (modrm & 7) | REX_B(s);
2898                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
2899                 break;
2900             } else {
2901                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
2902                     goto illegal_op;
2903                 }
2904                 goto do_rdrand;
2905             }
2906 
2907         case 6: /* RDRAND */
2908             if (mod != 3 ||
2909                 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
2910                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
2911                 goto illegal_op;
2912             }
2913         do_rdrand:
2914             translator_io_start(&s->base);
2915             gen_helper_rdrand(s->T0, tcg_env);
2916             rm = (modrm & 7) | REX_B(s);
2917             gen_op_mov_reg_v(s, dflag, rm, s->T0);
2918             assume_cc_op(s, CC_OP_EFLAGS);
2919             break;
2920 
2921         default:
2922             goto illegal_op;
2923         }
2924         break;
2925 
2926     case 0x100:
2927         mod = (modrm >> 6) & 3;
2928         op = (modrm >> 3) & 7;
2929         switch (op) {
2930         case 0: /* sldt */
2931             if (!PE(s) || VM86(s))
2932                 goto illegal_op;
2933             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2934                 break;
2935             }
2936             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
2937             tcg_gen_ld32u_tl(s->T0, tcg_env,
2938                              offsetof(CPUX86State, ldt.selector));
2939             ot = mod == 3 ? dflag : MO_16;
2940             gen_st_modrm(s, decode, ot);
2941             break;
2942         case 2: /* lldt */
2943             if (!PE(s) || VM86(s))
2944                 goto illegal_op;
2945             if (check_cpl0(s)) {
2946                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
2947                 gen_ld_modrm(s, decode, MO_16);
2948                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2949                 gen_helper_lldt(tcg_env, s->tmp2_i32);
2950             }
2951             break;
2952         case 1: /* str */
2953             if (!PE(s) || VM86(s))
2954                 goto illegal_op;
2955             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2956                 break;
2957             }
2958             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
2959             tcg_gen_ld32u_tl(s->T0, tcg_env,
2960                              offsetof(CPUX86State, tr.selector));
2961             ot = mod == 3 ? dflag : MO_16;
2962             gen_st_modrm(s, decode, ot);
2963             break;
2964         case 3: /* ltr */
2965             if (!PE(s) || VM86(s))
2966                 goto illegal_op;
2967             if (check_cpl0(s)) {
2968                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
2969                 gen_ld_modrm(s, decode, MO_16);
2970                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2971                 gen_helper_ltr(tcg_env, s->tmp2_i32);
2972             }
2973             break;
2974         case 4: /* verr */
2975         case 5: /* verw */
2976             if (!PE(s) || VM86(s))
2977                 goto illegal_op;
2978             gen_ld_modrm(s, decode, MO_16);
2979             gen_update_cc_op(s);
2980             if (op == 4) {
2981                 gen_helper_verr(tcg_env, s->T0);
2982             } else {
2983                 gen_helper_verw(tcg_env, s->T0);
2984             }
2985             assume_cc_op(s, CC_OP_EFLAGS);
2986             break;
2987         default:
2988             goto illegal_op;
2989         }
2990         break;
2991 
2992     case 0x101:
2993         switch (modrm) {
2994         CASE_MODRM_MEM_OP(0): /* sgdt */
2995             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2996                 break;
2997             }
2998             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
2999             gen_lea_modrm(s, decode);
3000             tcg_gen_ld32u_tl(s->T0,
3001                              tcg_env, offsetof(CPUX86State, gdt.limit));
3002             gen_op_st_v(s, MO_16, s->T0, s->A0);
3003             gen_add_A0_im(s, 2);
3004             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3005             /*
3006              * NB: Despite a confusing description in Intel CPU documentation,
3007              *     all 32 bits are written regardless of operand size.
3008              */
3009             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3010             break;
3011 
3012         case 0xc8: /* monitor */
3013             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3014                 goto illegal_op;
3015             }
3016             gen_update_cc_op(s);
3017             gen_update_eip_cur(s);
3018             gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3019             gen_helper_monitor(tcg_env, s->A0);
3020             break;
3021 
3022         case 0xc9: /* mwait */
3023             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3024                 goto illegal_op;
3025             }
3026             gen_update_cc_op(s);
3027             gen_update_eip_cur(s);
3028             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3029             s->base.is_jmp = DISAS_NORETURN;
3030             break;
3031 
3032         case 0xca: /* clac */
3033             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3034                 || CPL(s) != 0) {
3035                 goto illegal_op;
3036             }
3037             gen_reset_eflags(s, AC_MASK);
3038             s->base.is_jmp = DISAS_EOB_NEXT;
3039             break;
3040 
3041         case 0xcb: /* stac */
3042             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3043                 || CPL(s) != 0) {
3044                 goto illegal_op;
3045             }
3046             gen_set_eflags(s, AC_MASK);
3047             s->base.is_jmp = DISAS_EOB_NEXT;
3048             break;
3049 
3050         CASE_MODRM_MEM_OP(1): /* sidt */
3051             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3052                 break;
3053             }
3054             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3055             gen_lea_modrm(s, decode);
3056             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3057             gen_op_st_v(s, MO_16, s->T0, s->A0);
3058             gen_add_A0_im(s, 2);
3059             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3060             /*
3061              * NB: Despite a confusing description in Intel CPU documentation,
3062              *     all 32 bits are written regardless of operand size.
3063              */
3064             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3065             break;
3066 
3067         case 0xd0: /* xgetbv */
3068             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3069                 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3070                 goto illegal_op;
3071             }
3072             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3073             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3074             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3075             break;
3076 
3077         case 0xd1: /* xsetbv */
3078             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3079                 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3080                 goto illegal_op;
3081             }
3082             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3083             if (!check_cpl0(s)) {
3084                 break;
3085             }
3086             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3087                                   cpu_regs[R_EDX]);
3088             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3089             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3090             /* End TB because translation flags may change.  */
3091             s->base.is_jmp = DISAS_EOB_NEXT;
3092             break;
3093 
3094         case 0xd8: /* VMRUN */
3095             if (!SVME(s) || !PE(s)) {
3096                 goto illegal_op;
3097             }
3098             if (!check_cpl0(s)) {
3099                 break;
3100             }
3101             gen_update_cc_op(s);
3102             gen_update_eip_cur(s);
3103             /*
3104              * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
3105              * The usual gen_eob() handling is performed on vmexit after
3106              * host state is reloaded.
3107              */
3108             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3109                              cur_insn_len_i32(s));
3110             tcg_gen_exit_tb(NULL, 0);
3111             s->base.is_jmp = DISAS_NORETURN;
3112             break;
3113 
3114         case 0xd9: /* VMMCALL */
3115             if (!SVME(s)) {
3116                 goto illegal_op;
3117             }
3118             gen_update_cc_op(s);
3119             gen_update_eip_cur(s);
3120             gen_helper_vmmcall(tcg_env);
3121             break;
3122 
3123         case 0xda: /* VMLOAD */
3124             if (!SVME(s) || !PE(s)) {
3125                 goto illegal_op;
3126             }
3127             if (!check_cpl0(s)) {
3128                 break;
3129             }
3130             gen_update_cc_op(s);
3131             gen_update_eip_cur(s);
3132             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3133             break;
3134 
3135         case 0xdb: /* VMSAVE */
3136             if (!SVME(s) || !PE(s)) {
3137                 goto illegal_op;
3138             }
3139             if (!check_cpl0(s)) {
3140                 break;
3141             }
3142             gen_update_cc_op(s);
3143             gen_update_eip_cur(s);
3144             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3145             break;
3146 
3147         case 0xdc: /* STGI */
3148             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3149                 || !PE(s)) {
3150                 goto illegal_op;
3151             }
3152             if (!check_cpl0(s)) {
3153                 break;
3154             }
3155             gen_update_cc_op(s);
3156             gen_helper_stgi(tcg_env);
3157             s->base.is_jmp = DISAS_EOB_NEXT;
3158             break;
3159 
3160         case 0xdd: /* CLGI */
3161             if (!SVME(s) || !PE(s)) {
3162                 goto illegal_op;
3163             }
3164             if (!check_cpl0(s)) {
3165                 break;
3166             }
3167             gen_update_cc_op(s);
3168             gen_update_eip_cur(s);
3169             gen_helper_clgi(tcg_env);
3170             break;
3171 
3172         case 0xde: /* SKINIT */
3173             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3174                 || !PE(s)) {
3175                 goto illegal_op;
3176             }
3177             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3178             /* If not intercepted, not implemented -- raise #UD. */
3179             goto illegal_op;
3180 
3181         case 0xdf: /* INVLPGA */
3182             if (!SVME(s) || !PE(s)) {
3183                 goto illegal_op;
3184             }
3185             if (!check_cpl0(s)) {
3186                 break;
3187             }
3188             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
3189             if (s->aflag == MO_64) {
3190                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3191             } else {
3192                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3193             }
3194             gen_helper_flush_page(tcg_env, s->A0);
3195             s->base.is_jmp = DISAS_EOB_NEXT;
3196             break;
3197 
3198         CASE_MODRM_MEM_OP(2): /* lgdt */
3199             if (!check_cpl0(s)) {
3200                 break;
3201             }
3202             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3203             gen_lea_modrm(s, decode);
3204             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3205             gen_add_A0_im(s, 2);
3206             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3207             if (dflag == MO_16) {
3208                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3209             }
3210             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3211             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3212             break;
3213 
3214         CASE_MODRM_MEM_OP(3): /* lidt */
3215             if (!check_cpl0(s)) {
3216                 break;
3217             }
3218             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3219             gen_lea_modrm(s, decode);
3220             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3221             gen_add_A0_im(s, 2);
3222             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3223             if (dflag == MO_16) {
3224                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3225             }
3226             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3227             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3228             break;
3229 
3230         CASE_MODRM_OP(4): /* smsw */
3231             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3232                 break;
3233             }
3234             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3235             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3236             /*
3237              * In 32-bit mode, the upper 16 bits of the destination
3238              * register are undefined.  In practice CR0[31:0] is stored
3239              * just like in 64-bit mode.
3240              */
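                 /*
                  * A memory destination is always 16 bits wide; a register
                  * destination uses the full operand size.
                  */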
3241             mod = (modrm >> 6) & 3;
3242             ot = (mod != 3 ? MO_16 : s->dflag);
3243             gen_st_modrm(s, decode, ot);
3244             break;
3245         case 0xee: /* rdpkru */
3246             if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
3247                 goto illegal_op;
3248             }
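                 /*
                  * The helper returns PKRU zero-extended to 64 bits, so the
                  * split below leaves PKRU in EAX and 0 in EDX, as RDPKRU
                  * specifies.
                  */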
3249             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3250             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3251             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3252             break;
3253         case 0xef: /* wrpkru */
3254             if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
3255                 goto illegal_op;
3256             }
3257             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3258                                   cpu_regs[R_EDX]);
3259             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3260             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3261             break;
3262 
3263         CASE_MODRM_OP(6): /* lmsw */
3264             if (!check_cpl0(s)) {
3265                 break;
3266             }
3267             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3268             gen_ld_modrm(s, decode, MO_16);
3269             /*
3270              * Only the 4 lower bits of CR0 are modified.
3271              * PE cannot be set to zero if already set to one.
3272              */
3273             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
3274             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3275             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
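                 /*
                  * Effectively CR0 = (CR0 & ~0xe) | (src & 0xf): MP/EM/TS are
                  * replaced, while PE is OR-ed in and thus can be set but
                  * never cleared.
                  */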
3276             tcg_gen_or_tl(s->T0, s->T0, s->T1);
3277             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3278             s->base.is_jmp = DISAS_EOB_NEXT;
3279             break;
3280 
3281         CASE_MODRM_MEM_OP(7): /* invlpg */
3282             if (!check_cpl0(s)) {
3283                 break;
3284             }
3285             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3286             gen_lea_modrm(s, decode);
3287             gen_helper_flush_page(tcg_env, s->A0);
3288             s->base.is_jmp = DISAS_EOB_NEXT;
3289             break;
3290 
3291         case 0xf8: /* swapgs */
3292 #ifdef TARGET_X86_64
3293             if (CODE64(s)) {
3294                 if (check_cpl0(s)) {
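                     /* Exchange the GS segment base with MSR_KERNEL_GS_BASE. */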
3295                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3296                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3297                                   offsetof(CPUX86State, kernelgsbase));
3298                     tcg_gen_st_tl(s->T0, tcg_env,
3299                                   offsetof(CPUX86State, kernelgsbase));
3300                 }
3301                 break;
3302             }
3303 #endif
3304             goto illegal_op;
3305 
3306         case 0xf9: /* rdtscp */
3307             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3308                 goto illegal_op;
3309             }
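                 /*
                  * RDTSCP = RDTSC plus IA32_TSC_AUX in ECX; the rdpid helper
                  * supplies TSC_AUX.
                  */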
3310             gen_update_cc_op(s);
3311             gen_update_eip_cur(s);
3312             translator_io_start(&s->base);
3313             gen_helper_rdtsc(tcg_env);
3314             gen_helper_rdpid(s->T0, tcg_env);
3315             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3316             break;
3317 
3318         default:
3319             goto illegal_op;
3320         }
3321         break;
3322 
3323     case 0x11a:
3324         if (s->flags & HF_MPX_EN_MASK) {
3325             mod = (modrm >> 6) & 3;
3326             reg = ((modrm >> 3) & 7) | REX_R(s);
3327             if (prefixes & PREFIX_REPZ) {
3328                 /* bndcl */
3329                 if (reg >= 4
3330                     || s->aflag == MO_16) {
3331                     goto illegal_op;
3332                 }
3333                 gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
3334             } else if (prefixes & PREFIX_REPNZ) {
3335                 /* bndcu */
3336                 if (reg >= 4
3337                     || s->aflag == MO_16) {
3338                     goto illegal_op;
3339                 }
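                     /*
                      * The upper bound is stored in one's complement (see
                      * BNDMK); undo that for the BNDCU comparison, unlike
                      * BNDCN which compares against the raw register value.
                      */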
3340                 TCGv_i64 notu = tcg_temp_new_i64();
3341                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
3342                 gen_bndck(s, decode, TCG_COND_GTU, notu);
3343             } else if (prefixes & PREFIX_DATA) {
3344                 /* bndmov -- from reg/mem */
3345                 if (reg >= 4 || s->aflag == MO_16) {
3346                     goto illegal_op;
3347                 }
3348                 if (mod == 3) {
3349                     int reg2 = (modrm & 7) | REX_B(s);
3350                     if (reg2 >= 4) {
3351                         goto illegal_op;
3352                     }
3353                     if (s->flags & HF_MPX_IU_MASK) {
3354                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
3355                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
3356                     }
3357                 } else {
3358                     gen_lea_modrm(s, decode);
3359                     if (CODE64(s)) {
3360                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3361                                             s->mem_index, MO_LEUQ);
3362                         tcg_gen_addi_tl(s->A0, s->A0, 8);
3363                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3364                                             s->mem_index, MO_LEUQ);
3365                     } else {
3366                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3367                                             s->mem_index, MO_LEUL);
3368                         tcg_gen_addi_tl(s->A0, s->A0, 4);
3369                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3370                                             s->mem_index, MO_LEUL);
3371                     }
3372                     /* The BND registers are now in use. */
3373                     gen_set_hflag(s, HF_MPX_IU_MASK);
3374                 }
3375             } else if (mod != 3) {
3376                 /* bndldx */
3377                 AddressParts a = decode->mem;
3378                 if (reg >= 4
3379                     || s->aflag == MO_16
3380                     || a.base < -1) {
3381                     goto illegal_op;
3382                 }
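                     /*
                      * A0 = base + disp through the segment base, locating the
                      * bound table entry; T0 carries the pointer value used in
                      * the lookup.
                      */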
3383                 if (a.base >= 0) {
3384                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3385                 } else {
3386                     tcg_gen_movi_tl(s->A0, 0);
3387                 }
3388                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3389                 if (a.index >= 0) {
3390                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3391                 } else {
3392                     tcg_gen_movi_tl(s->T0, 0);
3393                 }
3394                 if (CODE64(s)) {
3395                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
3396                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
3397                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
3398                 } else {
3399                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
3400                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
3401                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
3402                 }
3403                 gen_set_hflag(s, HF_MPX_IU_MASK);
3404             }
3405         }
3406         break;
3407     case 0x11b:
3408         if (s->flags & HF_MPX_EN_MASK) {
3409             mod = (modrm >> 6) & 3;
3410             reg = ((modrm >> 3) & 7) | REX_R(s);
3411             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
3412                 /* bndmk */
3413                 if (reg >= 4
3414                     || s->aflag == MO_16) {
3415                     goto illegal_op;
3416                 }
3417                 AddressParts a = decode->mem;
3418                 if (a.base >= 0) {
3419                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
3420                     if (!CODE64(s)) {
3421                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
3422                     }
3423                 } else if (a.base == -1) {
3424                     /* With no base register, the lower bound is 0. */
3425                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
3426                 } else {
3427                     /* rip-relative generates #ud */
3428                     goto illegal_op;
3429                 }
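                     /* BNDMK: UB = ~lea, the one's complement of the limit. */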
3430                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
3431                 if (!CODE64(s)) {
3432                     tcg_gen_ext32u_tl(s->A0, s->A0);
3433                 }
3434                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
3435                 /* The BND registers are now in use. */
3436                 gen_set_hflag(s, HF_MPX_IU_MASK);
3437                 break;
3438             } else if (prefixes & PREFIX_REPNZ) {
3439                 /* bndcn */
3440                 if (reg >= 4
3441                     || s->aflag == MO_16) {
3442                     goto illegal_op;
3443                 }
3444                 gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
3445             } else if (prefixes & PREFIX_DATA) {
3446                 /* bndmov -- to reg/mem */
3447                 if (reg >= 4 || s->aflag == MO_16) {
3448                     goto illegal_op;
3449                 }
3450                 if (mod == 3) {
3451                     int reg2 = (modrm & 7) | REX_B(s);
3452                     if (reg2 >= 4) {
3453                         goto illegal_op;
3454                     }
3455                     if (s->flags & HF_MPX_IU_MASK) {
3456                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
3457                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
3458                     }
3459                 } else {
3460                     gen_lea_modrm(s, decode);
3461                     if (CODE64(s)) {
3462                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3463                                             s->mem_index, MO_LEUQ);
3464                         tcg_gen_addi_tl(s->A0, s->A0, 8);
3465                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3466                                             s->mem_index, MO_LEUQ);
3467                     } else {
3468                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3469                                             s->mem_index, MO_LEUL);
3470                         tcg_gen_addi_tl(s->A0, s->A0, 4);
3471                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3472                                             s->mem_index, MO_LEUL);
3473                     }
3474                 }
3475             } else if (mod != 3) {
3476                 /* bndstx */
3477                 AddressParts a = decode->mem;
3478                 if (reg >= 4
3479                     || s->aflag == MO_16
3480                     || a.base < -1) {
3481                     goto illegal_op;
3482                 }
3483                 if (a.base >= 0) {
3484                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3485                 } else {
3486                     tcg_gen_movi_tl(s->A0, 0);
3487                 }
3488                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3489                 if (a.index >= 0) {
3490                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3491                 } else {
3492                     tcg_gen_movi_tl(s->T0, 0);
3493                 }
3494                 if (CODE64(s)) {
3495                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
3496                                         cpu_bndl[reg], cpu_bndu[reg]);
3497                 } else {
3498                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
3499                                         cpu_bndl[reg], cpu_bndu[reg]);
3500                 }
3501             }
3502         }
3503         break;
3504     default:
3505         g_assert_not_reached();
3506     }
3507     return;
3508  illegal_op:
3509     gen_illegal_opcode(s);
3510     return;
3511 }
3512 
3513 #include "decode-new.c.inc"
3514 
3515 void tcg_x86_init(void)
3516 {
3517     static const char reg_names[CPU_NB_REGS][4] = {
3518 #ifdef TARGET_X86_64
3519         [R_EAX] = "rax",
3520         [R_EBX] = "rbx",
3521         [R_ECX] = "rcx",
3522         [R_EDX] = "rdx",
3523         [R_ESI] = "rsi",
3524         [R_EDI] = "rdi",
3525         [R_EBP] = "rbp",
3526         [R_ESP] = "rsp",
3527         [8]  = "r8",
3528         [9]  = "r9",
3529         [10] = "r10",
3530         [11] = "r11",
3531         [12] = "r12",
3532         [13] = "r13",
3533         [14] = "r14",
3534         [15] = "r15",
3535 #else
3536         [R_EAX] = "eax",
3537         [R_EBX] = "ebx",
3538         [R_ECX] = "ecx",
3539         [R_EDX] = "edx",
3540         [R_ESI] = "esi",
3541         [R_EDI] = "edi",
3542         [R_EBP] = "ebp",
3543         [R_ESP] = "esp",
3544 #endif
3545     };
3546     static const char eip_name[] = {
3547 #ifdef TARGET_X86_64
3548         "rip"
3549 #else
3550         "eip"
3551 #endif
3552     };
3553     static const char seg_base_names[6][8] = {
3554         [R_CS] = "cs_base",
3555         [R_DS] = "ds_base",
3556         [R_ES] = "es_base",
3557         [R_FS] = "fs_base",
3558         [R_GS] = "gs_base",
3559         [R_SS] = "ss_base",
3560     };
3561     static const char bnd_regl_names[4][8] = {
3562         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
3563     };
3564     static const char bnd_regu_names[4][8] = {
3565         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
3566     };
3567     int i;
3568 
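         /* All of these TCG globals are backed by fields of CPUX86State. */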
3569     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
3570                                        offsetof(CPUX86State, cc_op), "cc_op");
3571     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
3572                                     "cc_dst");
3573     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
3574                                     "cc_src");
3575     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
3576                                      "cc_src2");
3577     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
3578 
3579     for (i = 0; i < CPU_NB_REGS; ++i) {
3580         cpu_regs[i] = tcg_global_mem_new(tcg_env,
3581                                          offsetof(CPUX86State, regs[i]),
3582                                          reg_names[i]);
3583     }
3584 
3585     for (i = 0; i < 6; ++i) {
3586         cpu_seg_base[i]
3587             = tcg_global_mem_new(tcg_env,
3588                                  offsetof(CPUX86State, segs[i].base),
3589                                  seg_base_names[i]);
3590     }
3591 
3592     for (i = 0; i < 4; ++i) {
3593         cpu_bndl[i]
3594             = tcg_global_mem_new_i64(tcg_env,
3595                                      offsetof(CPUX86State, bnd_regs[i].lb),
3596                                      bnd_regl_names[i]);
3597         cpu_bndu[i]
3598             = tcg_global_mem_new_i64(tcg_env,
3599                                      offsetof(CPUX86State, bnd_regs[i].ub),
3600                                      bnd_regu_names[i]);
3601     }
3602 }
3603 
3604 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
3605 {
3606     DisasContext *dc = container_of(dcbase, DisasContext, base);
3607     CPUX86State *env = cpu_env(cpu);
3608     uint32_t flags = dc->base.tb->flags;
3609     uint32_t cflags = tb_cflags(dc->base.tb);
3610     int cpl = (flags >> HF_CPL_SHIFT) & 3;
3611     int iopl = (flags >> IOPL_SHIFT) & 3;
3612 
3613     dc->cs_base = dc->base.tb->cs_base;
3614     dc->pc_save = dc->base.pc_next;
3615     dc->flags = flags;
3616 #ifndef CONFIG_USER_ONLY
3617     dc->cpl = cpl;
3618     dc->iopl = iopl;
3619 #endif
3620 
3621     /* We make some simplifying assumptions; validate they're correct. */
3622     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
3623     g_assert(CPL(dc) == cpl);
3624     g_assert(IOPL(dc) == iopl);
3625     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
3626     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
3627     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
3628     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
3629     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
3630     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
3631     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
3632     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
3633 
3634     dc->cc_op = CC_OP_DYNAMIC;
3635     dc->cc_op_dirty = false;
3636     /* select memory access functions */
3637     dc->mem_index = cpu_mmu_index(cpu, false);
3638     dc->cpuid_features = env->features[FEAT_1_EDX];
3639     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
3640     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
3641     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
3642     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
3643     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
3644     dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
3645     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
3646     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
3647                     (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
3648 
3649     dc->T0 = tcg_temp_new();
3650     dc->T1 = tcg_temp_new();
3651     dc->A0 = tcg_temp_new();
3652 
3653     dc->tmp0 = tcg_temp_new();
3654     dc->tmp1_i64 = tcg_temp_new_i64();
3655     dc->tmp2_i32 = tcg_temp_new_i32();
3656     dc->tmp3_i32 = tcg_temp_new_i32();
3657     dc->tmp4 = tcg_temp_new();
3658     dc->cc_srcT = tcg_temp_new();
3659 }
3660 
3661 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
3662 {
3663 }
3664 
3665 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
3666 {
3667     DisasContext *dc = container_of(dcbase, DisasContext, base);
3668     target_ulong pc_arg = dc->base.pc_next;
3669 
3670     dc->prev_insn_start = dc->base.insn_start;
3671     dc->prev_insn_end = tcg_last_op();
3672     if (tb_cflags(dcbase->tb) & CF_PCREL) {
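             /*
              * With CF_PCREL, the TB is independent of its virtual address,
              * so only the offset within the page is recorded.
              */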
3673         pc_arg &= ~TARGET_PAGE_MASK;
3674     }
3675     tcg_gen_insn_start(pc_arg, dc->cc_op);
3676 }
3677 
3678 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
3679 {
3680     DisasContext *dc = container_of(dcbase, DisasContext, base);
3681     bool orig_cc_op_dirty = dc->cc_op_dirty;
3682     CCOp orig_cc_op = dc->cc_op;
3683     target_ulong orig_pc_save = dc->pc_save;
3684 
3685 #ifdef TARGET_VSYSCALL_PAGE
3686     /*
3687      * Detect entry into the vsyscall page and invoke the syscall.
3688      */
3689     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
3690         gen_exception(dc, EXCP_VSYSCALL);
3691         dc->base.pc_next = dc->pc + 1;
3692         return;
3693     }
3694 #endif
3695 
3696     switch (sigsetjmp(dc->jmpbuf, 0)) {
3697     case 0:
3698         disas_insn(dc, cpu);
3699         break;
3700     case 1:
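             /* The instruction exceeded the 15-byte limit: raise #GP. */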
3701         gen_exception_gpf(dc);
3702         break;
3703     case 2:
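             /*
              * A subsequent instruction crossed a page boundary and must be
              * retried as the first instruction of a new TB.
              */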
3704         /* Restore state that may affect the next instruction. */
3705         dc->pc = dc->base.pc_next;
3706         assert(dc->cc_op_dirty == orig_cc_op_dirty);
3707         assert(dc->cc_op == orig_cc_op);
3708         assert(dc->pc_save == orig_pc_save);
3709         dc->base.num_insns--;
3710         tcg_remove_ops_after(dc->prev_insn_end);
3711         dc->base.insn_start = dc->prev_insn_start;
3712         dc->base.is_jmp = DISAS_TOO_MANY;
3713         return;
3714     default:
3715         g_assert_not_reached();
3716     }
3717 
3718     /*
3719      * Instruction decoding completed (possibly with #GP if the
3720      * 15-byte boundary was exceeded).
3721      */
3722     dc->base.pc_next = dc->pc;
3723     if (dc->base.is_jmp == DISAS_NEXT) {
3724         if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
3725             /*
3726              * In single-step mode, we generate only one instruction and
3727              * then generate an exception.
3728              * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
3729              * the flag and end the translation block to give the IRQs a
3730              * chance to be served.
3731              */
3732             dc->base.is_jmp = DISAS_EOB_NEXT;
3733         } else if (!translator_is_same_page(&dc->base, dc->base.pc_next)) {
3734             dc->base.is_jmp = DISAS_TOO_MANY;
3735         }
3736     }
3737 }
3738 
3739 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
3740 {
3741     DisasContext *dc = container_of(dcbase, DisasContext, base);
3742 
3743     switch (dc->base.is_jmp) {
3744     case DISAS_NORETURN:
3745         /*
3746          * Most instructions should not use DISAS_NORETURN, as that suppresses
3747          * the handling of hflags normally done by gen_eob().  We can
3748          * get here:
3749          * - for exceptions and interrupts
3750          * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
3751          * - for VMRUN because RF/TF handling for the host is done after vmexit,
3752          *   and INHIBIT_IRQ is loaded from the VMCB
3753          * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
3754          *   the helpers themselves handle the tasks normally done by gen_eob().
3755          */
3756         break;
3757     case DISAS_TOO_MANY:
3758         gen_update_cc_op(dc);
3759         gen_jmp_rel_csize(dc, 0, 0);
3760         break;
3761     case DISAS_EOB_NEXT:
3762     case DISAS_EOB_INHIBIT_IRQ:
3763         assert(dc->base.pc_next == dc->pc);
3764         gen_update_eip_cur(dc);
3765         /* fall through */
3766     case DISAS_EOB_ONLY:
3767     case DISAS_EOB_RECHECK_TF:
3768     case DISAS_JUMP:
3769         gen_eob(dc, dc->base.is_jmp);
3770         break;
3771     default:
3772         g_assert_not_reached();
3773     }
3774 }
3775 
3776 static const TranslatorOps i386_tr_ops = {
3777     .init_disas_context = i386_tr_init_disas_context,
3778     .tb_start           = i386_tr_tb_start,
3779     .insn_start         = i386_tr_insn_start,
3780     .translate_insn     = i386_tr_translate_insn,
3781     .tb_stop            = i386_tr_tb_stop,
3782 };
3783 
3784 void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
3785                         int *max_insns, vaddr pc, void *host_pc)
3786 {
3787     DisasContext dc;
3788 
3789     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
3790 }
3791