xref: /qemu/target/i386/tcg/translate.c (revision 43ba160cb4bbb193560eb0d2d7decc4b5fc599fe)
1 /*
2  *  i386 translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "accel/tcg/cpu-mmu-index.h"
24 #include "exec/translation-block.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/translator.h"
28 #include "exec/target_page.h"
29 #include "fpu/softfloat.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 #include "helper-tcg.h"
34 #include "decode-new.h"
35 
36 #include "exec/log.h"
37 
38 #define HELPER_H "helper.h"
39 #include "exec/helper-info.c.inc"
40 #undef  HELPER_H
41 
42 /* Fixes for Windows namespace pollution.  */
43 #undef IN
44 #undef OUT
45 
46 #define PREFIX_REPZ   0x01
47 #define PREFIX_REPNZ  0x02
48 #define PREFIX_LOCK   0x04
49 #define PREFIX_DATA   0x08
50 #define PREFIX_ADR    0x10
51 #define PREFIX_VEX    0x20
52 #define PREFIX_REX    0x40
53 
54 #ifdef TARGET_X86_64
55 # define ctztl  ctz64
56 # define clztl  clz64
57 #else
58 # define ctztl  ctz32
59 # define clztl  clz32
60 #endif
61 
62 /* For a switch indexed by MODRM, match all memory operands for a given OP.  */
63 #define CASE_MODRM_MEM_OP(OP) \
64     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
65     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
66     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
67 
68 #define CASE_MODRM_OP(OP) \
69     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
70     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
71     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
72     case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
73 
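/*
 * Illustrative expansion (editor's note, not in the original source):
 * for OP = 7, CASE_MODRM_MEM_OP(7) covers the ModRM bytes 0x38...0x3f,
 * 0x78...0x7f and 0xb8...0xbf, i.e. every byte whose reg/opcode field
 * is 7 and whose mod field is not 3 (a memory operand).  CASE_MODRM_OP(7)
 * additionally covers 0xf8...0xff (mod == 3, a register operand).
 */
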
74 //#define MACRO_TEST   1
75 
76 /* global register indexes */
77 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
78 static TCGv cpu_eip;
79 static TCGv_i32 cpu_cc_op;
80 static TCGv cpu_regs[CPU_NB_REGS];
81 static TCGv cpu_seg_base[6];
82 static TCGv_i64 cpu_bndl[4];
83 static TCGv_i64 cpu_bndu[4];
84 
85 typedef struct DisasContext {
86     DisasContextBase base;
87 
88     target_ulong pc;       /* pc = eip + cs_base */
89     target_ulong cs_base;  /* base of CS segment */
90     target_ulong pc_save;
91 
92     MemOp aflag;
93     MemOp dflag;
94 
95     int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
96     uint8_t prefix;
97 
98     bool has_modrm;
99     uint8_t modrm;
100 
101 #ifndef CONFIG_USER_ONLY
102     uint8_t cpl;   /* code priv level */
103     uint8_t iopl;  /* i/o priv level */
104 #endif
105     uint8_t vex_l;  /* vex vector length */
106     uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
107     uint8_t popl_esp_hack; /* for correct popl with esp base handling */
108     uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
109 
110 #ifdef TARGET_X86_64
111     uint8_t rex_r;
112     uint8_t rex_x;
113     uint8_t rex_b;
114 #endif
115     bool vex_w; /* used by AVX even on 32-bit processors */
116     bool jmp_opt; /* use direct block chaining for direct jumps */
117     bool cc_op_dirty;
118 
119     CCOp cc_op;  /* current CC operation */
120     int mem_index; /* select memory access functions */
121     uint32_t flags; /* all execution flags */
122     int cpuid_features;
123     int cpuid_ext_features;
124     int cpuid_ext2_features;
125     int cpuid_ext3_features;
126     int cpuid_7_0_ebx_features;
127     int cpuid_7_0_ecx_features;
128     int cpuid_7_1_eax_features;
129     int cpuid_xsave_features;
130 
131     /* TCG local temps */
132     TCGv cc_srcT;
133     TCGv A0;
134     TCGv T0;
135     TCGv T1;
136 
137     /* TCG local register indexes (only used inside old micro ops) */
138     TCGv_i32 tmp2_i32;
139     TCGv_i64 tmp1_i64;
140 
141     sigjmp_buf jmpbuf;
142     TCGOp *prev_insn_start;
143     TCGOp *prev_insn_end;
144 } DisasContext;
145 
146 /*
147  * Point EIP to next instruction before ending translation.
148  * For instructions that can change hflags.
149  */
150 #define DISAS_EOB_NEXT         DISAS_TARGET_0
151 
152 /*
153  * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
154  * already set.  For instructions that activate interrupt shadow.
155  */
156 #define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1
157 
158 /*
159  * Return to the main loop; EIP might have already been updated
160  * but even in that case do not use lookup_and_goto_ptr().
161  */
162 #define DISAS_EOB_ONLY         DISAS_TARGET_2
163 
164 /*
165  * EIP has already been updated.  For jumps that wish to use
166  * lookup_and_goto_ptr().
167  */
168 #define DISAS_JUMP             DISAS_TARGET_3
169 
170 /*
171  * EIP has already been updated.  Use updated value of
172  * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
173  */
174 #define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4
175 
176 /* The environment in which user-only runs is constrained. */
177 #ifdef CONFIG_USER_ONLY
178 #define PE(S)     true
179 #define CPL(S)    3
180 #define IOPL(S)   0
181 #define SVME(S)   false
182 #define GUEST(S)  false
183 #else
184 #define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
185 #define CPL(S)    ((S)->cpl)
186 #define IOPL(S)   ((S)->iopl)
187 #define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
188 #define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
189 #endif
190 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
191 #define VM86(S)   false
192 #define CODE32(S) true
193 #define SS32(S)   true
194 #define ADDSEG(S) false
195 #else
196 #define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
197 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
198 #define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
199 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
200 #endif
201 #if !defined(TARGET_X86_64)
202 #define CODE64(S) false
203 #elif defined(CONFIG_USER_ONLY)
204 #define CODE64(S) true
205 #else
206 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
207 #endif
208 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
209 #define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
210 #else
211 #define LMA(S)    false
212 #endif
213 
214 #ifdef TARGET_X86_64
215 #define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
216 #define REX_W(S)       ((S)->vex_w)
217 #define REX_R(S)       ((S)->rex_r + 0)
218 #define REX_X(S)       ((S)->rex_x + 0)
219 #define REX_B(S)       ((S)->rex_b + 0)
220 #else
221 #define REX_PREFIX(S)  false
222 #define REX_W(S)       false
223 #define REX_R(S)       0
224 #define REX_X(S)       0
225 #define REX_B(S)       0
226 #endif
227 
228 /*
229  * Many system-only helpers are not reachable for user-only.
230  * Define stub generators here, so that we need not either sprinkle
231  * ifdefs through the translator, nor provide the helper function.
232  */
233 #define STUB_HELPER(NAME, ...) \
234     static inline void gen_helper_##NAME(__VA_ARGS__) \
235     { qemu_build_not_reached(); }
236 
237 #ifdef CONFIG_USER_ONLY
238 STUB_HELPER(clgi, TCGv_env env)
239 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
240 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
241 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
242 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
243 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
244 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
245 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
246 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
247 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
248 STUB_HELPER(stgi, TCGv_env env)
249 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
250 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
251 STUB_HELPER(vmmcall, TCGv_env env)
252 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
253 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
254 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
255 #endif
256 
257 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
258 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
259 static void gen_exception_gpf(DisasContext *s);
260 
261 /* i386 shift ops */
262 enum {
263     OP_ROL,
264     OP_ROR,
265     OP_RCL,
266     OP_RCR,
267     OP_SHL,
268     OP_SHR,
269     OP_SHL1, /* undocumented */
270     OP_SAR = 7,
271 };
272 
273 enum {
274     JCC_O,
275     JCC_B,
276     JCC_Z,
277     JCC_BE,
278     JCC_S,
279     JCC_P,
280     JCC_L,
281     JCC_LE,
282 };
283 
284 enum {
285     USES_CC_DST  = 1,
286     USES_CC_SRC  = 2,
287     USES_CC_SRC2 = 4,
288     USES_CC_SRCT = 8,
289 };
290 
291 /* Bit set if the global variable is live after setting CC_OP to X.  */
292 static const uint8_t cc_op_live_[] = {
293     [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
294     [CC_OP_EFLAGS] = USES_CC_SRC,
295     [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
296     [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
297     [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
298     [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
299     [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
300     [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
301     [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
302     [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
303     [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
304     [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
305     [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
306     [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
307     [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
308     [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
309     [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
310     [CC_OP_POPCNT] = USES_CC_DST,
311 };
312 
313 static uint8_t cc_op_live(CCOp op)
314 {
315     uint8_t result;
316     assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));
317 
318     /*
319      * Check that the array is fully populated.  A zero entry would correspond
320      * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
321      * as well.
322      */
323     result = cc_op_live_[op];
324     assert(result);
325     return result;
326 }
327 
328 static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
329 {
330     int dead;
331 
332     if (s->cc_op == op) {
333         return;
334     }
335 
336     /* Discard CC computation that will no longer be used.  */
337     dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
338     if (dead & USES_CC_DST) {
339         tcg_gen_discard_tl(cpu_cc_dst);
340     }
341     if (dead & USES_CC_SRC) {
342         tcg_gen_discard_tl(cpu_cc_src);
343     }
344     if (dead & USES_CC_SRC2) {
345         tcg_gen_discard_tl(cpu_cc_src2);
346     }
347     if (dead & USES_CC_SRCT) {
348         tcg_gen_discard_tl(s->cc_srcT);
349     }
350 
351     if (dirty && s->cc_op == CC_OP_DYNAMIC) {
352         tcg_gen_discard_i32(cpu_cc_op);
353     }
354     s->cc_op_dirty = dirty;
355     s->cc_op = op;
356 }
357 
358 static void set_cc_op(DisasContext *s, CCOp op)
359 {
360     /*
361      * The DYNAMIC setting is translator-only; everything else
362      * will be spilled later.
363      */
364     set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
365 }
366 
367 static void assume_cc_op(DisasContext *s, CCOp op)
368 {
369     set_cc_op_1(s, op, false);
370 }
371 
372 static void gen_update_cc_op(DisasContext *s)
373 {
374     if (s->cc_op_dirty) {
375         tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
376         s->cc_op_dirty = false;
377     }
378 }
379 
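/*
 * Editor's sketch of the lazy-flags protocol implied above (an
 * illustration, not normative): after translating e.g. "sub %ebx, %eax",
 * the raw operands are left in cpu_cc_dst/cpu_cc_src/cc_srcT and
 * CC_OP_SUBL is recorded only in s->cc_op, with cc_op_dirty set.  If the
 * next instruction overwrites the flags, set_cc_op() merely discards the
 * now-dead globals; only when the flags may be consumed at runtime does
 * gen_update_cc_op() spill s->cc_op into the cpu_cc_op global.
 */
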
380 #ifdef TARGET_X86_64
381 
382 #define NB_OP_SIZES 4
383 
384 #else /* !TARGET_X86_64 */
385 
386 #define NB_OP_SIZES 3
387 
388 #endif /* !TARGET_X86_64 */
389 
390 #if HOST_BIG_ENDIAN
391 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
392 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
393 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
394 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
395 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
396 #else
397 #define REG_B_OFFSET 0
398 #define REG_H_OFFSET 1
399 #define REG_W_OFFSET 0
400 #define REG_L_OFFSET 0
401 #define REG_LH_OFFSET 4
402 #endif
403 
404 /* In instruction encodings for byte register accesses the
405  * register number usually indicates "low 8 bits of register N";
406  * however there are some special cases where N 4..7 indicates
407  * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
408  * true for this special case, false otherwise.
409  */
410 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
411 {
412     /* Any time the REX prefix is present, byte registers are uniform */
413     if (reg < 4 || REX_PREFIX(s)) {
414         return false;
415     }
416     return true;
417 }
418 
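/*
 * Editor's example (not in the original source): an instruction such as
 * "mov %ah, %dl" encodes AH as register number 4; without a REX prefix
 * byte_reg_is_xH() returns true and the access means bits 15..8 of EAX.
 * With any REX prefix, register number 4 instead selects SPL, the low
 * byte of RSP, and the function returns false.
 */
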
419 /* Select the size of a push/pop operation.  */
420 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
421 {
422     if (CODE64(s)) {
423         return ot == MO_16 ? MO_16 : MO_64;
424     } else {
425         return ot;
426     }
427 }
428 
429 /* Select the size of the stack pointer.  */
430 static inline MemOp mo_stacksize(DisasContext *s)
431 {
432     return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
433 }
434 
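/*
 * Editor's example (assumption: standard x86 stack semantics): in 64-bit
 * code, pushes and pops are either 16-bit (with an operand-size prefix)
 * or 64-bit, never 32-bit, hence mo_pushpop(s, MO_32) == MO_64 there.
 * The stack pointer width is independent of the operand size: e.g. a
 * 32-bit code segment running with a 16-bit stack segment (SS32 clear)
 * updates SP rather than ESP.
 */
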
435 /* Compute the result of writing t0 to the OT-sized register REG.
436  *
437  * If DEST is NULL, store the result into the register and return the
438  * register's TCGv.
439  *
440  * If DEST is not NULL, store the result into DEST instead; the return
441  * value is still the register's own TCGv.
442  */
443 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
444 {
445     switch(ot) {
446     case MO_8:
447         if (byte_reg_is_xH(s, reg)) {
448             dest = dest ? dest : cpu_regs[reg - 4];
449             tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
450             return cpu_regs[reg - 4];
451         }
452         dest = dest ? dest : cpu_regs[reg];
453         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
454         break;
455     case MO_16:
456         dest = dest ? dest : cpu_regs[reg];
457         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
458         break;
459     case MO_32:
460         /* For x86_64, this sets the high half of the register to zero.
461            For i386, this is equivalent to a mov. */
462         dest = dest ? dest : cpu_regs[reg];
463         tcg_gen_ext32u_tl(dest, t0);
464         break;
465 #ifdef TARGET_X86_64
466     case MO_64:
467         dest = dest ? dest : cpu_regs[reg];
468         tcg_gen_mov_tl(dest, t0);
469         break;
470 #endif
471     default:
472         g_assert_not_reached();
473     }
474     return cpu_regs[reg];
475 }
476 
477 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
478 {
479     gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
480 }
481 
482 static inline
483 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
484 {
485     if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
486         tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
487     } else {
488         tcg_gen_mov_tl(t0, cpu_regs[reg]);
489     }
490 }
491 
492 static void gen_add_A0_im(DisasContext *s, int val)
493 {
494     tcg_gen_addi_tl(s->A0, s->A0, val);
495     if (!CODE64(s)) {
496         tcg_gen_ext32u_tl(s->A0, s->A0);
497     }
498 }
499 
500 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
501 {
502     tcg_gen_mov_tl(cpu_eip, dest);
503     s->pc_save = -1;
504 }
505 
506 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
507 {
508     /* Using cpu_regs[reg] does not work for xH registers.  */
509     assert(size >= MO_16);
510     if (size == MO_16) {
511         TCGv temp = tcg_temp_new();
512         tcg_gen_add_tl(temp, cpu_regs[reg], val);
513         gen_op_mov_reg_v(s, size, reg, temp);
514     } else {
515         tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], val);
516         tcg_gen_ext_tl(cpu_regs[reg], cpu_regs[reg], size);
517     }
518 }
519 
520 static inline
521 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
522 {
523     gen_op_add_reg(s, size, reg, tcg_constant_tl(val));
524 }
525 
526 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
527 {
528     tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
529 }
530 
531 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
532 {
533     tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
534 }
535 
536 static void gen_update_eip_next(DisasContext *s)
537 {
538     assert(s->pc_save != -1);
539     if (tb_cflags(s->base.tb) & CF_PCREL) {
540         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
541     } else if (CODE64(s)) {
542         tcg_gen_movi_tl(cpu_eip, s->pc);
543     } else {
544         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
545     }
546     s->pc_save = s->pc;
547 }
548 
549 static void gen_update_eip_cur(DisasContext *s)
550 {
551     assert(s->pc_save != -1);
552     if (tb_cflags(s->base.tb) & CF_PCREL) {
553         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
554     } else if (CODE64(s)) {
555         tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
556     } else {
557         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
558     }
559     s->pc_save = s->base.pc_next;
560 }
561 
562 static int cur_insn_len(DisasContext *s)
563 {
564     return s->pc - s->base.pc_next;
565 }
566 
567 static TCGv_i32 cur_insn_len_i32(DisasContext *s)
568 {
569     return tcg_constant_i32(cur_insn_len(s));
570 }
571 
572 static TCGv_i32 eip_next_i32(DisasContext *s)
573 {
574     assert(s->pc_save != -1);
575     /*
576      * This function has two users: lcall_real (always 16-bit mode), and
577      * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
578      * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
579      * why passing a 32-bit value isn't broken.  To avoid using this where
580      * we shouldn't, return -1 in 64-bit mode so that execution goes into
581      * the weeds quickly.
582      */
583     if (CODE64(s)) {
584         return tcg_constant_i32(-1);
585     }
586     if (tb_cflags(s->base.tb) & CF_PCREL) {
587         TCGv_i32 ret = tcg_temp_new_i32();
588         tcg_gen_trunc_tl_i32(ret, cpu_eip);
589         tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
590         return ret;
591     } else {
592         return tcg_constant_i32(s->pc - s->cs_base);
593     }
594 }
595 
596 static TCGv eip_next_tl(DisasContext *s)
597 {
598     assert(s->pc_save != -1);
599     if (tb_cflags(s->base.tb) & CF_PCREL) {
600         TCGv ret = tcg_temp_new();
601         tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
602         return ret;
603     } else if (CODE64(s)) {
604         return tcg_constant_tl(s->pc);
605     } else {
606         return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
607     }
608 }
609 
610 static TCGv eip_cur_tl(DisasContext *s)
611 {
612     assert(s->pc_save != -1);
613     if (tb_cflags(s->base.tb) & CF_PCREL) {
614         TCGv ret = tcg_temp_new();
615         tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
616         return ret;
617     } else if (CODE64(s)) {
618         return tcg_constant_tl(s->base.pc_next);
619     } else {
620         return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
621     }
622 }
623 
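/*
 * Editor's note on the pc_save protocol (an illustration, not from the
 * source): with CF_PCREL a translation block may execute at several
 * virtual addresses, so cpu_eip is only ever adjusted by deltas.
 * s->pc_save records which EIP value cpu_eip currently corresponds to;
 * eip_next_tl() can therefore return cpu_eip + (s->pc - s->pc_save)
 * without knowing the absolute address.  pc_save == -1 means the value
 * of cpu_eip is unknown (see gen_op_jmp_v above), which the assertions
 * in the functions above reject.
 */
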
624 /* Compute SEG:REG into DEST.  SEG is selected from the override segment
625    (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
626    indicate no override.  */
627 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
628                                int def_seg, int ovr_seg)
629 {
630     switch (aflag) {
631 #ifdef TARGET_X86_64
632     case MO_64:
633         if (ovr_seg < 0) {
634             tcg_gen_mov_tl(dest, a0);
635             return;
636         }
637         break;
638 #endif
639     case MO_32:
640         /* 32 bit address */
641         if (ovr_seg < 0 && ADDSEG(s)) {
642             ovr_seg = def_seg;
643         }
644         if (ovr_seg < 0) {
645             tcg_gen_ext32u_tl(dest, a0);
646             return;
647         }
648         break;
649     case MO_16:
650         /* 16 bit address */
651         tcg_gen_ext16u_tl(dest, a0);
652         a0 = dest;
653         if (ovr_seg < 0) {
654             if (ADDSEG(s)) {
655                 ovr_seg = def_seg;
656             } else {
657                 return;
658             }
659         }
660         break;
661     default:
662         g_assert_not_reached();
663     }
664 
665     if (ovr_seg >= 0) {
666         TCGv seg = cpu_seg_base[ovr_seg];
667 
668         if (aflag == MO_64) {
669             tcg_gen_add_tl(dest, a0, seg);
670         } else if (CODE64(s)) {
671             tcg_gen_ext32u_tl(dest, a0);
672             tcg_gen_add_tl(dest, dest, seg);
673         } else {
674             tcg_gen_add_tl(dest, a0, seg);
675             tcg_gen_ext32u_tl(dest, dest);
676         }
677     }
678 }
679 
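/*
 * Editor's worked example (not in the original source): a real-mode
 * "lodsw" addresses DS:SI, so gen_lea_v_seg_dest() with aflag == MO_16
 * computes dest = (uint32_t)(DS.base + (SI & 0xffff)).  In 64-bit mode
 * with no FS/GS override, the MO_64 case returns a0 unchanged because
 * the CS/DS/ES/SS bases are architecturally ignored.
 */
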
680 static void gen_lea_v_seg(DisasContext *s, TCGv a0,
681                           int def_seg, int ovr_seg)
682 {
683     gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
684 }
685 
686 static inline void gen_string_movl_A0_ESI(DisasContext *s)
687 {
688     gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
689 }
690 
691 static inline void gen_string_movl_A0_EDI(DisasContext *s)
692 {
693     gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
694 }
695 
696 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
697 {
698     if (size == MO_TL) {
699         return src;
700     }
701     if (!dst) {
702         dst = tcg_temp_new();
703     }
704     tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
705     return dst;
706 }
707 
708 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
709 {
710     TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
711 
712     tcg_gen_brcondi_tl(cond, tmp, 0, label1);
713 }
714 
715 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
716 {
717     gen_op_j_ecx(s, TCG_COND_EQ, label1);
718 }
719 
720 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
721 {
722     gen_op_j_ecx(s, TCG_COND_NE, label1);
723 }
724 
725 static void gen_set_hflag(DisasContext *s, uint32_t mask)
726 {
727     if ((s->flags & mask) == 0) {
728         TCGv_i32 t = tcg_temp_new_i32();
729         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
730         tcg_gen_ori_i32(t, t, mask);
731         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
732         s->flags |= mask;
733     }
734 }
735 
736 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
737 {
738     if (s->flags & mask) {
739         TCGv_i32 t = tcg_temp_new_i32();
740         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
741         tcg_gen_andi_i32(t, t, ~mask);
742         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
743         s->flags &= ~mask;
744     }
745 }
746 
747 static void gen_set_eflags(DisasContext *s, target_ulong mask)
748 {
749     TCGv t = tcg_temp_new();
750 
751     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
752     tcg_gen_ori_tl(t, t, mask);
753     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
754 }
755 
756 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
757 {
758     TCGv t = tcg_temp_new();
759 
760     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
761     tcg_gen_andi_tl(t, t, ~mask);
762     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
763 }
764 
765 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
766 {
767     switch (ot) {
768     case MO_8:
769         gen_helper_inb(v, tcg_env, n);
770         break;
771     case MO_16:
772         gen_helper_inw(v, tcg_env, n);
773         break;
774     case MO_32:
775         gen_helper_inl(v, tcg_env, n);
776         break;
777     default:
778         g_assert_not_reached();
779     }
780 }
781 
782 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
783 {
784     switch (ot) {
785     case MO_8:
786         gen_helper_outb(tcg_env, v, n);
787         break;
788     case MO_16:
789         gen_helper_outw(tcg_env, v, n);
790         break;
791     case MO_32:
792         gen_helper_outl(tcg_env, v, n);
793         break;
794     default:
795         g_assert_not_reached();
796     }
797 }
798 
799 /*
800  * Validate that access to [port, port + 1<<ot) is allowed.
801  * Raise #GP, or VMM exit if not.
802  */
803 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
804                          uint32_t svm_flags)
805 {
806 #ifdef CONFIG_USER_ONLY
807     /*
808      * We do not implement the ioperm(2) syscall, so the TSS check
809      * will always fail.
810      */
811     gen_exception_gpf(s);
812     return false;
813 #else
814     if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
815         gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
816     }
817     if (GUEST(s)) {
818         gen_update_cc_op(s);
819         gen_update_eip_cur(s);
820         if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
821             svm_flags |= SVM_IOIO_REP_MASK;
822         }
823         svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
824         gen_helper_svm_check_io(tcg_env, port,
825                                 tcg_constant_i32(svm_flags),
826                                 cur_insn_len_i32(s));
827     }
828     return true;
829 #endif
830 }
831 
832 static void gen_movs(DisasContext *s, MemOp ot, TCGv dshift)
833 {
834     gen_string_movl_A0_ESI(s);
835     gen_op_ld_v(s, ot, s->T0, s->A0);
836     gen_string_movl_A0_EDI(s);
837     gen_op_st_v(s, ot, s->T0, s->A0);
838 
839     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
840     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
841 }
842 
843 /* compute all eflags to reg */
844 static void gen_mov_eflags(DisasContext *s, TCGv reg)
845 {
846     TCGv dst, src1, src2;
847     TCGv_i32 cc_op;
848     int live, dead;
849 
850     if (s->cc_op == CC_OP_EFLAGS) {
851         tcg_gen_mov_tl(reg, cpu_cc_src);
852         return;
853     }
854 
855     dst = cpu_cc_dst;
856     src1 = cpu_cc_src;
857     src2 = cpu_cc_src2;
858 
859     /* Take care to not read values that are not live.  */
860     live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
861     dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
862     if (dead) {
863         TCGv zero = tcg_constant_tl(0);
864         if (dead & USES_CC_DST) {
865             dst = zero;
866         }
867         if (dead & USES_CC_SRC) {
868             src1 = zero;
869         }
870         if (dead & USES_CC_SRC2) {
871             src2 = zero;
872         }
873     }
874 
875     if (s->cc_op != CC_OP_DYNAMIC) {
876         cc_op = tcg_constant_i32(s->cc_op);
877     } else {
878         cc_op = cpu_cc_op;
879     }
880     gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
881 }
882 
883 /* compute all eflags to cc_src */
884 static void gen_compute_eflags(DisasContext *s)
885 {
886     gen_mov_eflags(s, cpu_cc_src);
887     set_cc_op(s, CC_OP_EFLAGS);
888 }
889 
890 typedef struct CCPrepare {
891     TCGCond cond;
892     TCGv reg;
893     TCGv reg2;
894     target_ulong imm;
895     bool use_reg2;
896     bool no_setcond;
897 } CCPrepare;
898 
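/*
 * Editor's note (illustrative): a CCPrepare describes a condition that
 * has not been evaluated yet, either cond(reg, reg2) when use_reg2 is
 * set or cond(reg, imm) otherwise.  For example, EFLAGS.C after a SUB
 * becomes { .cond = TCG_COND_LTU, .reg = cc_srcT, .reg2 = cpu_cc_src,
 * .use_reg2 = true }, which a consumer can fold directly into a
 * brcond/setcond/movcond instead of first materializing a 0/1 value.
 */
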
899 static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
900 {
901     if (size == MO_TL) {
902         return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
903     } else {
904         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
905                              .imm = 1ull << ((8 << size) - 1) };
906     }
907 }
908 
909 static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
910 {
911     if (size == MO_TL) {
912         return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
913                              .reg = src };
914     } else {
915         return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
916                              .imm = MAKE_64BIT_MASK(0, 8 << size),
917                              .reg = src };
918     }
919 }
920 
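/*
 * Editor's example (derived from the code above): for size == MO_8,
 * gen_prepare_sign_nz() yields TSTNE(src, 0x80), testing the sign bit
 * without an explicit sign extension, while gen_prepare_val_nz() yields
 * TSTEQ or TSTNE against the mask 0xff, testing whether the low byte is
 * zero.  Only for size == MO_TL do they fall back to plain LT/EQ/NE
 * comparisons against zero.
 */
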
921 /* compute eflags.C, trying to store it in reg if not NULL */
922 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
923 {
924     MemOp size;
925 
926     switch (s->cc_op) {
927     case CC_OP_SUBB ... CC_OP_SUBQ:
928         /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
929         size = s->cc_op - CC_OP_SUBB;
930         tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
931         tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
932         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
933                              .reg2 = cpu_cc_src, .use_reg2 = true };
934 
935     case CC_OP_ADDB ... CC_OP_ADDQ:
936         /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
937         size = cc_op_size(s->cc_op);
938         tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
939         tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
940         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
941                              .reg2 = cpu_cc_src, .use_reg2 = true };
942 
943     case CC_OP_LOGICB ... CC_OP_LOGICQ:
944     case CC_OP_POPCNT:
945         return (CCPrepare) { .cond = TCG_COND_NEVER };
946 
947     case CC_OP_INCB ... CC_OP_INCQ:
948     case CC_OP_DECB ... CC_OP_DECQ:
949         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
950                              .no_setcond = true };
951 
952     case CC_OP_SHLB ... CC_OP_SHLQ:
953         /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
954         size = cc_op_size(s->cc_op);
955         return gen_prepare_sign_nz(cpu_cc_src, size);
956 
957     case CC_OP_MULB ... CC_OP_MULQ:
958         return (CCPrepare) { .cond = TCG_COND_NE,
959                              .reg = cpu_cc_src };
960 
961     case CC_OP_BMILGB ... CC_OP_BMILGQ:
962         size = cc_op_size(s->cc_op);
963         return gen_prepare_val_nz(cpu_cc_src, size, true);
964 
965     case CC_OP_BLSIB ... CC_OP_BLSIQ:
966         size = cc_op_size(s->cc_op);
967         return gen_prepare_val_nz(cpu_cc_src, size, false);
968 
969     case CC_OP_ADCX:
970     case CC_OP_ADCOX:
971         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
972                              .no_setcond = true };
973 
974     case CC_OP_EFLAGS:
975     case CC_OP_SARB ... CC_OP_SARQ:
976         /* CC_SRC & 1 */
977         return (CCPrepare) { .cond = TCG_COND_TSTNE,
978                              .reg = cpu_cc_src, .imm = CC_C };
979 
980     default:
981        /* The need to compute only C from CC_OP_DYNAMIC is important
982           in efficiently implementing e.g. INC at the start of a TB.  */
983        gen_update_cc_op(s);
984        if (!reg) {
985            reg = tcg_temp_new();
986        }
987        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
988                                cpu_cc_src2, cpu_cc_op);
989        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
990                             .no_setcond = true };
991     }
992 }
993 
994 /* compute eflags.P, trying to store it in reg if not NULL */
995 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
996 {
997     gen_compute_eflags(s);
998     return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
999                          .imm = CC_P };
1000 }
1001 
1002 /* compute eflags.S, trying to store it in reg if not NULL */
1003 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1004 {
1005     switch (s->cc_op) {
1006     case CC_OP_DYNAMIC:
1007         gen_compute_eflags(s);
1008         /* FALLTHRU */
1009     case CC_OP_EFLAGS:
1010     case CC_OP_ADCX:
1011     case CC_OP_ADOX:
1012     case CC_OP_ADCOX:
1013         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1014                              .imm = CC_S };
1015     case CC_OP_POPCNT:
1016         return (CCPrepare) { .cond = TCG_COND_NEVER };
1017     default:
1018         return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
1019     }
1020 }
1021 
1022 /* compute eflags.O, trying to store it in reg if not NULL */
1023 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1024 {
1025     switch (s->cc_op) {
1026     case CC_OP_ADOX:
1027     case CC_OP_ADCOX:
1028         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1029                              .no_setcond = true };
1030     case CC_OP_LOGICB ... CC_OP_LOGICQ:
1031     case CC_OP_POPCNT:
1032         return (CCPrepare) { .cond = TCG_COND_NEVER };
1033     case CC_OP_MULB ... CC_OP_MULQ:
1034         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
1035     default:
1036         gen_compute_eflags(s);
1037         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1038                              .imm = CC_O };
1039     }
1040 }
1041 
1042 /* compute eflags.Z, trying to store it in reg if not NULL */
1043 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1044 {
1045     switch (s->cc_op) {
1046     case CC_OP_EFLAGS:
1047     case CC_OP_ADCX:
1048     case CC_OP_ADOX:
1049     case CC_OP_ADCOX:
1050         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1051                              .imm = CC_Z };
1052     case CC_OP_DYNAMIC:
1053         gen_update_cc_op(s);
1054         if (!reg) {
1055             reg = tcg_temp_new();
1056         }
1057         gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
1058         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
1059     case CC_OP_POPCNT:
1060         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
1061     default:
1062         {
1063             MemOp size = cc_op_size(s->cc_op);
1064             return gen_prepare_val_nz(cpu_cc_dst, size, true);
1065         }
1066     }
1067 }
1068 
1069 /* return how to compute jump opcode 'b'.  'reg' can be clobbered
1070  * if needed; it may be used for CCPrepare.reg if that will
1071  * provide more freedom in the translation of a subsequent setcond. */
1072 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1073 {
1074     int inv, jcc_op, cond;
1075     MemOp size;
1076     CCPrepare cc;
1077 
1078     inv = b & 1;
1079     jcc_op = (b >> 1) & 7;
1080 
1081     switch (s->cc_op) {
1082     case CC_OP_SUBB ... CC_OP_SUBQ:
1083         /* We optimize relational operators for the cmp/jcc case.  */
1084         size = cc_op_size(s->cc_op);
1085         switch (jcc_op) {
1086         case JCC_BE:
1087             tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
1088             tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
1089             cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
1090                                .reg2 = cpu_cc_src, .use_reg2 = true };
1091             break;
1092         case JCC_L:
1093             cond = TCG_COND_LT;
1094             goto fast_jcc_l;
1095         case JCC_LE:
1096             cond = TCG_COND_LE;
1097         fast_jcc_l:
1098             tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
1099             tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
1100             cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
1101                                .reg2 = cpu_cc_src, .use_reg2 = true };
1102             break;
1103 
1104         default:
1105             goto slow_jcc;
1106         }
1107         break;
1108 
1109     case CC_OP_LOGICB ... CC_OP_LOGICQ:
1110         /* Mostly used for test+jump */
1111         size = s->cc_op - CC_OP_LOGICB;
1112         switch (jcc_op) {
1113         case JCC_BE:
1114             /* CF = 0, becomes jz/je */
1115             jcc_op = JCC_Z;
1116             goto slow_jcc;
1117         case JCC_L:
1118             /* OF = 0, becomes js/jns */
1119             jcc_op = JCC_S;
1120             goto slow_jcc;
1121         case JCC_LE:
1122             /* SF or ZF, becomes signed <= 0 */
1123             tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
1124             cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
1125             break;
1126         default:
1127             goto slow_jcc;
1128         }
1129         break;
1130 
1131     default:
1132     slow_jcc:
1133         /* This actually generates good code for JC, JZ and JS.  */
1134         switch (jcc_op) {
1135         case JCC_O:
1136             cc = gen_prepare_eflags_o(s, reg);
1137             break;
1138         case JCC_B:
1139             cc = gen_prepare_eflags_c(s, reg);
1140             break;
1141         case JCC_Z:
1142             cc = gen_prepare_eflags_z(s, reg);
1143             break;
1144         case JCC_BE:
1145             gen_compute_eflags(s);
1146             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1147                                .imm = CC_Z | CC_C };
1148             break;
1149         case JCC_S:
1150             cc = gen_prepare_eflags_s(s, reg);
1151             break;
1152         case JCC_P:
1153             cc = gen_prepare_eflags_p(s, reg);
1154             break;
1155         case JCC_L:
1156             gen_compute_eflags(s);
1157             if (!reg || reg == cpu_cc_src) {
1158                 reg = tcg_temp_new();
1159             }
1160             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1161             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1162                                .imm = CC_O };
1163             break;
1164         default:
1165         case JCC_LE:
1166             gen_compute_eflags(s);
1167             if (!reg || reg == cpu_cc_src) {
1168                 reg = tcg_temp_new();
1169             }
1170             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1171             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1172                                .imm = CC_O | CC_Z };
1173             break;
1174         }
1175         break;
1176     }
1177 
1178     if (inv) {
1179         cc.cond = tcg_invert_cond(cc.cond);
1180     }
1181     return cc;
1182 }
1183 
1184 static void gen_neg_setcc(DisasContext *s, int b, TCGv reg)
1185 {
1186     CCPrepare cc = gen_prepare_cc(s, b, reg);
1187 
1188     if (cc.no_setcond) {
1189         if (cc.cond == TCG_COND_EQ) {
1190             tcg_gen_addi_tl(reg, cc.reg, -1);
1191         } else {
1192             tcg_gen_neg_tl(reg, cc.reg);
1193         }
1194         return;
1195     }
1196 
1197     if (cc.use_reg2) {
1198         tcg_gen_negsetcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1199     } else {
1200         tcg_gen_negsetcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1201     }
1202 }
1203 
1204 static void gen_setcc(DisasContext *s, int b, TCGv reg)
1205 {
1206     CCPrepare cc = gen_prepare_cc(s, b, reg);
1207 
1208     if (cc.no_setcond) {
1209         if (cc.cond == TCG_COND_EQ) {
1210             tcg_gen_xori_tl(reg, cc.reg, 1);
1211         } else {
1212             tcg_gen_mov_tl(reg, cc.reg);
1213         }
1214         return;
1215     }
1216 
1217     if (cc.use_reg2) {
1218         tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1219     } else {
1220         tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1221     }
1222 }
1223 
1224 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1225 {
1226     gen_setcc(s, JCC_B << 1, reg);
1227 }
1228 
1229 /* generate a conditional jump to label 'l1' according to jump opcode
1230    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1231 static inline void gen_jcc_noeob(DisasContext *s, int b, TCGLabel *l1)
1232 {
1233     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1234 
1235     if (cc.use_reg2) {
1236         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1237     } else {
1238         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1239     }
1240 }
1241 
1242 /* Generate a conditional jump to label 'l1' according to jump opcode
1243    value 'b'. In the fast case, T0 is guaranteed not to be used.
1244    One or both of the branches will call gen_jmp_rel, so ensure
1245    cc_op is clean.  */
1246 static inline void gen_jcc(DisasContext *s, int b, TCGLabel *l1)
1247 {
1248     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1249 
1250     /*
1251      * Note that this must be _after_ gen_prepare_cc, because it can change
1252      * the cc_op to CC_OP_EFLAGS (because it's CC_OP_DYNAMIC or because
1253      * it's cheaper to just compute the flags)!
1254      */
1255     gen_update_cc_op(s);
1256     if (cc.use_reg2) {
1257         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1258     } else {
1259         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1260     }
1261 }
1262 
1263 static void gen_stos(DisasContext *s, MemOp ot, TCGv dshift)
1264 {
1265     gen_string_movl_A0_EDI(s);
1266     gen_op_st_v(s, ot, s->T0, s->A0);
1267     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1268 }
1269 
1270 static void gen_lods(DisasContext *s, MemOp ot, TCGv dshift)
1271 {
1272     gen_string_movl_A0_ESI(s);
1273     gen_op_ld_v(s, ot, s->T0, s->A0);
1274     gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1275     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1276 }
1277 
1278 static void gen_scas(DisasContext *s, MemOp ot, TCGv dshift)
1279 {
1280     gen_string_movl_A0_EDI(s);
1281     gen_op_ld_v(s, ot, s->T1, s->A0);
1282     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1283     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1284     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1285     set_cc_op(s, CC_OP_SUBB + ot);
1286 
1287     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1288 }
1289 
1290 static void gen_cmps(DisasContext *s, MemOp ot, TCGv dshift)
1291 {
1292     gen_string_movl_A0_EDI(s);
1293     gen_op_ld_v(s, ot, s->T1, s->A0);
1294     gen_string_movl_A0_ESI(s);
1295     gen_op_ld_v(s, ot, s->T0, s->A0);
1296     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1297     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1298     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1299     set_cc_op(s, CC_OP_SUBB + ot);
1300 
1301     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1302     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1303 }
1304 
1305 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1306 {
1307     if (s->flags & HF_IOBPT_MASK) {
1308 #ifdef CONFIG_USER_ONLY
1309         /* user-mode cpu should not be in IOBPT mode */
1310         g_assert_not_reached();
1311 #else
1312         TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1313         TCGv t_next = eip_next_tl(s);
1314         gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1315 #endif /* CONFIG_USER_ONLY */
1316     }
1317 }
1318 
1319 static void gen_ins(DisasContext *s, MemOp ot, TCGv dshift)
1320 {
1321     TCGv_i32 port = tcg_temp_new_i32();
1322 
1323     gen_string_movl_A0_EDI(s);
1324     /* Note: we must do this dummy write first to be restartable in
1325        case of page fault. */
1326     tcg_gen_movi_tl(s->T0, 0);
1327     gen_op_st_v(s, ot, s->T0, s->A0);
1328     tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]);
1329     tcg_gen_andi_i32(port, port, 0xffff);
1330     gen_helper_in_func(ot, s->T0, port);
1331     gen_op_st_v(s, ot, s->T0, s->A0);
1332     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1333     gen_bpt_io(s, port, ot);
1334 }
1335 
1336 static void gen_outs(DisasContext *s, MemOp ot, TCGv dshift)
1337 {
1338     TCGv_i32 port = tcg_temp_new_i32();
1339     TCGv_i32 value = tcg_temp_new_i32();
1340 
1341     gen_string_movl_A0_ESI(s);
1342     gen_op_ld_v(s, ot, s->T0, s->A0);
1343 
1344     tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]);
1345     tcg_gen_andi_i32(port, port, 0xffff);
1346     tcg_gen_trunc_tl_i32(value, s->T0);
1347     gen_helper_out_func(ot, port, value);
1348     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1349     gen_bpt_io(s, port, ot);
1350 }
1351 
1352 #define REP_MAX 65535
1353 
1354 static void do_gen_rep(DisasContext *s, MemOp ot, TCGv dshift,
1355                        void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
1356                        bool is_repz_nz)
1357 {
1358     TCGLabel *last = gen_new_label();
1359     TCGLabel *loop = gen_new_label();
1360     TCGLabel *done = gen_new_label();
1361 
1362     target_ulong cx_mask = MAKE_64BIT_MASK(0, 8 << s->aflag);
1363     TCGv cx_next = tcg_temp_new();
1364 
1365     /*
1366      * Check if we must translate a single iteration only.  Normally, HF_RF_MASK
1367      * would also limit translation blocks to one instruction, so that gen_eob
1368      * can reset the flag; here however RF is set throughout the repetition, so
1369      * we can plow through until CX/ECX/RCX is zero.
1370      */
1371     bool can_loop =
1372         (!(tb_cflags(s->base.tb) & (CF_USE_ICOUNT | CF_SINGLE_STEP))
1373          && !(s->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK));
1374     bool had_rf = s->flags & HF_RF_MASK;
1375 
1376     /*
1377      * Even if EFLAGS.RF was set on entry (such as if we're on the second or
1378      * later iteration and an exception or interrupt happened), force gen_eob()
1379      * not to clear the flag.  We do that ourselves after the last iteration.
1380      */
1381     s->flags &= ~HF_RF_MASK;
1382 
1383     /*
1384      * For CMPS/SCAS, the CC_OP after a memory fault could come from either
1385      * the previous instruction or the string instruction; but because we
1386      * arrange to keep CC_OP up to date all the time, just mark the whole
1387      * insn as CC_OP_DYNAMIC.
1388      *
1389      * It's not a problem to do this even for instructions that do not
1390      * modify the flags, so do it unconditionally.
1391      */
1392     gen_update_cc_op(s);
1393     tcg_set_insn_start_param(s->base.insn_start, 1, CC_OP_DYNAMIC);
1394 
1395     /* Any iteration at all?  */
1396     tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cpu_regs[R_ECX], cx_mask, done);
1397 
1398     /*
1399      * From now on we operate on the value of CX/ECX/RCX that will be written
1400      * back, which is stored in cx_next.  There can be no carry, so we can zero
1401      * extend here if needed and not do any expensive deposit operations later.
1402      */
1403     tcg_gen_subi_tl(cx_next, cpu_regs[R_ECX], 1);
1404 #ifdef TARGET_X86_64
1405     if (s->aflag == MO_32) {
1406         tcg_gen_ext32u_tl(cx_next, cx_next);
1407         cx_mask = ~0;
1408     }
1409 #endif
1410 
1411     /*
1412      * The last iteration is handled outside the loop, so that cx_next
1413      * can never underflow.
1414      */
1415     if (can_loop) {
1416         tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
1417     }
1418 
1419     gen_set_label(loop);
1420     fn(s, ot, dshift);
1421     tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
1422     gen_update_cc_op(s);
1423 
1424     /* Leave if REP condition fails.  */
1425     if (is_repz_nz) {
1426         int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
1427         gen_jcc_noeob(s, (JCC_Z << 1) | (nz ^ 1), done);
1428         /* gen_prepare_eflags_z never changes cc_op.  */
1429         assert(!s->cc_op_dirty);
1430     }
1431 
1432     if (can_loop) {
1433         tcg_gen_subi_tl(cx_next, cx_next, 1);
1434         tcg_gen_brcondi_tl(TCG_COND_TSTNE, cx_next, REP_MAX, loop);
1435         tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
1436     }
1437 
1438     /*
1439      * Traps or interrupts set RF_MASK if they happen after any iteration
1440      * but the last.  Set it here before giving the main loop a chance to
1441      * execute.  (For faults, seg_helper.c sets the flag as usual).
1442      */
1443     if (!had_rf) {
1444         gen_set_eflags(s, RF_MASK);
1445     }
1446 
1447     /* Go to the main loop but reenter the same instruction.  */
1448     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1449 
1450     if (can_loop) {
1451         /*
1452          * The last iteration needs no conditional jump, even if is_repz_nz,
1453          * because the repeats are ending anyway.
1454          */
1455         gen_set_label(last);
1456         set_cc_op(s, CC_OP_DYNAMIC);
1457         fn(s, ot, dshift);
1458         tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
1459         gen_update_cc_op(s);
1460     }
1461 
1462     /* CX/ECX/RCX is zero, or REPZ/REPNZ broke the repetition.  */
1463     gen_set_label(done);
1464     set_cc_op(s, CC_OP_DYNAMIC);
1465     if (had_rf) {
1466         gen_reset_eflags(s, RF_MASK);
1467     }
1468     gen_jmp_rel_csize(s, 0, 1);
1469 }
1470 
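/*
 * Editor's sketch (pseudo-C, not part of the source) of the code that
 * do_gen_rep() emits when can_loop is true:
 *
 *     if ((CX & cx_mask) == 0) goto done;       // no iterations at all
 *     cx_next = CX - 1;
 *     if ((cx_next & cx_mask) == 0) goto last;  // exactly one iteration
 * loop:
 *     fn(); CX = cx_next;
 *     if (is_repz_nz && ZF ends the repeat) goto done;
 *     cx_next -= 1;
 *     if ((cx_next & REP_MAX) != 0) goto loop;  // <= 64k iterations per TB
 *     if ((cx_next & cx_mask) == 0) goto last;
 *     set EFLAGS.RF and restart this instruction via the main loop;
 * last:
 *     fn(); CX = cx_next;                       // final iteration
 * done:
 *     clear RF if it was set on entry; jump to the next instruction;
 */
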
1471 static void do_gen_string(DisasContext *s, MemOp ot,
1472                           void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
1473                           bool is_repz_nz)
1474 {
1475     TCGv dshift = tcg_temp_new();
1476     tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
1477     tcg_gen_shli_tl(dshift, dshift, ot);
1478 
1479     if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
1480         do_gen_rep(s, ot, dshift, fn, is_repz_nz);
1481     } else {
1482         fn(s, ot, dshift);
1483     }
1484 }
1485 
1486 static void gen_repz(DisasContext *s, MemOp ot,
1487                      void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
1488 {
1489     do_gen_string(s, ot, fn, false);
1490 }
1491 
1492 static void gen_repz_nz(DisasContext *s, MemOp ot,
1493                         void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
1494 {
1495     do_gen_string(s, ot, fn, true);
1496 }
1497 
1498 static void gen_helper_fp_arith_ST0_FT0(int op)
1499 {
1500     switch (op) {
1501     case 0:
1502         gen_helper_fadd_ST0_FT0(tcg_env);
1503         break;
1504     case 1:
1505         gen_helper_fmul_ST0_FT0(tcg_env);
1506         break;
1507     case 2:
1508         gen_helper_fcom_ST0_FT0(tcg_env);
1509         break;
1510     case 3:
1511         gen_helper_fcom_ST0_FT0(tcg_env);
1512         break;
1513     case 4:
1514         gen_helper_fsub_ST0_FT0(tcg_env);
1515         break;
1516     case 5:
1517         gen_helper_fsubr_ST0_FT0(tcg_env);
1518         break;
1519     case 6:
1520         gen_helper_fdiv_ST0_FT0(tcg_env);
1521         break;
1522     case 7:
1523         gen_helper_fdivr_ST0_FT0(tcg_env);
1524         break;
1525     }
1526 }
1527 
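/*
 * Editor's note (an inference from the duplicated call above, not stated
 * in the source): op values 2 and 3 (FCOM and FCOMP) deliberately emit
 * the same comparison helper; the register-stack pop that distinguishes
 * FCOMP is expected to be generated separately by the caller.
 */
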
1528 /* NOTE the exception in "r" op ordering */
1529 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1530 {
1531     TCGv_i32 tmp = tcg_constant_i32(opreg);
1532     switch (op) {
1533     case 0:
1534         gen_helper_fadd_STN_ST0(tcg_env, tmp);
1535         break;
1536     case 1:
1537         gen_helper_fmul_STN_ST0(tcg_env, tmp);
1538         break;
1539     case 4:
1540         gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1541         break;
1542     case 5:
1543         gen_helper_fsub_STN_ST0(tcg_env, tmp);
1544         break;
1545     case 6:
1546         gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1547         break;
1548     case 7:
1549         gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1550         break;
1551     }
1552 }
1553 
1554 static void gen_exception(DisasContext *s, int trapno)
1555 {
1556     gen_update_cc_op(s);
1557     gen_update_eip_cur(s);
1558     gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1559     s->base.is_jmp = DISAS_NORETURN;
1560 }
1561 
1562 /* Generate #UD for the current instruction.  The assumption here is that
1563    the instruction is known, but it isn't allowed in the current cpu mode.  */
1564 static void gen_illegal_opcode(DisasContext *s)
1565 {
1566     gen_exception(s, EXCP06_ILLOP);
1567 }
1568 
1569 /* Generate #GP for the current instruction. */
1570 static void gen_exception_gpf(DisasContext *s)
1571 {
1572     gen_exception(s, EXCP0D_GPF);
1573 }
1574 
1575 /* Check for cpl == 0; if not, raise #GP and return false. */
1576 static bool check_cpl0(DisasContext *s)
1577 {
1578     if (CPL(s) == 0) {
1579         return true;
1580     }
1581     gen_exception_gpf(s);
1582     return false;
1583 }
1584 
1585 /* XXX: add faster immediate case */
1586 static TCGv gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
1587                              bool is_right, TCGv count)
1588 {
1589     target_ulong mask = (ot == MO_64 ? 63 : 31);
1590     TCGv cc_src = tcg_temp_new();
1591     TCGv tmp = tcg_temp_new();
1592     TCGv hishift;
1593 
1594     switch (ot) {
1595     case MO_16:
1596         /* Note: we implement the Intel behaviour for shift count > 16.
1597            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1598            portion by constructing it as a 32-bit value.  */
1599         if (is_right) {
1600             tcg_gen_deposit_tl(tmp, s->T0, s->T1, 16, 16);
1601             tcg_gen_mov_tl(s->T1, s->T0);
1602             tcg_gen_mov_tl(s->T0, tmp);
1603         } else {
1604             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1605         }
1606         /*
1607          * If TARGET_X86_64 is defined, fall through into the MO_32 case;
1608          * otherwise fall through into the default case.
1609          */
1610     case MO_32:
1611 #ifdef TARGET_X86_64
1612         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
1613         tcg_gen_subi_tl(tmp, count, 1);
1614         if (is_right) {
1615             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
1616             tcg_gen_shr_i64(cc_src, s->T0, tmp);
1617             tcg_gen_shr_i64(s->T0, s->T0, count);
1618         } else {
1619             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
1620             tcg_gen_shl_i64(cc_src, s->T0, tmp);
1621             tcg_gen_shl_i64(s->T0, s->T0, count);
1622             tcg_gen_shri_i64(cc_src, cc_src, 32);
1623             tcg_gen_shri_i64(s->T0, s->T0, 32);
1624         }
1625         break;
1626 #endif
1627     default:
1628         hishift = tcg_temp_new();
1629         tcg_gen_subi_tl(tmp, count, 1);
1630         if (is_right) {
1631             tcg_gen_shr_tl(cc_src, s->T0, tmp);
1632 
1633             /* mask + 1 - count = mask - tmp = mask ^ tmp */
1634             tcg_gen_xori_tl(hishift, tmp, mask);
1635             tcg_gen_shr_tl(s->T0, s->T0, count);
1636             tcg_gen_shl_tl(s->T1, s->T1, hishift);
1637         } else {
1638             tcg_gen_shl_tl(cc_src, s->T0, tmp);
1639 
1640             /* mask + 1 - count = mask - tmp = mask ^ tmp */
1641             tcg_gen_xori_tl(hishift, tmp, mask);
1642             tcg_gen_shl_tl(s->T0, s->T0, count);
1643             tcg_gen_shr_tl(s->T1, s->T1, hishift);
1644 
1645             if (ot == MO_16) {
1646                 /* Only needed if count > 16, for Intel behaviour.  */
1647                 tcg_gen_shri_tl(tmp, s->T1, 1);
1648                 tcg_gen_or_tl(cc_src, cc_src, tmp);
1649             }
1650         }
1651         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1,
1652                            count, tcg_constant_tl(0),
1653                            tcg_constant_tl(0), s->T1);
1654         tcg_gen_or_tl(s->T0, s->T0, s->T1);
1655         break;
1656     }
1657 
1658     return cc_src;
1659 }
1660 
1661 #define X86_MAX_INSN_LENGTH 15
1662 
1663 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
1664 {
1665     uint64_t pc = s->pc;
1666 
1667     /* This is a subsequent insn that crosses a page boundary.  */
1668     if (s->base.num_insns > 1 &&
1669         !translator_is_same_page(&s->base, s->pc + num_bytes - 1)) {
1670         siglongjmp(s->jmpbuf, 2);
1671     }
1672 
1673     s->pc += num_bytes;
1674     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
1675         /* If the instruction's 16th byte is on a different page than the 1st, a
1676          * page fault on the second page wins over the general protection fault
1677          * caused by the instruction being too long.
1678          * This can happen even if the operand is only one byte long!
1679          */
1680         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
1681             (void)translator_ldub(env, &s->base,
1682                                   (s->pc - 1) & TARGET_PAGE_MASK);
1683         }
1684         siglongjmp(s->jmpbuf, 1);
1685     }
1686 
1687     return pc;
1688 }
1689 
1690 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
1691 {
1692     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
1693 }
1694 
1695 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
1696 {
1697     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
1698 }
1699 
1700 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
1701 {
1702     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
1703 }
1704 
1705 #ifdef TARGET_X86_64
1706 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
1707 {
1708     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
1709 }
1710 #endif
1711 
1712 /* Decompose an address.  */
1713 
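/*
 * ModRM is laid out as mod[7:6] reg[5:3] rm[2:0]; an optional SIB byte
 * is scale[7:6] index[5:3] base[2:0], with REX_X/REX_B extending the
 * index and base fields.  In the result, base -1 means no base and
 * base -2 means a RIP-relative address.
 */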
1714 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1715                                     int modrm, bool is_vsib)
1716 {
1717     int def_seg, base, index, scale, mod, rm;
1718     target_long disp;
1719     bool havesib;
1720 
1721     def_seg = R_DS;
1722     index = -1;
1723     scale = 0;
1724     disp = 0;
1725 
1726     mod = (modrm >> 6) & 3;
1727     rm = modrm & 7;
1728     base = rm | REX_B(s);
1729 
1730     if (mod == 3) {
1731         /* Normally filtered out earlier, but including this path
1732            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
1733         goto done;
1734     }
1735 
1736     switch (s->aflag) {
1737     case MO_64:
1738     case MO_32:
1739         havesib = 0;
1740         if (rm == 4) {
1741             int code = x86_ldub_code(env, s);
1742             scale = (code >> 6) & 3;
1743             index = ((code >> 3) & 7) | REX_X(s);
1744             if (index == 4 && !is_vsib) {
1745                 index = -1;  /* no index */
1746             }
1747             base = (code & 7) | REX_B(s);
1748             havesib = 1;
1749         }
1750 
1751         switch (mod) {
1752         case 0:
1753             if ((base & 7) == 5) {
1754                 base = -1;
1755                 disp = (int32_t)x86_ldl_code(env, s);
1756                 if (CODE64(s) && !havesib) {
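                    /* mod=00, rm=101 without SIB: RIP-relative in 64-bit mode. */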
1757                     base = -2;
1758                     disp += s->pc + s->rip_offset;
1759                 }
1760             }
1761             break;
1762         case 1:
1763             disp = (int8_t)x86_ldub_code(env, s);
1764             break;
1765         default:
1766         case 2:
1767             disp = (int32_t)x86_ldl_code(env, s);
1768             break;
1769         }
1770 
1771         /* For correct popl handling with esp.  */
1772         if (base == R_ESP && s->popl_esp_hack) {
1773             disp += s->popl_esp_hack;
1774         }
1775         if (base == R_EBP || base == R_ESP) {
1776             def_seg = R_SS;
1777         }
1778         break;
1779 
1780     case MO_16:
1781         if (mod == 0) {
1782             if (rm == 6) {
1783                 base = -1;
1784                 disp = x86_lduw_code(env, s);
1785                 break;
1786             }
1787         } else if (mod == 1) {
1788             disp = (int8_t)x86_ldub_code(env, s);
1789         } else {
1790             disp = (int16_t)x86_lduw_code(env, s);
1791         }
1792 
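        /* 16-bit addressing: rm selects BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP or BX. */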
1793         switch (rm) {
1794         case 0:
1795             base = R_EBX;
1796             index = R_ESI;
1797             break;
1798         case 1:
1799             base = R_EBX;
1800             index = R_EDI;
1801             break;
1802         case 2:
1803             base = R_EBP;
1804             index = R_ESI;
1805             def_seg = R_SS;
1806             break;
1807         case 3:
1808             base = R_EBP;
1809             index = R_EDI;
1810             def_seg = R_SS;
1811             break;
1812         case 4:
1813             base = R_ESI;
1814             break;
1815         case 5:
1816             base = R_EDI;
1817             break;
1818         case 6:
1819             base = R_EBP;
1820             def_seg = R_SS;
1821             break;
1822         default:
1823         case 7:
1824             base = R_EBX;
1825             break;
1826         }
1827         break;
1828 
1829     default:
1830         g_assert_not_reached();
1831     }
1832 
1833  done:
1834     return (AddressParts){ def_seg, base, index, scale, disp };
1835 }
1836 
1837 /* Compute the address, with a minimum number of TCG ops.  */
1838 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1839 {
1840     TCGv ea = NULL;
1841 
1842     if (a.index >= 0 && !is_vsib) {
1843         if (a.scale == 0) {
1844             ea = cpu_regs[a.index];
1845         } else {
1846             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1847             ea = s->A0;
1848         }
1849         if (a.base >= 0) {
1850             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1851             ea = s->A0;
1852         }
1853     } else if (a.base >= 0) {
1854         ea = cpu_regs[a.base];
1855     }
1856     if (!ea) {
1857         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1858             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1859             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1860         } else {
1861             tcg_gen_movi_tl(s->A0, a.disp);
1862         }
1863         ea = s->A0;
1864     } else if (a.disp != 0) {
1865         tcg_gen_addi_tl(s->A0, ea, a.disp);
1866         ea = s->A0;
1867     }
1868 
1869     return ea;
1870 }
1871 
1872 /* Used for BNDCL, BNDCU, BNDCN.  */
1873 static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
1874                       TCGCond cond, TCGv_i64 bndv)
1875 {
1876     TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
1877     TCGv_i32 t32 = tcg_temp_new_i32();
1878     TCGv_i64 t64 = tcg_temp_new_i64();
1879 
1880     tcg_gen_extu_tl_i64(t64, ea);
1881     if (!CODE64(s)) {
1882         tcg_gen_ext32u_i64(t64, t64);
1883     }
1884     tcg_gen_setcond_i64(cond, t64, t64, bndv);
1885     tcg_gen_extrl_i64_i32(t32, t64);
1886     gen_helper_bndck(tcg_env, t32);
1887 }
1888 
1889 /* generate modrm load of memory or register. */
1890 static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1891 {
1892     int modrm = s->modrm;
1893     int mod, rm;
1894 
1895     mod = (modrm >> 6) & 3;
1896     rm = (modrm & 7) | REX_B(s);
1897     if (mod == 3) {
1898         gen_op_mov_v_reg(s, ot, s->T0, rm);
1899     } else {
1900         gen_lea_modrm(s, decode);
1901         gen_op_ld_v(s, ot, s->T0, s->A0);
1902     }
1903 }
1904 
1905 /* generate modrm store of memory or register. */
1906 static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1907 {
1908     int modrm = s->modrm;
1909     int mod, rm;
1910 
1911     mod = (modrm >> 6) & 3;
1912     rm = (modrm & 7) | REX_B(s);
1913     if (mod == 3) {
1914         gen_op_mov_reg_v(s, ot, rm, s->T0);
1915     } else {
1916         gen_lea_modrm(s, decode);
1917         gen_op_st_v(s, ot, s->T0, s->A0);
1918     }
1919 }
1920 
1921 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1922 {
1923     target_ulong ret;
1924 
1925     switch (ot) {
1926     case MO_8:
1927         ret = x86_ldub_code(env, s);
1928         break;
1929     case MO_16:
1930         ret = x86_lduw_code(env, s);
1931         break;
1932     case MO_32:
1933         ret = x86_ldl_code(env, s);
1934         break;
1935 #ifdef TARGET_X86_64
1936     case MO_64:
1937         ret = x86_ldq_code(env, s);
1938         break;
1939 #endif
1940     default:
1941         g_assert_not_reached();
1942     }
1943     return ret;
1944 }
1945 
1946 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1947 {
1948     uint32_t ret;
1949 
1950     switch (ot) {
1951     case MO_8:
1952         ret = x86_ldub_code(env, s);
1953         break;
1954     case MO_16:
1955         ret = x86_lduw_code(env, s);
1956         break;
1957     case MO_32:
1958 #ifdef TARGET_X86_64
1959     case MO_64:
1960 #endif
1961         ret = x86_ldl_code(env, s);
1962         break;
1963     default:
1964         g_assert_not_reached();
1965     }
1966     return ret;
1967 }
1968 
1969 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1970 {
1971     target_long ret;
1972 
1973     switch (ot) {
1974     case MO_8:
1975         ret = (int8_t) x86_ldub_code(env, s);
1976         break;
1977     case MO_16:
1978         ret = (int16_t) x86_lduw_code(env, s);
1979         break;
1980     case MO_32:
1981         ret = (int32_t) x86_ldl_code(env, s);
1982         break;
1983 #ifdef TARGET_X86_64
1984     case MO_64:
1985         ret = x86_ldq_code(env, s);
1986         break;
1987 #endif
1988     default:
1989         g_assert_not_reached();
1990     }
1991     return ret;
1992 }
1993 
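/*
 * Emit the not-taken path first, falling through to the instruction
 * after the branch, then the taken path jumping to eip+diff.
 */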
1994 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1995                                         TCGLabel *not_taken, TCGLabel *taken)
1996 {
1997     if (not_taken) {
1998         gen_set_label(not_taken);
1999     }
2000     gen_jmp_rel_csize(s, 0, 1);
2001 
2002     gen_set_label(taken);
2003     gen_jmp_rel(s, s->dflag, diff, 0);
2004 }
2005 
2006 static void gen_cmovcc(DisasContext *s, int b, TCGv dest, TCGv src)
2007 {
2008     CCPrepare cc = gen_prepare_cc(s, b, NULL);
2009 
2010     if (!cc.use_reg2) {
2011         cc.reg2 = tcg_constant_tl(cc.imm);
2012     }
2013 
2014     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
2015 }
2016 
2017 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
2018 {
2019     TCGv selector = tcg_temp_new();
2020     tcg_gen_ext16u_tl(selector, seg);
2021     tcg_gen_st32_tl(selector, tcg_env,
2022                     offsetof(CPUX86State,segs[seg_reg].selector));
2023     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
2024 }
2025 
2026 /* Move SRC to seg_reg and determine whether the CPU state may change.
2027    Never call this function with seg_reg == R_CS.  */
2028 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src, bool inhibit_irq)
2029 {
2030     if (PE(s) && !VM86(s)) {
2031         TCGv_i32 sel = tcg_temp_new_i32();
2032 
2033         tcg_gen_trunc_tl_i32(sel, src);
2034         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel);
2035 
2036         /*
2037          * For moves to SS, the SS32 flag may change. For CODE32 only, changes
2038          * to SS, DS and ES may change the ADDSEG flags.
2039          */
2040         if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) {
2041             s->base.is_jmp = DISAS_EOB_NEXT;
2042         }
2043     } else {
2044         gen_op_movl_seg_real(s, seg_reg, src);
2045     }
2046 
2047     /*
2048      * For MOV or POP to SS (but not LSS) translation must always
2049      * stop as a special handling must be done to disable hardware
2050      * interrupts for the next instruction.
2051      *
2052      * This is the last instruction, so it's okay to overwrite
2053      * HF_TF_MASK; the next TB will start with the flag set.
2054      *
2055      * DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT which
2056      * might have been set above.
2057      */
2058     if (inhibit_irq) {
2059         s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2060         s->flags &= ~HF_TF_MASK;
2061     }
2062 }
2063 
2064 static void gen_far_call(DisasContext *s)
2065 {
2066     TCGv_i32 new_cs = tcg_temp_new_i32();
2067     tcg_gen_trunc_tl_i32(new_cs, s->T1);
2068     if (PE(s) && !VM86(s)) {
2069         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
2070                                    tcg_constant_i32(s->dflag - 1),
2071                                    eip_next_tl(s));
2072     } else {
2073         TCGv_i32 new_eip = tcg_temp_new_i32();
2074         tcg_gen_trunc_tl_i32(new_eip, s->T0);
2075         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
2076                               tcg_constant_i32(s->dflag - 1),
2077                               eip_next_i32(s));
2078     }
2079     s->base.is_jmp = DISAS_JUMP;
2080 }
2081 
2082 static void gen_far_jmp(DisasContext *s)
2083 {
2084     if (PE(s) && !VM86(s)) {
2085         TCGv_i32 new_cs = tcg_temp_new_i32();
2086         tcg_gen_trunc_tl_i32(new_cs, s->T1);
2087         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
2088                                   eip_next_tl(s));
2089     } else {
2090         gen_op_movl_seg_real(s, R_CS, s->T1);
2091         gen_op_jmp_v(s, s->T0);
2092     }
2093     s->base.is_jmp = DISAS_JUMP;
2094 }
2095 
2096 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2097 {
2098     /* no SVM activated; fast case */
2099     if (likely(!GUEST(s))) {
2100         return;
2101     }
2102     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2103 }
2104 
2105 static inline void gen_stack_update(DisasContext *s, int addend)
2106 {
2107     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2108 }
2109 
2110 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
2111 {
2112     if (offset) {
2113         tcg_gen_addi_tl(dest, src, offset);
2114         src = dest;
2115     }
2116     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
2117 }
2118 
2119 /* Generate a push. It depends on ss32, addseg and dflag.  */
2120 static void gen_push_v(DisasContext *s, TCGv val)
2121 {
2122     MemOp d_ot = mo_pushpop(s, s->dflag);
2123     MemOp a_ot = mo_stacksize(s);
2124     int size = 1 << d_ot;
2125     TCGv new_esp = tcg_temp_new();
2126 
2127     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
2128 
2129     /* Now reduce the value to the address size and apply SS base.  */
2130     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
2131     gen_op_st_v(s, d_ot, val, s->A0);
2132     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2133 }
2134 
2135 /* Two-step pop: load the value first; ESP is only updated afterwards (gen_pop_update), so a faulting access leaves the stack pointer intact for precise exceptions. */
2136 static MemOp gen_pop_T0(DisasContext *s)
2137 {
2138     MemOp d_ot = mo_pushpop(s, s->dflag);
2139 
2140     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
2141     gen_op_ld_v(s, d_ot, s->T0, s->T0);
2142 
2143     return d_ot;
2144 }
2145 
2146 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2147 {
2148     gen_stack_update(s, 1 << ot);
2149 }
2150 
2151 static void gen_pusha(DisasContext *s)
2152 {
2153     MemOp d_ot = s->dflag;
2154     int size = 1 << d_ot;
2155     int i;
2156 
2157     for (i = 0; i < 8; i++) {
2158         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
2159         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2160     }
2161 
2162     gen_stack_update(s, -8 * size);
2163 }
2164 
2165 static void gen_popa(DisasContext *s)
2166 {
2167     MemOp d_ot = s->dflag;
2168     int size = 1 << d_ot;
2169     int i;
2170 
2171     for (i = 0; i < 8; i++) {
2172         /* ESP is not reloaded */
2173         if (7 - i == R_ESP) {
2174             continue;
2175         }
2176         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2177         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2178         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2179     }
2180 
2181     gen_stack_update(s, 8 * size);
2182 }
2183 
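/*
 * ENTER: push EBP, optionally copy level-1 enclosing frame pointers
 * plus the new frame pointer itself, then allocate esp_addend bytes.
 * The level operand is taken modulo 32, as the ISA specifies.
 */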
2184 static void gen_enter(DisasContext *s, int esp_addend, int level)
2185 {
2186     MemOp d_ot = mo_pushpop(s, s->dflag);
2187     MemOp a_ot = mo_stacksize(s);
2188     int size = 1 << d_ot;
2189 
2190     /* Push BP; compute FrameTemp into T1.  */
2191     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2192     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2193     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2194 
2195     level &= 31;
2196     if (level != 0) {
2197         int i;
2198         if (level > 1) {
2199             TCGv fp = tcg_temp_new();
2200 
2201             /* Copy level-1 pointers from the previous frame.  */
2202             for (i = 1; i < level; ++i) {
2203                 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2204                 gen_op_ld_v(s, d_ot, fp, s->A0);
2205 
2206                 gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2207                 gen_op_st_v(s, d_ot, fp, s->A0);
2208             }
2209         }
2210 
2211         /* Push the current FrameTemp as the last level.  */
2212         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2213         gen_op_st_v(s, d_ot, s->T1, s->A0);
2214     }
2215 
2216     /* Copy the FrameTemp value to EBP.  */
2217     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2218 
2219     /* Compute the final value of ESP.  */
2220     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2221     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2222 }
2223 
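/* LEAVE: reload EBP from the frame base and point ESP just above it. */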
2224 static void gen_leave(DisasContext *s)
2225 {
2226     MemOp d_ot = mo_pushpop(s, s->dflag);
2227     MemOp a_ot = mo_stacksize(s);
2228 
2229     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2230     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2231 
2232     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2233 
2234     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2235     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2236 }
2237 
2238 /* Similarly, except that the assumption here is that we don't decode
2239    the instruction at all -- either a missing opcode, an unimplemented
2240    feature, or just a bogus instruction stream.  */
2241 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2242 {
2243     gen_illegal_opcode(s);
2244 
2245     if (qemu_loglevel_mask(LOG_UNIMP)) {
2246         FILE *logfile = qemu_log_trylock();
2247         if (logfile) {
2248             target_ulong pc = s->base.pc_next, end = s->pc;
2249 
2250             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2251             for (; pc < end; ++pc) {
2252                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2253             }
2254             fprintf(logfile, "\n");
2255             qemu_log_unlock(logfile);
2256         }
2257     }
2258 }
2259 
2260 /* an interrupt is different from an exception because of the
2261    privilege checks */
2262 static void gen_interrupt(DisasContext *s, uint8_t intno)
2263 {
2264     gen_update_cc_op(s);
2265     gen_update_eip_cur(s);
2266     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2267                                cur_insn_len_i32(s));
2268     s->base.is_jmp = DISAS_NORETURN;
2269 }
2270 
2271 /* Clear BND registers during legacy branches.  */
2272 static void gen_bnd_jmp(DisasContext *s)
2273 {
2274     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2275        and if the BNDREGs are known to be in use (non-zero) already.
2276        The helper itself will check BNDPRESERVE at runtime.  */
2277     if ((s->prefix & PREFIX_REPNZ) == 0
2278         && (s->flags & HF_MPX_EN_MASK) != 0
2279         && (s->flags & HF_MPX_IU_MASK) != 0) {
2280         gen_helper_bnd_jmp(tcg_env);
2281     }
2282 }
2283 
2284 /*
2285  * Generate an end of block, including common tasks such as generating
2286  * single step traps, resetting the RF flag, and handling the interrupt
2287  * shadow.
2288  */
2289 static void
2290 gen_eob(DisasContext *s, int mode)
2291 {
2292     bool inhibit_reset;
2293 
2294     gen_update_cc_op(s);
2295 
2296     /* If several instructions disable interrupts, only the first does it.  */
2297     inhibit_reset = false;
2298     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2299         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2300         inhibit_reset = true;
2301     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2302         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2303     }
2304 
2305     if (s->flags & HF_RF_MASK) {
2306         gen_reset_eflags(s, RF_MASK);
2307     }
2308     if (mode == DISAS_EOB_RECHECK_TF) {
2309         gen_helper_rechecking_single_step(tcg_env);
2310         tcg_gen_exit_tb(NULL, 0);
2311     } else if (s->flags & HF_TF_MASK) {
2312         gen_helper_single_step(tcg_env);
2313     } else if (mode == DISAS_JUMP &&
2314                /* give irqs a chance to happen */
2315                !inhibit_reset) {
2316         tcg_gen_lookup_and_goto_ptr();
2317     } else {
2318         tcg_gen_exit_tb(NULL, 0);
2319     }
2320 
2321     s->base.is_jmp = DISAS_NORETURN;
2322 }
2323 
2324 /* Jump to eip+diff, truncating the result to OT. */
2325 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2326 {
2327     bool use_goto_tb = s->jmp_opt;
2328     target_ulong mask = -1;
2329     target_ulong new_pc = s->pc + diff;
2330     target_ulong new_eip = new_pc - s->cs_base;
2331 
2332     assert(!s->cc_op_dirty);
2333 
2334     /* In 64-bit mode, operand size is fixed at 64 bits. */
2335     if (!CODE64(s)) {
2336         if (ot == MO_16) {
2337             mask = 0xffff;
2338             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2339                 use_goto_tb = false;
2340             }
2341         } else {
2342             mask = 0xffffffff;
2343         }
2344     }
2345     new_eip &= mask;
2346 
2347     if (tb_cflags(s->base.tb) & CF_PCREL) {
2348         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2349         /*
2350          * If we can prove the branch does not leave the page and we have
2351          * no extra masking to apply (data16 branch in code32, see above),
2352          * then we have also proven that the addition does not wrap.
2353          */
2354         if (!use_goto_tb || !translator_is_same_page(&s->base, new_pc)) {
2355             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2356             use_goto_tb = false;
2357         }
2358     } else if (!CODE64(s)) {
2359         new_pc = (uint32_t)(new_eip + s->cs_base);
2360     }
2361 
2362     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2363         /* jump to same page: we can use a direct jump */
2364         tcg_gen_goto_tb(tb_num);
2365         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2366             tcg_gen_movi_tl(cpu_eip, new_eip);
2367         }
2368         tcg_gen_exit_tb(s->base.tb, tb_num);
2369         s->base.is_jmp = DISAS_NORETURN;
2370     } else {
2371         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2372             tcg_gen_movi_tl(cpu_eip, new_eip);
2373         }
2374         if (s->jmp_opt) {
2375             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2376         } else {
2377             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2378         }
2379     }
2380 }
2381 
2382 /* Jump to eip+diff, truncating to the current code size. */
2383 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2384 {
2385     /* CODE64 ignores the OT argument, so we need not consider it. */
2386     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2387 }
2388 
2389 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2390 {
2391     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2392     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2393 }
2394 
2395 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2396 {
2397     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2398     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2399 }
2400 
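/*
 * 128-bit (XMM) memory access.  With AVX the access is treated as
 * atomic when aligned (MO_ATOM_IFALIGN); otherwise it may be split
 * into two 8-byte halves (MO_ATOM_IFALIGN_PAIR).
 */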
2401 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2402 {
2403     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2404                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2405     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2406     int mem_index = s->mem_index;
2407     TCGv_i128 t = tcg_temp_new_i128();
2408 
2409     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2410     tcg_gen_st_i128(t, tcg_env, offset);
2411 }
2412 
2413 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2414 {
2415     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2416                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2417     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2418     int mem_index = s->mem_index;
2419     TCGv_i128 t = tcg_temp_new_i128();
2420 
2421     tcg_gen_ld_i128(t, tcg_env, offset);
2422     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2423 }
2424 
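/*
 * 256-bit (YMM) accesses are split into two 16-byte halves; only the
 * low half carries the 32-byte alignment check when ALIGN is set.
 */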
2425 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2426 {
2427     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2428     int mem_index = s->mem_index;
2429     TCGv_i128 t0 = tcg_temp_new_i128();
2430     TCGv_i128 t1 = tcg_temp_new_i128();
2431     TCGv a0_hi = tcg_temp_new();
2432 
2433     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2434     tcg_gen_addi_tl(a0_hi, s->A0, 16);
2435     tcg_gen_qemu_ld_i128(t1, a0_hi, mem_index, mop);
2436 
2437     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2438     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2439 }
2440 
2441 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2442 {
2443     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2444     int mem_index = s->mem_index;
2445     TCGv_i128 t = tcg_temp_new_i128();
2446     TCGv a0_hi = tcg_temp_new();
2447 
2448     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2449     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2450     tcg_gen_addi_tl(a0_hi, s->A0, 16);
2451     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2452     tcg_gen_qemu_st_i128(t, a0_hi, mem_index, mop);
2453 }
2454 
2455 #include "emit.c.inc"
2456 
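/*
 * Legacy x87 decoder for the D8-DF escape opcodes.  OP below packs the
 * low three bits of the opcode byte with the ModRM reg field:
 * ((b & 7) << 3) | reg.
 */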
2457 static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
2458 {
2459     bool update_fip = true;
2460     int b = decode->b;
2461     int modrm = s->modrm;
2462     int mod, rm, op;
2463 
2464     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2465         /* if CR0.EM or CR0.TS are set, generate an FPU exception */
2466         /* XXX: what to do if illegal op? */
2467         gen_exception(s, EXCP07_PREX);
2468         return;
2469     }
2470     mod = (modrm >> 6) & 3;
2471     rm = modrm & 7;
2472     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2473     if (mod != 3) {
2474         /* memory op */
2475         TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2476         TCGv last_addr = tcg_temp_new();
2477         bool update_fdp = true;
2478 
2479         tcg_gen_mov_tl(last_addr, ea);
2480         gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
2481 
2482         switch (op) {
2483         case 0x00 ... 0x07: /* fxxxs */
2484         case 0x10 ... 0x17: /* fixxxl */
2485         case 0x20 ... 0x27: /* fxxxl */
2486         case 0x30 ... 0x37: /* fixxx */
2487             {
2488                 int op1;
2489                 op1 = op & 7;
2490 
2491                 switch (op >> 4) {
2492                 case 0:
2493                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2494                                         s->mem_index, MO_LEUL);
2495                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2496                     break;
2497                 case 1:
2498                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2499                                         s->mem_index, MO_LEUL);
2500                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2501                     break;
2502                 case 2:
2503                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2504                                         s->mem_index, MO_LEUQ);
2505                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2506                     break;
2507                 case 3:
2508                 default:
2509                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2510                                         s->mem_index, MO_LESW);
2511                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2512                     break;
2513                 }
2514 
2515                 gen_helper_fp_arith_ST0_FT0(op1);
2516                 if (op1 == 3) {
2517                     /* fcomp needs pop */
2518                     gen_helper_fpop(tcg_env);
2519                 }
2520             }
2521             break;
2522         case 0x08: /* flds */
2523         case 0x0a: /* fsts */
2524         case 0x0b: /* fstps */
2525         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2526         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2527         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2528             switch (op & 7) {
2529             case 0:
2530                 switch (op >> 4) {
2531                 case 0:
2532                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2533                                         s->mem_index, MO_LEUL);
2534                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2535                     break;
2536                 case 1:
2537                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2538                                         s->mem_index, MO_LEUL);
2539                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2540                     break;
2541                 case 2:
2542                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2543                                         s->mem_index, MO_LEUQ);
2544                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2545                     break;
2546                 case 3:
2547                 default:
2548                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2549                                         s->mem_index, MO_LESW);
2550                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2551                     break;
2552                 }
2553                 break;
2554             case 1:
2555                 /* XXX: the corresponding CPUID bit must be tested! */
2556                 switch (op >> 4) {
2557                 case 1:
2558                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2559                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2560                                         s->mem_index, MO_LEUL);
2561                     break;
2562                 case 2:
2563                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2564                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2565                                         s->mem_index, MO_LEUQ);
2566                     break;
2567                 case 3:
2568                 default:
2569                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2570                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2571                                         s->mem_index, MO_LEUW);
2572                     break;
2573                 }
2574                 gen_helper_fpop(tcg_env);
2575                 break;
2576             default:
2577                 switch (op >> 4) {
2578                 case 0:
2579                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2580                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2581                                         s->mem_index, MO_LEUL);
2582                     break;
2583                 case 1:
2584                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2585                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2586                                         s->mem_index, MO_LEUL);
2587                     break;
2588                 case 2:
2589                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2590                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2591                                         s->mem_index, MO_LEUQ);
2592                     break;
2593                 case 3:
2594                 default:
2595                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2596                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2597                                         s->mem_index, MO_LEUW);
2598                     break;
2599                 }
2600                 if ((op & 7) == 3) {
2601                     gen_helper_fpop(tcg_env);
2602                 }
2603                 break;
2604             }
2605             break;
2606         case 0x0c: /* fldenv mem */
2607             gen_helper_fldenv(tcg_env, s->A0,
2608                               tcg_constant_i32(s->dflag - 1));
2609             update_fip = update_fdp = false;
2610             break;
2611         case 0x0d: /* fldcw mem */
2612             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2613                                 s->mem_index, MO_LEUW);
2614             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2615             update_fip = update_fdp = false;
2616             break;
2617         case 0x0e: /* fnstenv mem */
2618             gen_helper_fstenv(tcg_env, s->A0,
2619                               tcg_constant_i32(s->dflag - 1));
2620             update_fip = update_fdp = false;
2621             break;
2622         case 0x0f: /* fnstcw mem */
2623             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2624             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2625                                 s->mem_index, MO_LEUW);
2626             update_fip = update_fdp = false;
2627             break;
2628         case 0x1d: /* fldt mem */
2629             gen_helper_fldt_ST0(tcg_env, s->A0);
2630             break;
2631         case 0x1f: /* fstpt mem */
2632             gen_helper_fstt_ST0(tcg_env, s->A0);
2633             gen_helper_fpop(tcg_env);
2634             break;
2635         case 0x2c: /* frstor mem */
2636             gen_helper_frstor(tcg_env, s->A0,
2637                               tcg_constant_i32(s->dflag - 1));
2638             update_fip = update_fdp = false;
2639             break;
2640         case 0x2e: /* fnsave mem */
2641             gen_helper_fsave(tcg_env, s->A0,
2642                              tcg_constant_i32(s->dflag - 1));
2643             update_fip = update_fdp = false;
2644             break;
2645         case 0x2f: /* fnstsw mem */
2646             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2647             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2648                                 s->mem_index, MO_LEUW);
2649             update_fip = update_fdp = false;
2650             break;
2651         case 0x3c: /* fbld */
2652             gen_helper_fbld_ST0(tcg_env, s->A0);
2653             break;
2654         case 0x3e: /* fbstp */
2655             gen_helper_fbst_ST0(tcg_env, s->A0);
2656             gen_helper_fpop(tcg_env);
2657             break;
2658         case 0x3d: /* fildll */
2659             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2660                                 s->mem_index, MO_LEUQ);
2661             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2662             break;
2663         case 0x3f: /* fistpll */
2664             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2665             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2666                                 s->mem_index, MO_LEUQ);
2667             gen_helper_fpop(tcg_env);
2668             break;
2669         default:
2670             goto illegal_op;
2671         }
2672 
2673         if (update_fdp) {
2674             int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
2675 
2676             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2677                            offsetof(CPUX86State,
2678                                     segs[last_seg].selector));
2679             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2680                              offsetof(CPUX86State, fpds));
2681             tcg_gen_st_tl(last_addr, tcg_env,
2682                           offsetof(CPUX86State, fpdp));
2683         }
2684     } else {
2685         /* register float ops */
2686         int opreg = rm;
2687 
2688         switch (op) {
2689         case 0x08: /* fld sti */
2690             gen_helper_fpush(tcg_env);
2691             gen_helper_fmov_ST0_STN(tcg_env,
2692                                     tcg_constant_i32((opreg + 1) & 7));
2693             break;
2694         case 0x09: /* fxchg sti */
2695         case 0x29: /* fxchg4 sti, undocumented op */
2696         case 0x39: /* fxchg7 sti, undocumented op */
2697             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2698             break;
2699         case 0x0a: /* grp d9/2 */
2700             switch (rm) {
2701             case 0: /* fnop */
2702                 /*
2703                  * Check exceptions (FreeBSD FPU probe); this must be
2704                  * treated as I/O because of ferr_irq.
2705                  */
2706                 translator_io_start(&s->base);
2707                 gen_helper_fwait(tcg_env);
2708                 update_fip = false;
2709                 break;
2710             default:
2711                 goto illegal_op;
2712             }
2713             break;
2714         case 0x0c: /* grp d9/4 */
2715             switch (rm) {
2716             case 0: /* fchs */
2717                 gen_helper_fchs_ST0(tcg_env);
2718                 break;
2719             case 1: /* fabs */
2720                 gen_helper_fabs_ST0(tcg_env);
2721                 break;
2722             case 4: /* ftst */
2723                 gen_helper_fldz_FT0(tcg_env);
2724                 gen_helper_fcom_ST0_FT0(tcg_env);
2725                 break;
2726             case 5: /* fxam */
2727                 gen_helper_fxam_ST0(tcg_env);
2728                 break;
2729             default:
2730                 goto illegal_op;
2731             }
2732             break;
2733         case 0x0d: /* grp d9/5 */
2734             {
2735                 switch (rm) {
2736                 case 0:
2737                     gen_helper_fpush(tcg_env);
2738                     gen_helper_fld1_ST0(tcg_env);
2739                     break;
2740                 case 1:
2741                     gen_helper_fpush(tcg_env);
2742                     gen_helper_fldl2t_ST0(tcg_env);
2743                     break;
2744                 case 2:
2745                     gen_helper_fpush(tcg_env);
2746                     gen_helper_fldl2e_ST0(tcg_env);
2747                     break;
2748                 case 3:
2749                     gen_helper_fpush(tcg_env);
2750                     gen_helper_fldpi_ST0(tcg_env);
2751                     break;
2752                 case 4:
2753                     gen_helper_fpush(tcg_env);
2754                     gen_helper_fldlg2_ST0(tcg_env);
2755                     break;
2756                 case 5:
2757                     gen_helper_fpush(tcg_env);
2758                     gen_helper_fldln2_ST0(tcg_env);
2759                     break;
2760                 case 6:
2761                     gen_helper_fpush(tcg_env);
2762                     gen_helper_fldz_ST0(tcg_env);
2763                     break;
2764                 default:
2765                     goto illegal_op;
2766                 }
2767             }
2768             break;
2769         case 0x0e: /* grp d9/6 */
2770             switch (rm) {
2771             case 0: /* f2xm1 */
2772                 gen_helper_f2xm1(tcg_env);
2773                 break;
2774             case 1: /* fyl2x */
2775                 gen_helper_fyl2x(tcg_env);
2776                 break;
2777             case 2: /* fptan */
2778                 gen_helper_fptan(tcg_env);
2779                 break;
2780             case 3: /* fpatan */
2781                 gen_helper_fpatan(tcg_env);
2782                 break;
2783             case 4: /* fxtract */
2784                 gen_helper_fxtract(tcg_env);
2785                 break;
2786             case 5: /* fprem1 */
2787                 gen_helper_fprem1(tcg_env);
2788                 break;
2789             case 6: /* fdecstp */
2790                 gen_helper_fdecstp(tcg_env);
2791                 break;
2792             default:
2793             case 7: /* fincstp */
2794                 gen_helper_fincstp(tcg_env);
2795                 break;
2796             }
2797             break;
2798         case 0x0f: /* grp d9/7 */
2799             switch (rm) {
2800             case 0: /* fprem */
2801                 gen_helper_fprem(tcg_env);
2802                 break;
2803             case 1: /* fyl2xp1 */
2804                 gen_helper_fyl2xp1(tcg_env);
2805                 break;
2806             case 2: /* fsqrt */
2807                 gen_helper_fsqrt(tcg_env);
2808                 break;
2809             case 3: /* fsincos */
2810                 gen_helper_fsincos(tcg_env);
2811                 break;
2812             case 5: /* fscale */
2813                 gen_helper_fscale(tcg_env);
2814                 break;
2815             case 4: /* frndint */
2816                 gen_helper_frndint(tcg_env);
2817                 break;
2818             case 6: /* fsin */
2819                 gen_helper_fsin(tcg_env);
2820                 break;
2821             default:
2822             case 7: /* fcos */
2823                 gen_helper_fcos(tcg_env);
2824                 break;
2825             }
2826             break;
2827         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2828         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2829         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2830             {
2831                 int op1;
2832 
2833                 op1 = op & 7;
2834                 if (op >= 0x20) {
2835                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2836                     if (op >= 0x30) {
2837                         gen_helper_fpop(tcg_env);
2838                     }
2839                 } else {
2840                     gen_helper_fmov_FT0_STN(tcg_env,
2841                                             tcg_constant_i32(opreg));
2842                     gen_helper_fp_arith_ST0_FT0(op1);
2843                 }
2844             }
2845             break;
2846         case 0x02: /* fcom */
2847         case 0x22: /* fcom2, undocumented op */
2848             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2849             gen_helper_fcom_ST0_FT0(tcg_env);
2850             break;
2851         case 0x03: /* fcomp */
2852         case 0x23: /* fcomp3, undocumented op */
2853         case 0x32: /* fcomp5, undocumented op */
2854             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2855             gen_helper_fcom_ST0_FT0(tcg_env);
2856             gen_helper_fpop(tcg_env);
2857             break;
2858         case 0x15: /* da/5 */
2859             switch (rm) {
2860             case 1: /* fucompp */
2861                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2862                 gen_helper_fucom_ST0_FT0(tcg_env);
2863                 gen_helper_fpop(tcg_env);
2864                 gen_helper_fpop(tcg_env);
2865                 break;
2866             default:
2867                 goto illegal_op;
2868             }
2869             break;
2870         case 0x1c:
2871             switch (rm) {
2872             case 0: /* feni (287 only, just do nop here) */
2873                 break;
2874             case 1: /* fdisi (287 only, just do nop here) */
2875                 break;
2876             case 2: /* fclex */
2877                 gen_helper_fclex(tcg_env);
2878                 update_fip = false;
2879                 break;
2880             case 3: /* fninit */
2881                 gen_helper_fninit(tcg_env);
2882                 update_fip = false;
2883                 break;
2884             case 4: /* fsetpm (287 only, just do nop here) */
2885                 break;
2886             default:
2887                 goto illegal_op;
2888             }
2889             break;
2890         case 0x1d: /* fucomi */
2891             if (!(s->cpuid_features & CPUID_CMOV)) {
2892                 goto illegal_op;
2893             }
2894             gen_update_cc_op(s);
2895             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2896             gen_helper_fucomi_ST0_FT0(tcg_env);
2897             assume_cc_op(s, CC_OP_EFLAGS);
2898             break;
2899         case 0x1e: /* fcomi */
2900             if (!(s->cpuid_features & CPUID_CMOV)) {
2901                 goto illegal_op;
2902             }
2903             gen_update_cc_op(s);
2904             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2905             gen_helper_fcomi_ST0_FT0(tcg_env);
2906             assume_cc_op(s, CC_OP_EFLAGS);
2907             break;
2908         case 0x28: /* ffree sti */
2909             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2910             break;
2911         case 0x2a: /* fst sti */
2912             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2913             break;
2914         case 0x2b: /* fstp sti */
2915         case 0x0b: /* fstp1 sti, undocumented op */
2916         case 0x3a: /* fstp8 sti, undocumented op */
2917         case 0x3b: /* fstp9 sti, undocumented op */
2918             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2919             gen_helper_fpop(tcg_env);
2920             break;
2921         case 0x2c: /* fucom st(i) */
2922             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2923             gen_helper_fucom_ST0_FT0(tcg_env);
2924             break;
2925         case 0x2d: /* fucomp st(i) */
2926             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2927             gen_helper_fucom_ST0_FT0(tcg_env);
2928             gen_helper_fpop(tcg_env);
2929             break;
2930         case 0x33: /* de/3 */
2931             switch (rm) {
2932             case 1: /* fcompp */
2933                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2934                 gen_helper_fcom_ST0_FT0(tcg_env);
2935                 gen_helper_fpop(tcg_env);
2936                 gen_helper_fpop(tcg_env);
2937                 break;
2938             default:
2939                 goto illegal_op;
2940             }
2941             break;
2942         case 0x38: /* ffreep sti, undocumented op */
2943             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2944             gen_helper_fpop(tcg_env);
2945             break;
2946         case 0x3c: /* df/4 */
2947             switch (rm) {
2948             case 0:
2949                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2950                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2951                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2952                 break;
2953             default:
2954                 goto illegal_op;
2955             }
2956             break;
2957         case 0x3d: /* fucomip */
2958             if (!(s->cpuid_features & CPUID_CMOV)) {
2959                 goto illegal_op;
2960             }
2961             gen_update_cc_op(s);
2962             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2963             gen_helper_fucomi_ST0_FT0(tcg_env);
2964             gen_helper_fpop(tcg_env);
2965             assume_cc_op(s, CC_OP_EFLAGS);
2966             break;
2967         case 0x3e: /* fcomip */
2968             if (!(s->cpuid_features & CPUID_CMOV)) {
2969                 goto illegal_op;
2970             }
2971             gen_update_cc_op(s);
2972             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2973             gen_helper_fcomi_ST0_FT0(tcg_env);
2974             gen_helper_fpop(tcg_env);
2975             assume_cc_op(s, CC_OP_EFLAGS);
2976             break;
2977         case 0x10 ... 0x13: /* fcmovxx */
2978         case 0x18 ... 0x1b:
2979             {
2980                 int op1;
2981                 TCGLabel *l1;
2982                 static const uint8_t fcmov_cc[8] = {
2983                     (JCC_B << 1),
2984                     (JCC_Z << 1),
2985                     (JCC_BE << 1),
2986                     (JCC_P << 1),
2987                 };
2988 
2989                 if (!(s->cpuid_features & CPUID_CMOV)) {
2990                     goto illegal_op;
2991                 }
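                /*
                 * The low two opcode bits index B/Z/BE/P in fcmov_cc; the
                 * branch below skips the register move, so jump on the
                 * inverted condition: DA opcodes (FCMOVcc) set the JCC
                 * negate bit, DB opcodes (FCMOVNcc) leave it clear.
                 */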
2992                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2993                 l1 = gen_new_label();
2994                 gen_jcc_noeob(s, op1, l1);
2995                 gen_helper_fmov_ST0_STN(tcg_env,
2996                                         tcg_constant_i32(opreg));
2997                 gen_set_label(l1);
2998             }
2999             break;
3000         default:
3001             goto illegal_op;
3002         }
3003     }
3004 
3005     if (update_fip) {
3006         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
3007                        offsetof(CPUX86State, segs[R_CS].selector));
3008         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
3009                          offsetof(CPUX86State, fpcs));
3010         tcg_gen_st_tl(eip_cur_tl(s),
3011                       tcg_env, offsetof(CPUX86State, fpip));
3012     }
3013     return;
3014 
3015  illegal_op:
3016     gen_illegal_opcode(s);
3017 }
3018 
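/*
 * Remaining two-byte (0F xx) opcode groups.  B is decode->b + 0x100 so
 * the case labels below match the historical 0x1xx numbering.
 */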
3019 static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
3020 {
3021     int prefixes = s->prefix;
3022     MemOp dflag = s->dflag;
3023     int b = decode->b + 0x100;
3024     int modrm = s->modrm;
3025     MemOp ot;
3026     int reg, rm, mod, op;
3027 
3028     /* now check op code */
3029     switch (b) {
3030     case 0x1c7: /* RDSEED, RDPID with f3 prefix */
3031         mod = (modrm >> 6) & 3;
3032         switch ((modrm >> 3) & 7) {
3033         case 7:
3034             if (mod != 3 ||
3035                 (s->prefix & PREFIX_REPNZ)) {
3036                 goto illegal_op;
3037             }
3038             if (s->prefix & PREFIX_REPZ) {
3039                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
3040                     goto illegal_op;
3041                 }
3042                 gen_helper_rdpid(s->T0, tcg_env);
3043                 rm = (modrm & 7) | REX_B(s);
3044                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3045                 break;
3046             } else {
3047                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3048                     goto illegal_op;
3049                 }
3050                 goto do_rdrand;
3051             }
3052 
3053         case 6: /* RDRAND */
3054             if (mod != 3 ||
3055                 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
3056                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3057                 goto illegal_op;
3058             }
3059         do_rdrand:
            translator_io_start(&s->base);
            gen_helper_rdrand(s->T0, tcg_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, dflag, rm, s->T0);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;

        default:
            goto illegal_op;
        }
        break;

    case 0x100:
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* sldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_st_modrm(s, decode, ot);
            break;
        case 2: /* lldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
                gen_ld_modrm(s, decode, MO_16);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(tcg_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_st_modrm(s, decode, ot);
            break;
        case 3: /* ltr */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
                gen_ld_modrm(s, decode, MO_16);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(tcg_env, s->tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            gen_ld_modrm(s, decode, MO_16);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(tcg_env, s->T0);
            } else {
                gen_helper_verw(tcg_env, s->T0);
            }
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto illegal_op;
        }
        break;

    case 0x101:
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* sgdt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(s, decode);
            tcg_gen_ld32u_tl(s->T0,
                             tcg_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            /*
             * NB: Despite a confusing description in Intel CPU documentation,
             *     all 32 bits are written regardless of operand size.
             */
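            /*
             * CODE64(s) is 0 or 1, so CODE64(s) + MO_32 widens the store
             * to 64 bits in 64-bit mode (MO_32 + 1 == MO_64).
             */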
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
            gen_helper_monitor(tcg_env, s->A0);
            break;

        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_reset_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_set_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(1): /* sidt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(s, decode);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            /*
             * NB: Despite a confusing description in Intel CPU documentation,
             *     all 32 bits are written regardless of operand size.
             */
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;

        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
            if (!check_cpl0(s)) {
                break;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change.  */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xd8: /* VMRUN */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            /*
             * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
             * The usual gen_eob() handling is performed on vmexit after
             * host state is reloaded.
             */
            gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
                             cur_insn_len_i32(s));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xd9: /* VMMCALL */
            if (!SVME(s)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmmcall(tcg_env);
            break;

        case 0xda: /* VMLOAD */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdb: /* VMSAVE */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdc: /* STGI */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_helper_stgi(tcg_env);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xdd: /* CLGI */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_clgi(tcg_env);
            break;

        case 0xde: /* SKINIT */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
            /* If not intercepted, not implemented -- raise #UD. */
            goto illegal_op;

        case 0xdf: /* INVLPGA */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
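            /*
             * rAX holds the linear address to invalidate, truncated to
             * the current address size.  The ASID in ECX is not used
             * here: the helper simply flushes the page for the current
             * CPU.
             */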
            if (s->aflag == MO_64) {
                tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            } else {
                tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
            }
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(s, decode);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
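            /*
             * With a 16-bit operand size only 24 bits of the base are
             * used; mask off the top byte.
             */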
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(s, decode);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
            break;

        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_st_modrm(s, decode, ot);
            break;
        case 0xee: /* rdpkru */
            if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
            break;

        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ld_modrm(s, decode, MO_16);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
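            /*
             * T0 & 0xf keeps the new PE/MP/EM/TS bits, while T1 & ~0xe
             * keeps the old CR0 with MP/EM/TS cleared but PE intact;
             * OR-ing the two lets LMSW set PE but never clear it.
             */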
            tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(s, decode);
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtsc(tcg_env);
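            /*
             * RDTSCP also returns IA32_TSC_AUX in ECX; the RDPID helper
             * reads that same MSR.
             */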
            gen_helper_rdpid(s->T0, tcg_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;

        default:
            goto illegal_op;
        }
        break;

    case 0x11a:
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
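                /*
                 * cpu_bndu holds the upper bound in 1's complement form
                 * (see bndmk), so invert it before the unsigned compare.
                 */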
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(s, decode, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(s, decode);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = decode->mem;
                if (reg >= 4
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
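                /*
                 * BNDLDX splits the memory operand: base + displacement
                 * (plus segment base) locates the bound-table entry,
                 * while the index register supplies the pointer value
                 * that the helper checks against the stored one.
                 */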
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
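                    /*
                     * The 32-bit helper returns both bounds packed into a
                     * single 64-bit value: lower bound in the low half,
                     * upper bound in the high half.
                     */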
                    gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        break;
    case 0x11b:
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = decode->mem;
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* without a base register, the lower bound is 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
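                /*
                 * The effective address of the memory operand is the
                 * upper bound; BNDMK stores it in 1's complement form.
                 */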
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(s, decode);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = decode->mem;
                if (reg >= 4
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        break;
    default:
        g_assert_not_reached();
    }
    return;
 illegal_op:
    gen_illegal_opcode(s);
}

#include "decode-new.c.inc"

void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

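    /*
     * Expose the fixed CPUX86State fields as named TCG globals so that
     * generated code can reference them directly.
     */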
    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(cpu, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->cc_srcT = tcg_temp_new();
}

static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();
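    /*
     * With PC-relative translation blocks the absolute PC is not known
     * at translation time, so record only the offset within the page.
     */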
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}

static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
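        /*
         * Claim a one-byte instruction so the TB has a nonzero size;
         * the exact length does not matter because the exception is
         * raised before the instruction would execute.
         */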
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

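    /*
     * disas_insn() aborts decoding via siglongjmp: code 1 raises #GP
     * (e.g. the 15-byte instruction limit was exceeded), code 2 backs
     * out of a partially translated instruction so that it can be
     * retried at the start of a new translation block.
     */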
    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        disas_insn(dc, cpu);
        break;
    case 1:
        gen_exception_gpf(dc);
        break;
    case 2:
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        assert(dc->cc_op_dirty == orig_cc_op_dirty);
        assert(dc->cc_op == orig_cc_op);
        assert(dc->pc_save == orig_pc_save);
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte boundary was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * In single-step mode, we generate only one instruction and
             * then raise an exception.
             * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
             * the flag and end the translation block to give pending
             * interrupts a chance to be delivered.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!translator_is_same_page(&dc->base, dc->base.pc_next)) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /*
         * Most instructions should not use DISAS_NORETURN, as that suppresses
         * the handling of hflags normally done by gen_eob().  We can
         * get here:
         * - for exceptions and interrupts
         * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
         * - for VMRUN because RF/TF handling for the host is done after vmexit,
         *   and INHIBIT_IRQ is loaded from the VMCB
         * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
         *   the helpers themselves handle the tasks normally done by gen_eob().
         */
        break;
    case DISAS_TOO_MANY:
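        /*
         * Chain to a new translation block at the current PC: a
         * relative jump with zero displacement.
         */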
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
    case DISAS_EOB_INHIBIT_IRQ:
        assert(dc->base.pc_next == dc->pc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
    case DISAS_EOB_RECHECK_TF:
    case DISAS_JUMP:
        gen_eob(dc, dc->base.is_jmp);
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
};

void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}