1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "accel/tcg/cpu-mmu-index.h"
24 #include "exec/translation-block.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/translator.h"
28 #include "exec/target_page.h"
29 #include "fpu/softfloat.h"
30
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 #include "helper-tcg.h"
34 #include "decode-new.h"
35
36 #include "exec/log.h"
37
38 #define HELPER_H "helper.h"
39 #include "exec/helper-info.c.inc"
40 #undef HELPER_H
41
42 /* Fixes for Windows namespace pollution. */
43 #undef IN
44 #undef OUT
45
46 #define PREFIX_REPZ 0x01
47 #define PREFIX_REPNZ 0x02
48 #define PREFIX_LOCK 0x04
49 #define PREFIX_DATA 0x08
50 #define PREFIX_ADR 0x10
51 #define PREFIX_VEX 0x20
52 #define PREFIX_REX 0x40
53
54 #ifdef TARGET_X86_64
55 # define ctztl ctz64
56 # define clztl clz64
57 #else
58 # define ctztl ctz32
59 # define clztl clz32
60 #endif
61
62 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
63 #define CASE_MODRM_MEM_OP(OP) \
64 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
65 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
66 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
67
68 #define CASE_MODRM_OP(OP) \
69 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
70 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
71 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
72 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
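
/*
 * Illustrative expansion (not part of the original source): for OP = 4,
 * CASE_MODRM_MEM_OP(4) matches every ModRM byte whose reg field is 4 and
 * whose mod field selects a memory operand, i.e.
 *
 *     case 0x20 ... 0x27:    // mod == 0
 *     case 0x60 ... 0x67:    // mod == 1
 *     case 0xa0 ... 0xa7:    // mod == 2
 *
 * CASE_MODRM_OP(4) additionally matches 0xe0 ... 0xe7 (mod == 3, register
 * operands).
 */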
73
74 //#define MACRO_TEST 1
75
76 /* global register indexes */
77 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
78 static TCGv cpu_eip;
79 static TCGv_i32 cpu_cc_op;
80 static TCGv cpu_regs[CPU_NB_REGS];
81 static TCGv cpu_seg_base[6];
82 static TCGv_i64 cpu_bndl[4];
83 static TCGv_i64 cpu_bndu[4];
84
85 typedef struct DisasContext {
86 DisasContextBase base;
87
88 target_ulong pc; /* pc = eip + cs_base */
89 target_ulong cs_base; /* base of CS segment */
90 target_ulong pc_save;
91
92 MemOp aflag;
93 MemOp dflag;
94
95 int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
96 uint8_t prefix;
97
98 bool has_modrm;
99 uint8_t modrm;
100
101 #ifndef CONFIG_USER_ONLY
102 uint8_t cpl; /* code priv level */
103 uint8_t iopl; /* i/o priv level */
104 #endif
105 uint8_t vex_l; /* vex vector length */
106 uint8_t vex_v; /* vex vvvv register, without 1's complement. */
107 uint8_t popl_esp_hack; /* for correct popl with esp base handling */
108 uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
109
110 #ifdef TARGET_X86_64
111 uint8_t rex_r;
112 uint8_t rex_x;
113 uint8_t rex_b;
114 #endif
115 bool vex_w; /* used by AVX even on 32-bit processors */
116 bool jmp_opt; /* use direct block chaining for direct jumps */
117 bool cc_op_dirty;
118
119 CCOp cc_op; /* current CC operation */
120 int mem_index; /* select memory access functions */
121 uint32_t flags; /* all execution flags */
122 int cpuid_features;
123 int cpuid_ext_features;
124 int cpuid_ext2_features;
125 int cpuid_ext3_features;
126 int cpuid_7_0_ebx_features;
127 int cpuid_7_0_ecx_features;
128 int cpuid_7_1_eax_features;
129 int cpuid_xsave_features;
130
131 /* TCG local temps */
132 TCGv cc_srcT;
133 TCGv A0;
134 TCGv T0;
135 TCGv T1;
136
137 /* TCG local register indexes (only used inside old micro ops) */
138 TCGv_i32 tmp2_i32;
139 TCGv_i64 tmp1_i64;
140
141 sigjmp_buf jmpbuf;
142 TCGOp *prev_insn_start;
143 TCGOp *prev_insn_end;
144 } DisasContext;
145
146 /*
147 * Point EIP to next instruction before ending translation.
148 * For instructions that can change hflags.
149 */
150 #define DISAS_EOB_NEXT DISAS_TARGET_0
151
152 /*
153 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
154 * already set. For instructions that activate interrupt shadow.
155 */
156 #define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_1
157
158 /*
159 * Return to the main loop; EIP might have already been updated
160 * but even in that case do not use lookup_and_goto_ptr().
161 */
162 #define DISAS_EOB_ONLY DISAS_TARGET_2
163
164 /*
165 * EIP has already been updated. For jumps that wish to use
166 * lookup_and_goto_ptr()
167 */
168 #define DISAS_JUMP DISAS_TARGET_3
169
170 /*
171 * EIP has already been updated. Use updated value of
172 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
173 */
174 #define DISAS_EOB_RECHECK_TF DISAS_TARGET_4
175
176 /* The environment in which user-only runs is constrained. */
177 #ifdef CONFIG_USER_ONLY
178 #define PE(S) true
179 #define CPL(S) 3
180 #define IOPL(S) 0
181 #define SVME(S) false
182 #define GUEST(S) false
183 #else
184 #define PE(S) (((S)->flags & HF_PE_MASK) != 0)
185 #define CPL(S) ((S)->cpl)
186 #define IOPL(S) ((S)->iopl)
187 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
188 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
189 #endif
190 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
191 #define VM86(S) false
192 #define CODE32(S) true
193 #define SS32(S) true
194 #define ADDSEG(S) false
195 #else
196 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
197 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
198 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
199 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
200 #endif
201 #if !defined(TARGET_X86_64)
202 #define CODE64(S) false
203 #elif defined(CONFIG_USER_ONLY)
204 #define CODE64(S) true
205 #else
206 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
207 #endif
208 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
209 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
210 #else
211 #define LMA(S) false
212 #endif
213
214 #ifdef TARGET_X86_64
215 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
216 #define REX_W(S) ((S)->vex_w)
217 #define REX_R(S) ((S)->rex_r + 0)
218 #define REX_X(S) ((S)->rex_x + 0)
219 #define REX_B(S) ((S)->rex_b + 0)
220 #else
221 #define REX_PREFIX(S) false
222 #define REX_W(S) false
223 #define REX_R(S) 0
224 #define REX_X(S) 0
225 #define REX_B(S) 0
226 #endif
227
228 /*
229 * Many system-only helpers are not reachable for user-only.
230 * Define stub generators here, so that we need not either sprinkle
231 * ifdefs through the translator, nor provide the helper function.
232 */
233 #define STUB_HELPER(NAME, ...) \
234 static inline void gen_helper_##NAME(__VA_ARGS__) \
235 { qemu_build_not_reached(); }
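
/*
 * Illustrative expansion (a mechanical application of the macro above):
 * STUB_HELPER(clgi, TCGv_env env) produces
 *
 *     static inline void gen_helper_clgi(TCGv_env env)
 *     { qemu_build_not_reached(); }
 *
 * so user-only builds still compile calls to gen_helper_clgi(), while
 * qemu_build_not_reached() ensures the call is proven unreachable at
 * build time.
 */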
236
237 #ifdef CONFIG_USER_ONLY
238 STUB_HELPER(clgi, TCGv_env env)
239 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
240 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
241 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
242 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
243 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
244 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
245 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
246 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
247 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
248 STUB_HELPER(stgi, TCGv_env env)
249 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
250 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
251 STUB_HELPER(vmmcall, TCGv_env env)
252 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
253 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
254 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
255 #endif
256
257 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
258 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
259 static void gen_exception_gpf(DisasContext *s);
260
261 /* i386 shift ops */
262 enum {
263 OP_ROL,
264 OP_ROR,
265 OP_RCL,
266 OP_RCR,
267 OP_SHL,
268 OP_SHR,
269 OP_SHL1, /* undocumented */
270 OP_SAR = 7,
271 };
272
273 enum {
274 JCC_O,
275 JCC_B,
276 JCC_Z,
277 JCC_BE,
278 JCC_S,
279 JCC_P,
280 JCC_L,
281 JCC_LE,
282 };
283
284 enum {
285 USES_CC_DST = 1,
286 USES_CC_SRC = 2,
287 USES_CC_SRC2 = 4,
288 USES_CC_SRCT = 8,
289 };
290
291 /* Bit set if the global variable is live after setting CC_OP to X. */
292 static const uint8_t cc_op_live_[] = {
293 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
294 [CC_OP_EFLAGS] = USES_CC_SRC,
295 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
296 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
297 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
298 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
299 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
300 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
301 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
302 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
303 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
304 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
305 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
306 [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
307 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
308 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
309 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
310 [CC_OP_POPCNT] = USES_CC_DST,
311 };
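
/*
 * Example (informational): after set_cc_op(s, CC_OP_ADDB + ot) the table
 * above marks only CC_DST (the result) and CC_SRC (one operand) as live,
 * so set_cc_op_1() below may discard CC_SRC2 and cc_srcT without losing
 * anything needed to recompute EFLAGS later.
 */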
312
static uint8_t cc_op_live(CCOp op)
314 {
315 uint8_t result;
316 assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));
317
318 /*
319 * Check that the array is fully populated. A zero entry would correspond
320 * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
321 * as well.
322 */
323 result = cc_op_live_[op];
324 assert(result);
325 return result;
326 }
327
static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
329 {
330 int dead;
331
332 if (s->cc_op == op) {
333 return;
334 }
335
336 /* Discard CC computation that will no longer be used. */
337 dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
338 if (dead & USES_CC_DST) {
339 tcg_gen_discard_tl(cpu_cc_dst);
340 }
341 if (dead & USES_CC_SRC) {
342 tcg_gen_discard_tl(cpu_cc_src);
343 }
344 if (dead & USES_CC_SRC2) {
345 tcg_gen_discard_tl(cpu_cc_src2);
346 }
347 if (dead & USES_CC_SRCT) {
348 tcg_gen_discard_tl(s->cc_srcT);
349 }
350
351 if (dirty && s->cc_op == CC_OP_DYNAMIC) {
352 tcg_gen_discard_i32(cpu_cc_op);
353 }
354 s->cc_op_dirty = dirty;
355 s->cc_op = op;
356 }
357
static void set_cc_op(DisasContext *s, CCOp op)
359 {
360 /*
361 * The DYNAMIC setting is translator only, everything else
362 * will be spilled later.
363 */
364 set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
365 }
366
static void assume_cc_op(DisasContext *s, CCOp op)
368 {
369 set_cc_op_1(s, op, false);
370 }
371
static void gen_update_cc_op(DisasContext *s)
373 {
374 if (s->cc_op_dirty) {
375 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
376 s->cc_op_dirty = false;
377 }
378 }
379
380 #ifdef TARGET_X86_64
381
382 #define NB_OP_SIZES 4
383
384 #else /* !TARGET_X86_64 */
385
386 #define NB_OP_SIZES 3
387
388 #endif /* !TARGET_X86_64 */
389
390 #if HOST_BIG_ENDIAN
391 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
392 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
393 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
394 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
395 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
396 #else
397 #define REG_B_OFFSET 0
398 #define REG_H_OFFSET 1
399 #define REG_W_OFFSET 0
400 #define REG_L_OFFSET 0
401 #define REG_LH_OFFSET 4
402 #endif
403
404 /* In instruction encodings for byte register accesses the
405 * register number usually indicates "low 8 bits of register N";
406 * however there are some special cases where N 4..7 indicates
407 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
408 * true for this special case, false otherwise.
409 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
411 {
412 /* Any time the REX prefix is present, byte registers are uniform */
413 if (reg < 4 || REX_PREFIX(s)) {
414 return false;
415 }
416 return true;
417 }
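
/*
 * Examples (illustrative): with no REX prefix, reg == 4 encodes AH, so
 * byte_reg_is_xH() returns true and the access targets bits 15..8 of EAX.
 * With any REX prefix present the same encoding means SPL, the low byte
 * of RSP, and the function returns false.
 */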
418
419 /* Select the size of a push/pop operation. */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
421 {
422 if (CODE64(s)) {
423 return ot == MO_16 ? MO_16 : MO_64;
424 } else {
425 return ot;
426 }
427 }
428
429 /* Select the size of the stack pointer. */
static inline MemOp mo_stacksize(DisasContext *s)
431 {
432 return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
433 }
434
435 /* Compute the result of writing t0 to the OT-sized register REG.
436 *
437 * If DEST is NULL, store the result into the register and return the
438 * register's TCGv.
439 *
440 * If DEST is not NULL, store the result into DEST and return the
441 * register's TCGv.
442 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
444 {
445 switch(ot) {
446 case MO_8:
447 if (byte_reg_is_xH(s, reg)) {
448 dest = dest ? dest : cpu_regs[reg - 4];
449 tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
450 return cpu_regs[reg - 4];
451 }
452 dest = dest ? dest : cpu_regs[reg];
453 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
454 break;
455 case MO_16:
456 dest = dest ? dest : cpu_regs[reg];
457 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
458 break;
459 case MO_32:
460 /* For x86_64, this sets the higher half of register to zero.
461 For i386, this is equivalent to a mov. */
462 dest = dest ? dest : cpu_regs[reg];
463 tcg_gen_ext32u_tl(dest, t0);
464 break;
465 #ifdef TARGET_X86_64
466 case MO_64:
467 dest = dest ? dest : cpu_regs[reg];
468 tcg_gen_mov_tl(dest, t0);
469 break;
470 #endif
471 default:
472 g_assert_not_reached();
473 }
474 return cpu_regs[reg];
475 }
476
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
478 {
479 gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
480 }
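
/*
 * Usage sketch (illustrative): writing a 32-bit result to EBX via
 *
 *     gen_op_mov_reg_v(s, MO_32, R_EBX, s->T0);
 *
 * goes through the MO_32 arm above and zero-extends into the full
 * target_ulong, matching the architectural rule that 32-bit writes clear
 * the upper half of a 64-bit GPR.
 */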
481
482 static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
484 {
485 if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
486 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
487 } else {
488 tcg_gen_mov_tl(t0, cpu_regs[reg]);
489 }
490 }
491
static void gen_add_A0_im(DisasContext *s, int val)
493 {
494 tcg_gen_addi_tl(s->A0, s->A0, val);
495 if (!CODE64(s)) {
496 tcg_gen_ext32u_tl(s->A0, s->A0);
497 }
498 }
499
static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
501 {
502 tcg_gen_mov_tl(cpu_eip, dest);
503 s->pc_save = -1;
504 }
505
static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
507 {
508 /* Using cpu_regs[reg] does not work for xH registers. */
509 assert(size >= MO_16);
510 if (size == MO_16) {
511 TCGv temp = tcg_temp_new();
512 tcg_gen_add_tl(temp, cpu_regs[reg], val);
513 gen_op_mov_reg_v(s, size, reg, temp);
514 } else {
515 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], val);
516 tcg_gen_ext_tl(cpu_regs[reg], cpu_regs[reg], size);
517 }
518 }
519
520 static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
522 {
523 gen_op_add_reg(s, size, reg, tcg_constant_tl(val));
524 }
525
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
527 {
528 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
529 }
530
static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
532 {
533 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
534 }
535
static void gen_update_eip_next(DisasContext *s)
537 {
538 assert(s->pc_save != -1);
539 if (tb_cflags(s->base.tb) & CF_PCREL) {
540 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
541 } else if (CODE64(s)) {
542 tcg_gen_movi_tl(cpu_eip, s->pc);
543 } else {
544 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
545 }
546 s->pc_save = s->pc;
547 }
548
static void gen_update_eip_cur(DisasContext *s)
550 {
551 assert(s->pc_save != -1);
552 if (tb_cflags(s->base.tb) & CF_PCREL) {
553 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
554 } else if (CODE64(s)) {
555 tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
556 } else {
557 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
558 }
559 s->pc_save = s->base.pc_next;
560 }
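
/*
 * Note (illustrative): with CF_PCREL, cpu_eip is only ever adjusted by the
 * delta from the last point at which it was known (s->pc_save), so the
 * generated code stays position independent and the TB can be reused at a
 * different virtual address.  Without CF_PCREL the absolute value is
 * simply stored.
 */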
561
static int cur_insn_len(DisasContext *s)
563 {
564 return s->pc - s->base.pc_next;
565 }
566
static TCGv_i32 cur_insn_len_i32(DisasContext *s)
568 {
569 return tcg_constant_i32(cur_insn_len(s));
570 }
571
static TCGv_i32 eip_next_i32(DisasContext *s)
573 {
574 assert(s->pc_save != -1);
575 /*
576 * This function has two users: lcall_real (always 16-bit mode), and
577 * iret_protected (16, 32, or 64-bit mode). IRET only uses the value
578 * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
579 * why passing a 32-bit value isn't broken. To avoid using this where
580 * we shouldn't, return -1 in 64-bit mode so that execution goes into
581 * the weeds quickly.
582 */
583 if (CODE64(s)) {
584 return tcg_constant_i32(-1);
585 }
586 if (tb_cflags(s->base.tb) & CF_PCREL) {
587 TCGv_i32 ret = tcg_temp_new_i32();
588 tcg_gen_trunc_tl_i32(ret, cpu_eip);
589 tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
590 return ret;
591 } else {
592 return tcg_constant_i32(s->pc - s->cs_base);
593 }
594 }
595
static TCGv eip_next_tl(DisasContext *s)
597 {
598 assert(s->pc_save != -1);
599 if (tb_cflags(s->base.tb) & CF_PCREL) {
600 TCGv ret = tcg_temp_new();
601 tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
602 return ret;
603 } else if (CODE64(s)) {
604 return tcg_constant_tl(s->pc);
605 } else {
606 return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
607 }
608 }
609
static TCGv eip_cur_tl(DisasContext *s)
611 {
612 assert(s->pc_save != -1);
613 if (tb_cflags(s->base.tb) & CF_PCREL) {
614 TCGv ret = tcg_temp_new();
615 tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
616 return ret;
617 } else if (CODE64(s)) {
618 return tcg_constant_tl(s->base.pc_next);
619 } else {
620 return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
621 }
622 }
623
624 /* Compute SEG:REG into DEST. SEG is selected from the override segment
625 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
626 indicate no override. */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
628 int def_seg, int ovr_seg)
629 {
630 switch (aflag) {
631 #ifdef TARGET_X86_64
632 case MO_64:
633 if (ovr_seg < 0) {
634 tcg_gen_mov_tl(dest, a0);
635 return;
636 }
637 break;
638 #endif
639 case MO_32:
640 /* 32 bit address */
641 if (ovr_seg < 0 && ADDSEG(s)) {
642 ovr_seg = def_seg;
643 }
644 if (ovr_seg < 0) {
645 tcg_gen_ext32u_tl(dest, a0);
646 return;
647 }
648 break;
649 case MO_16:
650 /* 16 bit address */
651 tcg_gen_ext16u_tl(dest, a0);
652 a0 = dest;
653 if (ovr_seg < 0) {
654 if (ADDSEG(s)) {
655 ovr_seg = def_seg;
656 } else {
657 return;
658 }
659 }
660 break;
661 default:
662 g_assert_not_reached();
663 }
664
665 if (ovr_seg >= 0) {
666 TCGv seg = cpu_seg_base[ovr_seg];
667
668 if (aflag == MO_64) {
669 tcg_gen_add_tl(dest, a0, seg);
670 } else if (CODE64(s)) {
671 tcg_gen_ext32u_tl(dest, a0);
672 tcg_gen_add_tl(dest, dest, seg);
673 } else {
674 tcg_gen_add_tl(dest, a0, seg);
675 tcg_gen_ext32u_tl(dest, dest);
676 }
677 }
678 }
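
/*
 * Worked example (illustrative): for "mov %ax, %fs:(%bx)" in 16-bit code,
 * gen_lea_v_seg_dest(s, MO_16, dest, cpu_regs[R_EBX], R_DS, R_FS) first
 * zero-extends BX to 16 bits, then adds cpu_seg_base[R_FS]; the final
 * 32-bit truncation in the !CODE64 path keeps the linear address within
 * 4GiB, as required outside long mode.
 */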
679
static void gen_lea_v_seg(DisasContext *s, TCGv a0,
681 int def_seg, int ovr_seg)
682 {
683 gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
684 }
685
static inline void gen_string_movl_A0_ESI(DisasContext *s)
687 {
688 gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
689 }
690
static inline void gen_string_movl_A0_EDI(DisasContext *s)
692 {
693 gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
694 }
695
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
697 {
698 if (size == MO_TL) {
699 return src;
700 }
701 if (!dst) {
702 dst = tcg_temp_new();
703 }
704 tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
705 return dst;
706 }
707
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
709 {
710 TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
711
712 tcg_gen_brcondi_tl(cond, tmp, 0, label1);
713 }
714
static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
716 {
717 gen_op_j_ecx(s, TCG_COND_EQ, label1);
718 }
719
static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
721 {
722 gen_op_j_ecx(s, TCG_COND_NE, label1);
723 }
724
static void gen_set_hflag(DisasContext *s, uint32_t mask)
726 {
727 if ((s->flags & mask) == 0) {
728 TCGv_i32 t = tcg_temp_new_i32();
729 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
730 tcg_gen_ori_i32(t, t, mask);
731 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
732 s->flags |= mask;
733 }
734 }
735
static void gen_reset_hflag(DisasContext *s, uint32_t mask)
737 {
738 if (s->flags & mask) {
739 TCGv_i32 t = tcg_temp_new_i32();
740 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
741 tcg_gen_andi_i32(t, t, ~mask);
742 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
743 s->flags &= ~mask;
744 }
745 }
746
static void gen_set_eflags(DisasContext *s, target_ulong mask)
748 {
749 TCGv t = tcg_temp_new();
750
751 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
752 tcg_gen_ori_tl(t, t, mask);
753 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
754 }
755
static void gen_reset_eflags(DisasContext *s, target_ulong mask)
757 {
758 TCGv t = tcg_temp_new();
759
760 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
761 tcg_gen_andi_tl(t, t, ~mask);
762 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
763 }
764
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
766 {
767 switch (ot) {
768 case MO_8:
769 gen_helper_inb(v, tcg_env, n);
770 break;
771 case MO_16:
772 gen_helper_inw(v, tcg_env, n);
773 break;
774 case MO_32:
775 gen_helper_inl(v, tcg_env, n);
776 break;
777 default:
778 g_assert_not_reached();
779 }
780 }
781
static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
783 {
784 switch (ot) {
785 case MO_8:
786 gen_helper_outb(tcg_env, v, n);
787 break;
788 case MO_16:
789 gen_helper_outw(tcg_env, v, n);
790 break;
791 case MO_32:
792 gen_helper_outl(tcg_env, v, n);
793 break;
794 default:
795 g_assert_not_reached();
796 }
797 }
798
799 /*
800 * Validate that access to [port, port + 1<<ot) is allowed.
801 * Raise #GP, or VMM exit if not.
802 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
804 uint32_t svm_flags)
805 {
806 #ifdef CONFIG_USER_ONLY
807 /*
808 * We do not implement the ioperm(2) syscall, so the TSS check
809 * will always fail.
810 */
811 gen_exception_gpf(s);
812 return false;
813 #else
814 if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
815 gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
816 }
817 if (GUEST(s)) {
818 gen_update_cc_op(s);
819 gen_update_eip_cur(s);
820 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
821 svm_flags |= SVM_IOIO_REP_MASK;
822 }
823 svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
824 gen_helper_svm_check_io(tcg_env, port,
825 tcg_constant_i32(svm_flags),
826 cur_insn_len_i32(s));
827 }
828 return true;
829 #endif
830 }
831
static void gen_movs(DisasContext *s, MemOp ot, TCGv dshift)
833 {
834 gen_string_movl_A0_ESI(s);
835 gen_op_ld_v(s, ot, s->T0, s->A0);
836 gen_string_movl_A0_EDI(s);
837 gen_op_st_v(s, ot, s->T0, s->A0);
838
839 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
840 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
841 }
842
843 /* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
845 {
846 TCGv dst, src1, src2;
847 TCGv_i32 cc_op;
848 int live, dead;
849
850 if (s->cc_op == CC_OP_EFLAGS) {
851 tcg_gen_mov_tl(reg, cpu_cc_src);
852 return;
853 }
854
855 dst = cpu_cc_dst;
856 src1 = cpu_cc_src;
857 src2 = cpu_cc_src2;
858
859 /* Take care to not read values that are not live. */
860 live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
861 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
862 if (dead) {
863 TCGv zero = tcg_constant_tl(0);
864 if (dead & USES_CC_DST) {
865 dst = zero;
866 }
867 if (dead & USES_CC_SRC) {
868 src1 = zero;
869 }
870 if (dead & USES_CC_SRC2) {
871 src2 = zero;
872 }
873 }
874
875 if (s->cc_op != CC_OP_DYNAMIC) {
876 cc_op = tcg_constant_i32(s->cc_op);
877 } else {
878 cc_op = cpu_cc_op;
879 }
880 gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
881 }
882
883 /* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
885 {
886 gen_mov_eflags(s, cpu_cc_src);
887 set_cc_op(s, CC_OP_EFLAGS);
888 }
889
890 typedef struct CCPrepare {
891 TCGCond cond;
892 TCGv reg;
893 TCGv reg2;
894 target_ulong imm;
895 bool use_reg2;
896 bool no_setcond;
897 } CCPrepare;
898
static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
900 {
901 if (size == MO_TL) {
902 return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
903 } else {
904 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
905 .imm = 1ull << ((8 << size) - 1) };
906 }
907 }
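
/*
 * Example (illustrative): for size == MO_8 the returned condition is
 * "TSTNE src, 0x80", i.e. SF is read straight from bit 7 of the value
 * with no extra sign extension of the TCG temporary.
 */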
908
static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
910 {
911 if (size == MO_TL) {
912 return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
913 .reg = src };
914 } else {
915 return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
916 .imm = MAKE_64BIT_MASK(0, 8 << size),
917 .reg = src };
918 }
919 }
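
/*
 * Example (illustrative): with size == MO_16 and eqz == true this yields
 * "TSTEQ src, 0xffff", which is exactly ZF for a 16-bit result.
 */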
920
921 /* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
923 {
924 MemOp size;
925
926 switch (s->cc_op) {
927 case CC_OP_SUBB ... CC_OP_SUBQ:
928 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
929 size = s->cc_op - CC_OP_SUBB;
930 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
931 tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
932 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
933 .reg2 = cpu_cc_src, .use_reg2 = true };
934
935 case CC_OP_ADDB ... CC_OP_ADDQ:
936 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
937 size = cc_op_size(s->cc_op);
938 tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
939 tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
940 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
941 .reg2 = cpu_cc_src, .use_reg2 = true };
942
943 case CC_OP_LOGICB ... CC_OP_LOGICQ:
944 case CC_OP_POPCNT:
945 return (CCPrepare) { .cond = TCG_COND_NEVER };
946
947 case CC_OP_INCB ... CC_OP_INCQ:
948 case CC_OP_DECB ... CC_OP_DECQ:
949 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
950 .no_setcond = true };
951
952 case CC_OP_SHLB ... CC_OP_SHLQ:
953 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
954 size = cc_op_size(s->cc_op);
955 return gen_prepare_sign_nz(cpu_cc_src, size);
956
957 case CC_OP_MULB ... CC_OP_MULQ:
958 return (CCPrepare) { .cond = TCG_COND_NE,
959 .reg = cpu_cc_src };
960
961 case CC_OP_BMILGB ... CC_OP_BMILGQ:
962 size = cc_op_size(s->cc_op);
963 return gen_prepare_val_nz(cpu_cc_src, size, true);
964
965 case CC_OP_BLSIB ... CC_OP_BLSIQ:
966 size = cc_op_size(s->cc_op);
967 return gen_prepare_val_nz(cpu_cc_src, size, false);
968
969 case CC_OP_ADCX:
970 case CC_OP_ADCOX:
971 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
972 .no_setcond = true };
973
974 case CC_OP_EFLAGS:
975 case CC_OP_SARB ... CC_OP_SARQ:
976 /* CC_SRC & 1 */
977 return (CCPrepare) { .cond = TCG_COND_TSTNE,
978 .reg = cpu_cc_src, .imm = CC_C };
979
980 default:
981 /* The need to compute only C from CC_OP_DYNAMIC is important
982 in efficiently implementing e.g. INC at the start of a TB. */
983 gen_update_cc_op(s);
984 if (!reg) {
985 reg = tcg_temp_new();
986 }
987 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
988 cpu_cc_src2, cpu_cc_op);
989 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
990 .no_setcond = true };
991 }
992 }
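
/*
 * Worked example (illustrative): after "cmp %bx, %ax" the translator sets
 * CC_OP_SUBW with cc_srcT = AX and CC_SRC = BX, so the CC_OP_SUB* arm
 * above turns a following JC into a single unsigned comparison AX <u BX,
 * with no call to the cc_compute_all helper.
 */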
993
994 /* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
996 {
997 gen_compute_eflags(s);
998 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
999 .imm = CC_P };
1000 }
1001
1002 /* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1004 {
1005 switch (s->cc_op) {
1006 case CC_OP_DYNAMIC:
1007 gen_compute_eflags(s);
1008 /* FALLTHRU */
1009 case CC_OP_EFLAGS:
1010 case CC_OP_ADCX:
1011 case CC_OP_ADOX:
1012 case CC_OP_ADCOX:
1013 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1014 .imm = CC_S };
1015 case CC_OP_POPCNT:
1016 return (CCPrepare) { .cond = TCG_COND_NEVER };
1017 default:
1018 return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
1019 }
1020 }
1021
1022 /* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1024 {
1025 switch (s->cc_op) {
1026 case CC_OP_ADOX:
1027 case CC_OP_ADCOX:
1028 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1029 .no_setcond = true };
1030 case CC_OP_LOGICB ... CC_OP_LOGICQ:
1031 case CC_OP_POPCNT:
1032 return (CCPrepare) { .cond = TCG_COND_NEVER };
1033 case CC_OP_MULB ... CC_OP_MULQ:
1034 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
1035 default:
1036 gen_compute_eflags(s);
1037 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1038 .imm = CC_O };
1039 }
1040 }
1041
1042 /* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1044 {
1045 switch (s->cc_op) {
1046 case CC_OP_EFLAGS:
1047 case CC_OP_ADCX:
1048 case CC_OP_ADOX:
1049 case CC_OP_ADCOX:
1050 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1051 .imm = CC_Z };
1052 case CC_OP_DYNAMIC:
1053 gen_update_cc_op(s);
1054 if (!reg) {
1055 reg = tcg_temp_new();
1056 }
1057 gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
1058 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
1059 case CC_OP_POPCNT:
1060 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
1061 default:
1062 {
1063 MemOp size = cc_op_size(s->cc_op);
1064 return gen_prepare_val_nz(cpu_cc_dst, size, true);
1065 }
1066 }
1067 }
1068
1069 /* return how to compute jump opcode 'b'. 'reg' can be clobbered
1070 * if needed; it may be used for CCPrepare.reg if that will
1071 * provide more freedom in the translation of a subsequent setcond. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1073 {
1074 int inv, jcc_op, cond;
1075 MemOp size;
1076 CCPrepare cc;
1077
1078 inv = b & 1;
1079 jcc_op = (b >> 1) & 7;
1080
1081 switch (s->cc_op) {
1082 case CC_OP_SUBB ... CC_OP_SUBQ:
1083 /* We optimize relational operators for the cmp/jcc case. */
1084 size = cc_op_size(s->cc_op);
1085 switch (jcc_op) {
1086 case JCC_BE:
1087 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
1088 tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
1089 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
1090 .reg2 = cpu_cc_src, .use_reg2 = true };
1091 break;
1092 case JCC_L:
1093 cond = TCG_COND_LT;
1094 goto fast_jcc_l;
1095 case JCC_LE:
1096 cond = TCG_COND_LE;
1097 fast_jcc_l:
1098 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
1099 tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
1100 cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
1101 .reg2 = cpu_cc_src, .use_reg2 = true };
1102 break;
1103
1104 default:
1105 goto slow_jcc;
1106 }
1107 break;
1108
1109 case CC_OP_LOGICB ... CC_OP_LOGICQ:
1110 /* Mostly used for test+jump */
1111 size = s->cc_op - CC_OP_LOGICB;
1112 switch (jcc_op) {
1113 case JCC_BE:
1114 /* CF = 0, becomes jz/je */
1115 jcc_op = JCC_Z;
1116 goto slow_jcc;
1117 case JCC_L:
1118 /* OF = 0, becomes js/jns */
1119 jcc_op = JCC_S;
1120 goto slow_jcc;
1121 case JCC_LE:
1122 /* SF or ZF, becomes signed <= 0 */
1123 tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
1124 cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
1125 break;
1126 default:
1127 goto slow_jcc;
1128 }
1129 break;
1130
1131 default:
1132 slow_jcc:
1133 /* This actually generates good code for JC, JZ and JS. */
1134 switch (jcc_op) {
1135 case JCC_O:
1136 cc = gen_prepare_eflags_o(s, reg);
1137 break;
1138 case JCC_B:
1139 cc = gen_prepare_eflags_c(s, reg);
1140 break;
1141 case JCC_Z:
1142 cc = gen_prepare_eflags_z(s, reg);
1143 break;
1144 case JCC_BE:
1145 gen_compute_eflags(s);
1146 cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1147 .imm = CC_Z | CC_C };
1148 break;
1149 case JCC_S:
1150 cc = gen_prepare_eflags_s(s, reg);
1151 break;
1152 case JCC_P:
1153 cc = gen_prepare_eflags_p(s, reg);
1154 break;
1155 case JCC_L:
1156 gen_compute_eflags(s);
1157 if (!reg || reg == cpu_cc_src) {
1158 reg = tcg_temp_new();
1159 }
1160 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1161 cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1162 .imm = CC_O };
1163 break;
1164 default:
1165 case JCC_LE:
1166 gen_compute_eflags(s);
1167 if (!reg || reg == cpu_cc_src) {
1168 reg = tcg_temp_new();
1169 }
1170 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1171 cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1172 .imm = CC_O | CC_Z };
1173 break;
1174 }
1175 break;
1176 }
1177
1178 if (inv) {
1179 cc.cond = tcg_invert_cond(cc.cond);
1180 }
1181 return cc;
1182 }
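
/*
 * Note (illustrative): 'b' uses the hardware Jcc condition encoding, so
 * b == 0x4 is JZ (jcc_op == JCC_Z, inv == 0) and b == 0x5 is JNZ (same
 * jcc_op, inv == 1); the inversion is applied to the TCG condition at the
 * end rather than threaded through every case above.
 */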
1183
static void gen_neg_setcc(DisasContext *s, int b, TCGv reg)
1185 {
1186 CCPrepare cc = gen_prepare_cc(s, b, reg);
1187
1188 if (cc.no_setcond) {
1189 if (cc.cond == TCG_COND_EQ) {
1190 tcg_gen_addi_tl(reg, cc.reg, -1);
1191 } else {
1192 tcg_gen_neg_tl(reg, cc.reg);
1193 }
1194 return;
1195 }
1196
1197 if (cc.use_reg2) {
1198 tcg_gen_negsetcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1199 } else {
1200 tcg_gen_negsetcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1201 }
1202 }
1203
static void gen_setcc(DisasContext *s, int b, TCGv reg)
1205 {
1206 CCPrepare cc = gen_prepare_cc(s, b, reg);
1207
1208 if (cc.no_setcond) {
1209 if (cc.cond == TCG_COND_EQ) {
1210 tcg_gen_xori_tl(reg, cc.reg, 1);
1211 } else {
1212 tcg_gen_mov_tl(reg, cc.reg);
1213 }
1214 return;
1215 }
1216
1217 if (cc.use_reg2) {
1218 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1219 } else {
1220 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1221 }
1222 }
1223
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1225 {
1226 gen_setcc(s, JCC_B << 1, reg);
1227 }
1228
1229 /* generate a conditional jump to label 'l1' according to jump opcode
1230 value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc_noeob(DisasContext *s, int b, TCGLabel *l1)
1232 {
1233 CCPrepare cc = gen_prepare_cc(s, b, NULL);
1234
1235 if (cc.use_reg2) {
1236 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1237 } else {
1238 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1239 }
1240 }
1241
1242 /* Generate a conditional jump to label 'l1' according to jump opcode
1243 value 'b'. In the fast case, T0 is guaranteed not to be used.
1244 One or both of the branches will call gen_jmp_rel, so ensure
1245 cc_op is clean. */
static inline void gen_jcc(DisasContext *s, int b, TCGLabel *l1)
1247 {
1248 CCPrepare cc = gen_prepare_cc(s, b, NULL);
1249
1250 /*
1251 * Note that this must be _after_ gen_prepare_cc, because it can change
1252 * the cc_op to CC_OP_EFLAGS (because it's CC_OP_DYNAMIC or because
1253 * it's cheaper to just compute the flags)!
1254 */
1255 gen_update_cc_op(s);
1256 if (cc.use_reg2) {
1257 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1258 } else {
1259 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1260 }
1261 }
1262
static void gen_stos(DisasContext *s, MemOp ot, TCGv dshift)
1264 {
1265 gen_string_movl_A0_EDI(s);
1266 gen_op_st_v(s, ot, s->T0, s->A0);
1267 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1268 }
1269
static void gen_lods(DisasContext *s, MemOp ot, TCGv dshift)
1271 {
1272 gen_string_movl_A0_ESI(s);
1273 gen_op_ld_v(s, ot, s->T0, s->A0);
1274 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1275 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1276 }
1277
static void gen_scas(DisasContext *s, MemOp ot, TCGv dshift)
1279 {
1280 gen_string_movl_A0_EDI(s);
1281 gen_op_ld_v(s, ot, s->T1, s->A0);
1282 tcg_gen_mov_tl(cpu_cc_src, s->T1);
1283 tcg_gen_mov_tl(s->cc_srcT, s->T0);
1284 tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1285 set_cc_op(s, CC_OP_SUBB + ot);
1286
1287 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1288 }
1289
static void gen_cmps(DisasContext *s, MemOp ot, TCGv dshift)
1291 {
1292 gen_string_movl_A0_EDI(s);
1293 gen_op_ld_v(s, ot, s->T1, s->A0);
1294 gen_string_movl_A0_ESI(s);
1295 gen_op_ld_v(s, ot, s->T0, s->A0);
1296 tcg_gen_mov_tl(cpu_cc_src, s->T1);
1297 tcg_gen_mov_tl(s->cc_srcT, s->T0);
1298 tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1299 set_cc_op(s, CC_OP_SUBB + ot);
1300
1301 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1302 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1303 }
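
/*
 * Note (illustrative): SCAS/CMPS reuse the CC_OP_SUB* convention
 * (cc_srcT = minuend, CC_SRC = subtrahend, CC_DST = difference), so a
 * following REPE/REPNE termination test or conditional jump can use the
 * fast paths in gen_prepare_cc() instead of materializing EFLAGS.
 */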
1304
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1306 {
1307 if (s->flags & HF_IOBPT_MASK) {
1308 #ifdef CONFIG_USER_ONLY
1309 /* user-mode cpu should not be in IOBPT mode */
1310 g_assert_not_reached();
1311 #else
1312 TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1313 TCGv t_next = eip_next_tl(s);
1314 gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1315 #endif /* CONFIG_USER_ONLY */
1316 }
1317 }
1318
static void gen_ins(DisasContext *s, MemOp ot, TCGv dshift)
1320 {
1321 TCGv_i32 port = tcg_temp_new_i32();
1322
1323 gen_string_movl_A0_EDI(s);
1324 /* Note: we must do this dummy write first to be restartable in
1325 case of page fault. */
1326 tcg_gen_movi_tl(s->T0, 0);
1327 gen_op_st_v(s, ot, s->T0, s->A0);
1328 tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]);
1329 tcg_gen_andi_i32(port, port, 0xffff);
1330 gen_helper_in_func(ot, s->T0, port);
1331 gen_op_st_v(s, ot, s->T0, s->A0);
1332 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1333 gen_bpt_io(s, port, ot);
1334 }
1335
static void gen_outs(DisasContext *s, MemOp ot, TCGv dshift)
1337 {
1338 TCGv_i32 port = tcg_temp_new_i32();
1339 TCGv_i32 value = tcg_temp_new_i32();
1340
1341 gen_string_movl_A0_ESI(s);
1342 gen_op_ld_v(s, ot, s->T0, s->A0);
1343
1344 tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]);
1345 tcg_gen_andi_i32(port, port, 0xffff);
1346 tcg_gen_trunc_tl_i32(value, s->T0);
1347 gen_helper_out_func(ot, port, value);
1348 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1349 gen_bpt_io(s, port, ot);
1350 }
1351
1352 #define REP_MAX 65535
1353
static void do_gen_rep(DisasContext *s, MemOp ot, TCGv dshift,
1355 void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
1356 bool is_repz_nz)
1357 {
1358 TCGLabel *last = gen_new_label();
1359 TCGLabel *loop = gen_new_label();
1360 TCGLabel *done = gen_new_label();
1361
1362 target_ulong cx_mask = MAKE_64BIT_MASK(0, 8 << s->aflag);
1363 TCGv cx_next = tcg_temp_new();
1364
1365 /*
1366 * Check if we must translate a single iteration only. Normally, HF_RF_MASK
1367 * would also limit translation blocks to one instruction, so that gen_eob
1368 * can reset the flag; here however RF is set throughout the repetition, so
1369 * we can plow through until CX/ECX/RCX is zero.
1370 */
1371 bool can_loop =
1372 (!(tb_cflags(s->base.tb) & (CF_USE_ICOUNT | CF_SINGLE_STEP))
1373 && !(s->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
1374 bool had_rf = s->flags & HF_RF_MASK;
1375
1376 /*
1377 * Even if EFLAGS.RF was set on entry (such as if we're on the second or
1378 * later iteration and an exception or interrupt happened), force gen_eob()
1379 * not to clear the flag. We do that ourselves after the last iteration.
1380 */
1381 s->flags &= ~HF_RF_MASK;
1382
1383 /*
1384 * For CMPS/SCAS, the CC_OP after a memory fault could come from either
1385 * the previous instruction or the string instruction; but because we
1386 * arrange to keep CC_OP up to date all the time, just mark the whole
1387 * insn as CC_OP_DYNAMIC.
1388 *
1389 * It's not a problem to do this even for instructions that do not
1390 * modify the flags, so do it unconditionally.
1391 */
1392 gen_update_cc_op(s);
1393 tcg_set_insn_start_param(s->base.insn_start, 1, CC_OP_DYNAMIC);
1394
1395 /* Any iteration at all? */
1396 tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cpu_regs[R_ECX], cx_mask, done);
1397
1398 /*
1399 * From now on we operate on the value of CX/ECX/RCX that will be written
1400 * back, which is stored in cx_next. There can be no carry, so we can zero
1401 * extend here if needed and not do any expensive deposit operations later.
1402 */
1403 tcg_gen_subi_tl(cx_next, cpu_regs[R_ECX], 1);
1404 #ifdef TARGET_X86_64
1405 if (s->aflag == MO_32) {
1406 tcg_gen_ext32u_tl(cx_next, cx_next);
1407 cx_mask = ~0;
1408 }
1409 #endif
1410
1411 /*
1412 * The last iteration is handled outside the loop, so that cx_next
1413 * can never underflow.
1414 */
1415 if (can_loop) {
1416 tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
1417 }
1418
1419 gen_set_label(loop);
1420 fn(s, ot, dshift);
1421 tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
1422 gen_update_cc_op(s);
1423
1424 /* Leave if REP condition fails. */
1425 if (is_repz_nz) {
1426 int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
1427 gen_jcc_noeob(s, (JCC_Z << 1) | (nz ^ 1), done);
1428 /* gen_prepare_eflags_z never changes cc_op. */
1429 assert(!s->cc_op_dirty);
1430 }
1431
1432 if (can_loop) {
1433 tcg_gen_subi_tl(cx_next, cx_next, 1);
1434 tcg_gen_brcondi_tl(TCG_COND_TSTNE, cx_next, REP_MAX, loop);
1435 tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
1436 }
1437
1438 /*
1439 * Traps or interrupts set RF_MASK if they happen after any iteration
1440 * but the last. Set it here before giving the main loop a chance to
1441 * execute. (For faults, seg_helper.c sets the flag as usual).
1442 */
1443 if (!had_rf) {
1444 gen_set_eflags(s, RF_MASK);
1445 }
1446
1447 /* Go to the main loop but reenter the same instruction. */
1448 gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1449
1450 if (can_loop) {
1451 /*
1452 * The last iteration needs no conditional jump, even if is_repz_nz,
1453 * because the repeats are ending anyway.
1454 */
1455 gen_set_label(last);
1456 set_cc_op(s, CC_OP_DYNAMIC);
1457 fn(s, ot, dshift);
1458 tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
1459 gen_update_cc_op(s);
1460 }
1461
1462 /* CX/ECX/RCX is zero, or REPZ/REPNZ broke the repetition. */
1463 gen_set_label(done);
1464 set_cc_op(s, CC_OP_DYNAMIC);
1465 if (had_rf) {
1466 gen_reset_eflags(s, RF_MASK);
1467 }
1468 gen_jmp_rel_csize(s, 0, 1);
1469 }
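
/*
 * Sketch of the emitted structure when can_loop is true (illustrative,
 * simplified; see the code above for the authoritative version):
 *
 *     if ((CX & cx_mask) == 0) goto done;
 *     cx_next = CX - 1;
 *     if ((cx_next & cx_mask) == 0) goto last;
 *   loop:
 *     <one string iteration>; CX = cx_next;
 *     if (REPZ/REPNZ condition fails) goto done;
 *     cx_next--;
 *     if (cx_next & REP_MAX) goto loop;        // batch of up to 64K iterations
 *     if ((cx_next & cx_mask) == 0) goto last;
 *     set EFLAGS.RF (unless already set); restart this instruction
 *   last:
 *     <final iteration>; CX = cx_next;
 *   done:
 *     clear EFLAGS.RF if it was set on entry; continue to the next insn
 */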
1470
static void do_gen_string(DisasContext *s, MemOp ot,
1472 void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
1473 bool is_repz_nz)
1474 {
1475 TCGv dshift = tcg_temp_new();
1476 tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
1477 tcg_gen_shli_tl(dshift, dshift, ot);
1478
1479 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
1480 do_gen_rep(s, ot, dshift, fn, is_repz_nz);
1481 } else {
1482 fn(s, ot, dshift);
1483 }
1484 }
1485
static void gen_repz(DisasContext *s, MemOp ot,
1487 void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
1488 {
1489 do_gen_string(s, ot, fn, false);
1490 }
1491
static void gen_repz_nz(DisasContext *s, MemOp ot,
1493 void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
1494 {
1495 do_gen_string(s, ot, fn, true);
1496 }
1497
static void gen_helper_fp_arith_ST0_FT0(int op)
1499 {
1500 switch (op) {
1501 case 0:
1502 gen_helper_fadd_ST0_FT0(tcg_env);
1503 break;
1504 case 1:
1505 gen_helper_fmul_ST0_FT0(tcg_env);
1506 break;
1507 case 2:
1508 gen_helper_fcom_ST0_FT0(tcg_env);
1509 break;
1510 case 3:
1511 gen_helper_fcom_ST0_FT0(tcg_env);
1512 break;
1513 case 4:
1514 gen_helper_fsub_ST0_FT0(tcg_env);
1515 break;
1516 case 5:
1517 gen_helper_fsubr_ST0_FT0(tcg_env);
1518 break;
1519 case 6:
1520 gen_helper_fdiv_ST0_FT0(tcg_env);
1521 break;
1522 case 7:
1523 gen_helper_fdivr_ST0_FT0(tcg_env);
1524 break;
1525 }
1526 }
1527
1528 /* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1530 {
1531 TCGv_i32 tmp = tcg_constant_i32(opreg);
1532 switch (op) {
1533 case 0:
1534 gen_helper_fadd_STN_ST0(tcg_env, tmp);
1535 break;
1536 case 1:
1537 gen_helper_fmul_STN_ST0(tcg_env, tmp);
1538 break;
1539 case 4:
1540 gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1541 break;
1542 case 5:
1543 gen_helper_fsub_STN_ST0(tcg_env, tmp);
1544 break;
1545 case 6:
1546 gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1547 break;
1548 case 7:
1549 gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1550 break;
1551 }
1552 }
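
/*
 * Note (illustrative): the "exception" mentioned above is that the
 * reversed helpers are selected for op 4/6 and the plain ones for op 5/7,
 * matching how the DC-group encodings swap FSUBR/FSUB and FDIVR/FDIV
 * relative to the D8-group (ST0-destination) forms.
 */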
1553
static void gen_exception(DisasContext *s, int trapno)
1555 {
1556 gen_update_cc_op(s);
1557 gen_update_eip_cur(s);
1558 gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1559 s->base.is_jmp = DISAS_NORETURN;
1560 }
1561
1562 /* Generate #UD for the current instruction. The assumption here is that
1563 the instruction is known, but it isn't allowed in the current cpu mode. */
static void gen_illegal_opcode(DisasContext *s)
1565 {
1566 gen_exception(s, EXCP06_ILLOP);
1567 }
1568
1569 /* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
1571 {
1572 gen_exception(s, EXCP0D_GPF);
1573 }
1574
1575 /* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
1577 {
1578 if (CPL(s) == 0) {
1579 return true;
1580 }
1581 gen_exception_gpf(s);
1582 return false;
1583 }
1584
1585 /* XXX: add faster immediate case */
static TCGv gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
1587 bool is_right, TCGv count)
1588 {
1589 target_ulong mask = (ot == MO_64 ? 63 : 31);
1590 TCGv cc_src = tcg_temp_new();
1591 TCGv tmp = tcg_temp_new();
1592 TCGv hishift;
1593
1594 switch (ot) {
1595 case MO_16:
1596 /* Note: we implement the Intel behaviour for shift count > 16.
1597 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1598 portion by constructing it as a 32-bit value. */
1599 if (is_right) {
1600 tcg_gen_deposit_tl(tmp, s->T0, s->T1, 16, 16);
1601 tcg_gen_mov_tl(s->T1, s->T0);
1602 tcg_gen_mov_tl(s->T0, tmp);
1603 } else {
1604 tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1605 }
1606 /*
1607 * If TARGET_X86_64 defined then fall through into MO_32 case,
1608 * otherwise fall through default case.
1609 */
1610 case MO_32:
1611 #ifdef TARGET_X86_64
1612 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1613 tcg_gen_subi_tl(tmp, count, 1);
1614 if (is_right) {
1615 tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
1616 tcg_gen_shr_i64(cc_src, s->T0, tmp);
1617 tcg_gen_shr_i64(s->T0, s->T0, count);
1618 } else {
1619 tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
1620 tcg_gen_shl_i64(cc_src, s->T0, tmp);
1621 tcg_gen_shl_i64(s->T0, s->T0, count);
1622 tcg_gen_shri_i64(cc_src, cc_src, 32);
1623 tcg_gen_shri_i64(s->T0, s->T0, 32);
1624 }
1625 break;
1626 #endif
1627 default:
1628 hishift = tcg_temp_new();
1629 tcg_gen_subi_tl(tmp, count, 1);
1630 if (is_right) {
1631 tcg_gen_shr_tl(cc_src, s->T0, tmp);
1632
1633 /* mask + 1 - count = mask - tmp = mask ^ tmp */
1634 tcg_gen_xori_tl(hishift, tmp, mask);
1635 tcg_gen_shr_tl(s->T0, s->T0, count);
1636 tcg_gen_shl_tl(s->T1, s->T1, hishift);
1637 } else {
1638 tcg_gen_shl_tl(cc_src, s->T0, tmp);
1639
1640 /* mask + 1 - count = mask - tmp = mask ^ tmp */
1641 tcg_gen_xori_tl(hishift, tmp, mask);
1642 tcg_gen_shl_tl(s->T0, s->T0, count);
1643 tcg_gen_shr_tl(s->T1, s->T1, hishift);
1644
1645 if (ot == MO_16) {
1646 /* Only needed if count > 16, for Intel behaviour. */
1647 tcg_gen_shri_tl(tmp, s->T1, 1);
1648 tcg_gen_or_tl(cc_src, cc_src, tmp);
1649 }
1650 }
1651 tcg_gen_movcond_tl(TCG_COND_EQ, s->T1,
1652 count, tcg_constant_tl(0),
1653 tcg_constant_tl(0), s->T1);
1654 tcg_gen_or_tl(s->T0, s->T0, s->T1);
1655 break;
1656 }
1657
1658 return cc_src;
1659 }
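
/*
 * Note (illustrative): cc_src receives the value shifted by (count - 1),
 * so its top bit (left shifts) or bottom bit (right shifts) is the last
 * bit shifted out, from which the caller derives CF.  In the generic arm
 * the final movcond zeroes the T1 contribution when count == 0, so OR-ing
 * it into T0 leaves the destination unchanged for a zero count.
 */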
1660
1661 #define X86_MAX_INSN_LENGTH 15
1662
static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
1664 {
1665 uint64_t pc = s->pc;
1666
1667 /* This is a subsequent insn that crosses a page boundary. */
1668 if (s->base.num_insns > 1 &&
1669 !translator_is_same_page(&s->base, s->pc + num_bytes - 1)) {
1670 siglongjmp(s->jmpbuf, 2);
1671 }
1672
1673 s->pc += num_bytes;
1674 if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
1675 /* If the instruction's 16th byte is on a different page than the 1st, a
1676 * page fault on the second page wins over the general protection fault
1677 * caused by the instruction being too long.
1678 * This can happen even if the operand is only one byte long!
1679 */
1680 if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
1681 (void)translator_ldub(env, &s->base,
1682 (s->pc - 1) & TARGET_PAGE_MASK);
1683 }
1684 siglongjmp(s->jmpbuf, 1);
1685 }
1686
1687 return pc;
1688 }
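
/*
 * Example (illustrative): an instruction padded with redundant prefixes
 * past 15 bytes makes cur_insn_len() exceed X86_MAX_INSN_LENGTH.  The
 * probe load above forces a page fault first if the 16th byte lies on an
 * unmapped page; otherwise the siglongjmp aborts decoding so the caller
 * can report the over-long instruction as #GP.
 */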
1689
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
1691 {
1692 return translator_ldub(env, &s->base, advance_pc(env, s, 1));
1693 }
1694
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
1696 {
1697 return translator_lduw(env, &s->base, advance_pc(env, s, 2));
1698 }
1699
static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
1701 {
1702 return translator_ldl(env, &s->base, advance_pc(env, s, 4));
1703 }
1704
1705 #ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
1707 {
1708 return translator_ldq(env, &s->base, advance_pc(env, s, 8));
1709 }
1710 #endif
1711
1712 /* Decompose an address. */
1713
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1715 int modrm, bool is_vsib)
1716 {
1717 int def_seg, base, index, scale, mod, rm;
1718 target_long disp;
1719 bool havesib;
1720
1721 def_seg = R_DS;
1722 index = -1;
1723 scale = 0;
1724 disp = 0;
1725
1726 mod = (modrm >> 6) & 3;
1727 rm = modrm & 7;
1728 base = rm | REX_B(s);
1729
1730 if (mod == 3) {
1731 /* Normally filtered out earlier, but including this path
1732 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1733 goto done;
1734 }
1735
1736 switch (s->aflag) {
1737 case MO_64:
1738 case MO_32:
1739 havesib = 0;
1740 if (rm == 4) {
1741 int code = x86_ldub_code(env, s);
1742 scale = (code >> 6) & 3;
1743 index = ((code >> 3) & 7) | REX_X(s);
1744 if (index == 4 && !is_vsib) {
1745 index = -1; /* no index */
1746 }
1747 base = (code & 7) | REX_B(s);
1748 havesib = 1;
1749 }
1750
1751 switch (mod) {
1752 case 0:
1753 if ((base & 7) == 5) {
1754 base = -1;
1755 disp = (int32_t)x86_ldl_code(env, s);
1756 if (CODE64(s) && !havesib) {
1757 base = -2;
1758 disp += s->pc + s->rip_offset;
1759 }
1760 }
1761 break;
1762 case 1:
1763 disp = (int8_t)x86_ldub_code(env, s);
1764 break;
1765 default:
1766 case 2:
1767 disp = (int32_t)x86_ldl_code(env, s);
1768 break;
1769 }
1770
1771 /* For correct popl handling with esp. */
1772 if (base == R_ESP && s->popl_esp_hack) {
1773 disp += s->popl_esp_hack;
1774 }
1775 if (base == R_EBP || base == R_ESP) {
1776 def_seg = R_SS;
1777 }
1778 break;
1779
1780 case MO_16:
1781 if (mod == 0) {
1782 if (rm == 6) {
1783 base = -1;
1784 disp = x86_lduw_code(env, s);
1785 break;
1786 }
1787 } else if (mod == 1) {
1788 disp = (int8_t)x86_ldub_code(env, s);
1789 } else {
1790 disp = (int16_t)x86_lduw_code(env, s);
1791 }
1792
1793 switch (rm) {
1794 case 0:
1795 base = R_EBX;
1796 index = R_ESI;
1797 break;
1798 case 1:
1799 base = R_EBX;
1800 index = R_EDI;
1801 break;
1802 case 2:
1803 base = R_EBP;
1804 index = R_ESI;
1805 def_seg = R_SS;
1806 break;
1807 case 3:
1808 base = R_EBP;
1809 index = R_EDI;
1810 def_seg = R_SS;
1811 break;
1812 case 4:
1813 base = R_ESI;
1814 break;
1815 case 5:
1816 base = R_EDI;
1817 break;
1818 case 6:
1819 base = R_EBP;
1820 def_seg = R_SS;
1821 break;
1822 default:
1823 case 7:
1824 base = R_EBX;
1825 break;
1826 }
1827 break;
1828
1829 default:
1830 g_assert_not_reached();
1831 }
1832
1833 done:
1834 return (AddressParts){ def_seg, base, index, scale, disp };
1835 }
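
/*
 * Worked example (illustrative): in 32-bit code, the bytes 8B 44 8E 08
 * ("mov 0x8(%esi,%ecx,4), %eax") have modrm 0x44 (mod=1, reg=0, rm=4), so
 * a SIB byte follows: 0x8E gives scale=2, index=ECX, base=ESI, and mod=1
 * adds an 8-bit displacement of 8.  The function returns
 * { .def_seg = R_DS, .base = R_ESI, .index = R_ECX, .scale = 2, .disp = 8 }.
 */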
1836
1837 /* Compute the address, with a minimum number of TCG ops. */
static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1839 {
1840 TCGv ea = NULL;
1841
1842 if (a.index >= 0 && !is_vsib) {
1843 if (a.scale == 0) {
1844 ea = cpu_regs[a.index];
1845 } else {
1846 tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1847 ea = s->A0;
1848 }
1849 if (a.base >= 0) {
1850 tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1851 ea = s->A0;
1852 }
1853 } else if (a.base >= 0) {
1854 ea = cpu_regs[a.base];
1855 }
1856 if (!ea) {
1857 if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1858 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1859 tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1860 } else {
1861 tcg_gen_movi_tl(s->A0, a.disp);
1862 }
1863 ea = s->A0;
1864 } else if (a.disp != 0) {
1865 tcg_gen_addi_tl(s->A0, ea, a.disp);
1866 ea = s->A0;
1867 }
1868
1869 return ea;
1870 }
1871
1872 /* Used for BNDCL, BNDCU, BNDCN. */
1873 static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
1874 TCGCond cond, TCGv_i64 bndv)
1875 {
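    /*
     * setcond yields 1 when the address violates the given bound; the
     * helper raises #BR in that case.
     */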
1876 TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
1877 TCGv_i32 t32 = tcg_temp_new_i32();
1878 TCGv_i64 t64 = tcg_temp_new_i64();
1879
1880 tcg_gen_extu_tl_i64(t64, ea);
1881 if (!CODE64(s)) {
1882 tcg_gen_ext32u_i64(t64, t64);
1883 }
1884 tcg_gen_setcond_i64(cond, t64, t64, bndv);
1885 tcg_gen_extrl_i64_i32(t32, t64);
1886 gen_helper_bndck(tcg_env, t32);
1887 }
1888
1889 /* generate modrm load of memory or register. */
1890 static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1891 {
1892 int modrm = s->modrm;
1893 int mod, rm;
1894
1895 mod = (modrm >> 6) & 3;
1896 rm = (modrm & 7) | REX_B(s);
1897 if (mod == 3) {
1898 gen_op_mov_v_reg(s, ot, s->T0, rm);
1899 } else {
1900 gen_lea_modrm(s, decode);
1901 gen_op_ld_v(s, ot, s->T0, s->A0);
1902 }
1903 }
1904
1905 /* generate modrm store of memory or register. */
1906 static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1907 {
1908 int modrm = s->modrm;
1909 int mod, rm;
1910
1911 mod = (modrm >> 6) & 3;
1912 rm = (modrm & 7) | REX_B(s);
1913 if (mod == 3) {
1914 gen_op_mov_reg_v(s, ot, rm, s->T0);
1915 } else {
1916 gen_lea_modrm(s, decode);
1917 gen_op_st_v(s, ot, s->T0, s->A0);
1918 }
1919 }
1920
1921 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1922 {
1923 target_ulong ret;
1924
1925 switch (ot) {
1926 case MO_8:
1927 ret = x86_ldub_code(env, s);
1928 break;
1929 case MO_16:
1930 ret = x86_lduw_code(env, s);
1931 break;
1932 case MO_32:
1933 ret = x86_ldl_code(env, s);
1934 break;
1935 #ifdef TARGET_X86_64
1936 case MO_64:
1937 ret = x86_ldq_code(env, s);
1938 break;
1939 #endif
1940 default:
1941 g_assert_not_reached();
1942 }
1943 return ret;
1944 }
1945
1946 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1947 {
1948 uint32_t ret;
1949
1950 switch (ot) {
1951 case MO_8:
1952 ret = x86_ldub_code(env, s);
1953 break;
1954 case MO_16:
1955 ret = x86_lduw_code(env, s);
1956 break;
1957 case MO_32:
1958 #ifdef TARGET_X86_64
1959 case MO_64:
1960 #endif
1961 ret = x86_ldl_code(env, s);
1962 break;
1963 default:
1964 g_assert_not_reached();
1965 }
1966 return ret;
1967 }
1968
1969 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1970 {
1971 target_long ret;
1972
1973 switch (ot) {
1974 case MO_8:
1975 ret = (int8_t) x86_ldub_code(env, s);
1976 break;
1977 case MO_16:
1978 ret = (int16_t) x86_lduw_code(env, s);
1979 break;
1980 case MO_32:
1981 ret = (int32_t) x86_ldl_code(env, s);
1982 break;
1983 #ifdef TARGET_X86_64
1984 case MO_64:
1985 ret = x86_ldq_code(env, s);
1986 break;
1987 #endif
1988 default:
1989 g_assert_not_reached();
1990 }
1991 return ret;
1992 }
1993
1994 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1995 TCGLabel *not_taken, TCGLabel *taken)
1996 {
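    /*
     * The not_taken path falls through to the next instruction (TB exit 1);
     * the taken path branches by diff (TB exit 0).
     */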
1997 if (not_taken) {
1998 gen_set_label(not_taken);
1999 }
2000 gen_jmp_rel_csize(s, 0, 1);
2001
2002 gen_set_label(taken);
2003 gen_jmp_rel(s, s->dflag, diff, 0);
2004 }
2005
2006 static void gen_cmovcc(DisasContext *s, int b, TCGv dest, TCGv src)
2007 {
2008 CCPrepare cc = gen_prepare_cc(s, b, NULL);
2009
2010 if (!cc.use_reg2) {
2011 cc.reg2 = tcg_constant_tl(cc.imm);
2012 }
2013
2014 tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
2015 }
2016
2017 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
2018 {
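    /* In real and VM86 mode the segment base is simply selector << 4. */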
2019 TCGv selector = tcg_temp_new();
2020 tcg_gen_ext16u_tl(selector, seg);
2021 tcg_gen_st32_tl(selector, tcg_env,
2022 offsetof(CPUX86State,segs[seg_reg].selector));
2023 tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
2024 }
2025
2026 /* Move SRC to seg_reg and end the TB if the CPU state may change.  Never
2027 call this function with seg_reg == R_CS. */
2028 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src, bool inhibit_irq)
2029 {
2030 if (PE(s) && !VM86(s)) {
2031 TCGv_i32 sel = tcg_temp_new_i32();
2032
2033 tcg_gen_trunc_tl_i32(sel, src);
2034 gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel);
2035
2036 /* For move to DS/ES/SS, the addseg or ss32 flags may change. */
2037 if (CODE32(s) && seg_reg < R_FS) {
2038 s->base.is_jmp = DISAS_EOB_NEXT;
2039 }
2040 } else {
2041 gen_op_movl_seg_real(s, seg_reg, src);
2042 }
2043
2044 /*
2045 * For MOV or POP to SS (but not LSS) translation must always
2046 * stop as a special handling must be done to disable hardware
2047 * interrupts for the next instruction.
2048 *
2049 * This is the last instruction, so it's okay to overwrite
2050 * HF_TF_MASK; the next TB will start with the flag set.
2051 *
2052 * DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT which
2053 * might have been set above.
2054 */
2055 if (inhibit_irq) {
2056 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2057 s->flags &= ~HF_TF_MASK;
2058 }
2059 }
2060
2061 static void gen_far_call(DisasContext *s)
2062 {
2063 TCGv_i32 new_cs = tcg_temp_new_i32();
2064 tcg_gen_trunc_tl_i32(new_cs, s->T1);
2065 if (PE(s) && !VM86(s)) {
2066 gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
2067 tcg_constant_i32(s->dflag - 1),
2068 eip_next_tl(s));
2069 } else {
2070 TCGv_i32 new_eip = tcg_temp_new_i32();
2071 tcg_gen_trunc_tl_i32(new_eip, s->T0);
2072 gen_helper_lcall_real(tcg_env, new_cs, new_eip,
2073 tcg_constant_i32(s->dflag - 1),
2074 eip_next_i32(s));
2075 }
2076 s->base.is_jmp = DISAS_JUMP;
2077 }
2078
2079 static void gen_far_jmp(DisasContext *s)
2080 {
2081 if (PE(s) && !VM86(s)) {
2082 TCGv_i32 new_cs = tcg_temp_new_i32();
2083 tcg_gen_trunc_tl_i32(new_cs, s->T1);
2084 gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
2085 eip_next_tl(s));
2086 } else {
2087 gen_op_movl_seg_real(s, R_CS, s->T1);
2088 gen_op_jmp_v(s, s->T0);
2089 }
2090 s->base.is_jmp = DISAS_JUMP;
2091 }
2092
2093 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2094 {
2095 /* no SVM activated; fast case */
2096 if (likely(!GUEST(s))) {
2097 return;
2098 }
2099 gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2100 }
2101
2102 static inline void gen_stack_update(DisasContext *s, int addend)
2103 {
2104 gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2105 }
2106
2107 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
2108 {
2109 if (offset) {
2110 tcg_gen_addi_tl(dest, src, offset);
2111 src = dest;
2112 }
2113 gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
2114 }
2115
2116 /* Generate a push. It depends on ss32, addseg and dflag. */
2117 static void gen_push_v(DisasContext *s, TCGv val)
2118 {
2119 MemOp d_ot = mo_pushpop(s, s->dflag);
2120 MemOp a_ot = mo_stacksize(s);
2121 int size = 1 << d_ot;
2122 TCGv new_esp = tcg_temp_new();
2123
2124 tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
2125
2126 /* Now reduce the value to the address size and apply SS base. */
2127 gen_lea_ss_ofs(s, s->A0, new_esp, 0);
2128 gen_op_st_v(s, d_ot, val, s->A0);
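    /* Update ESP only after the store, so a faulting push leaves it intact. */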
2129 gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2130 }
2131
2132 /* A two-step pop is necessary for precise exceptions. */
2133 static MemOp gen_pop_T0(DisasContext *s)
2134 {
2135 MemOp d_ot = mo_pushpop(s, s->dflag);
2136
2137 gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
2138 gen_op_ld_v(s, d_ot, s->T0, s->T0);
2139
2140 return d_ot;
2141 }
2142
2143 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2144 {
2145 gen_stack_update(s, 1 << ot);
2146 }
2147
2148 static void gen_pusha(DisasContext *s)
2149 {
2150 MemOp d_ot = s->dflag;
2151 int size = 1 << d_ot;
2152 int i;
2153
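    /*
     * PUSHA pushes EAX, ECX, EDX, EBX, ESP, EBP, ESI and EDI in that order,
     * so EDI ends up at the lowest address; the stored ESP is the value
     * from before this instruction.
     */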
2154 for (i = 0; i < 8; i++) {
2155 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
2156 gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2157 }
2158
2159 gen_stack_update(s, -8 * size);
2160 }
2161
2162 static void gen_popa(DisasContext *s)
2163 {
2164 MemOp d_ot = s->dflag;
2165 int size = 1 << d_ot;
2166 int i;
2167
2168 for (i = 0; i < 8; i++) {
2169 /* ESP is not reloaded */
2170 if (7 - i == R_ESP) {
2171 continue;
2172 }
2173 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2174 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2175 gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2176 }
2177
2178 gen_stack_update(s, 8 * size);
2179 }
2180
2181 static void gen_enter(DisasContext *s, int esp_addend, int level)
2182 {
2183 MemOp d_ot = mo_pushpop(s, s->dflag);
2184 MemOp a_ot = mo_stacksize(s);
2185 int size = 1 << d_ot;
2186
2187 /* Push BP; compute FrameTemp into T1. */
2188 tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2189 gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2190 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2191
2192 level &= 31;
2193 if (level != 0) {
2194 int i;
2195 if (level > 1) {
2196 TCGv fp = tcg_temp_new();
2197
2198 /* Copy level-1 pointers from the previous frame. */
2199 for (i = 1; i < level; ++i) {
2200 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2201 gen_op_ld_v(s, d_ot, fp, s->A0);
2202
2203 gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2204 gen_op_st_v(s, d_ot, fp, s->A0);
2205 }
2206 }
2207
2208 /* Push the current FrameTemp as the last level. */
2209 gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2210 gen_op_st_v(s, d_ot, s->T1, s->A0);
2211 }
2212
2213 /* Copy the FrameTemp value to EBP. */
2214 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2215
2216 /* Compute the final value of ESP. */
2217 tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2218 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2219 }
2220
2221 static void gen_leave(DisasContext *s)
2222 {
2223 MemOp d_ot = mo_pushpop(s, s->dflag);
2224 MemOp a_ot = mo_stacksize(s);
2225
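    /* LEAVE: reload EBP from the frame and point ESP just above that slot. */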
2226 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2227 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2228
2229 tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2230
2231 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2232 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2233 }
2234
2235 /* Similarly, except that the assumption here is that we don't decode
2236 the instruction at all -- either a missing opcode, an unimplemented
2237 feature, or just a bogus instruction stream. */
2238 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2239 {
2240 gen_illegal_opcode(s);
2241
2242 if (qemu_loglevel_mask(LOG_UNIMP)) {
2243 FILE *logfile = qemu_log_trylock();
2244 if (logfile) {
2245 target_ulong pc = s->base.pc_next, end = s->pc;
2246
2247 fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2248 for (; pc < end; ++pc) {
2249 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2250 }
2251 fprintf(logfile, "\n");
2252 qemu_log_unlock(logfile);
2253 }
2254 }
2255 }
2256
2257 /* an interrupt is different from an exception because of the
2258 privilege checks */
2259 static void gen_interrupt(DisasContext *s, uint8_t intno)
2260 {
2261 gen_update_cc_op(s);
2262 gen_update_eip_cur(s);
2263 gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2264 cur_insn_len_i32(s));
2265 s->base.is_jmp = DISAS_NORETURN;
2266 }
2267
2268 /* Clear BND registers during legacy branches. */
2269 static void gen_bnd_jmp(DisasContext *s)
2270 {
2271 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2272 and if the BNDREGs are known to be in use (non-zero) already.
2273 The helper itself will check BNDPRESERVE at runtime. */
2274 if ((s->prefix & PREFIX_REPNZ) == 0
2275 && (s->flags & HF_MPX_EN_MASK) != 0
2276 && (s->flags & HF_MPX_IU_MASK) != 0) {
2277 gen_helper_bnd_jmp(tcg_env);
2278 }
2279 }
2280
2281 /*
2282 * Generate an end of block, including common tasks such as generating
2283 * single step traps, resetting the RF flag, and handling the interrupt
2284 * shadow.
2285 */
2286 static void
2287 gen_eob(DisasContext *s, int mode)
2288 {
2289 bool inhibit_reset;
2290
2291 gen_update_cc_op(s);
2292
2293 /* If several instructions disable interrupts, only the first does it. */
2294 inhibit_reset = false;
2295 if (s->flags & HF_INHIBIT_IRQ_MASK) {
2296 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2297 inhibit_reset = true;
2298 } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2299 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2300 }
2301
2302 if (s->flags & HF_RF_MASK) {
2303 gen_reset_eflags(s, RF_MASK);
2304 }
2305 if (mode == DISAS_EOB_RECHECK_TF) {
2306 gen_helper_rechecking_single_step(tcg_env);
2307 tcg_gen_exit_tb(NULL, 0);
2308 } else if (s->flags & HF_TF_MASK) {
2309 gen_helper_single_step(tcg_env);
2310 } else if (mode == DISAS_JUMP &&
2311 /* give irqs a chance to happen */
2312 !inhibit_reset) {
2313 tcg_gen_lookup_and_goto_ptr();
2314 } else {
2315 tcg_gen_exit_tb(NULL, 0);
2316 }
2317
2318 s->base.is_jmp = DISAS_NORETURN;
2319 }
2320
2321 /* Jump to eip+diff, truncating the result to OT. */
2322 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2323 {
2324 bool use_goto_tb = s->jmp_opt;
2325 target_ulong mask = -1;
2326 target_ulong new_pc = s->pc + diff;
2327 target_ulong new_eip = new_pc - s->cs_base;
2328
2329 assert(!s->cc_op_dirty);
2330
2331 /* In 64-bit mode, operand size is fixed at 64 bits. */
2332 if (!CODE64(s)) {
2333 if (ot == MO_16) {
2334 mask = 0xffff;
2335 if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2336 use_goto_tb = false;
2337 }
2338 } else {
2339 mask = 0xffffffff;
2340 }
2341 }
2342 new_eip &= mask;
2343
2344 if (tb_cflags(s->base.tb) & CF_PCREL) {
2345 tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2346 /*
2347 * If we can prove the branch does not leave the page and we have
2348 * no extra masking to apply (data16 branch in code32, see above),
2349 * then we have also proven that the addition does not wrap.
2350 */
2351 if (!use_goto_tb || !translator_is_same_page(&s->base, new_pc)) {
2352 tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2353 use_goto_tb = false;
2354 }
2355 } else if (!CODE64(s)) {
2356 new_pc = (uint32_t)(new_eip + s->cs_base);
2357 }
2358
2359 if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2360 /* jump to same page: we can use a direct jump */
2361 tcg_gen_goto_tb(tb_num);
2362 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2363 tcg_gen_movi_tl(cpu_eip, new_eip);
2364 }
2365 tcg_gen_exit_tb(s->base.tb, tb_num);
2366 s->base.is_jmp = DISAS_NORETURN;
2367 } else {
2368 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2369 tcg_gen_movi_tl(cpu_eip, new_eip);
2370 }
2371 if (s->jmp_opt) {
2372 gen_eob(s, DISAS_JUMP); /* jump to another page */
2373 } else {
2374 gen_eob(s, DISAS_EOB_ONLY); /* exit to main loop */
2375 }
2376 }
2377 }
2378
2379 /* Jump to eip+diff, truncating to the current code size. */
2380 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2381 {
2382 /* CODE64 ignores the OT argument, so we need not consider it. */
2383 gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2384 }
2385
2386 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2387 {
2388 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2389 tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2390 }
2391
2392 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2393 {
2394 tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2395 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2396 }
2397
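/*
 * CPUs that implement AVX guarantee that aligned 16-byte accesses are
 * atomic; without AVX only the two 8-byte halves are atomic when aligned.
 */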
2398 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2399 {
2400 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2401 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2402 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2403 int mem_index = s->mem_index;
2404 TCGv_i128 t = tcg_temp_new_i128();
2405
2406 tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2407 tcg_gen_st_i128(t, tcg_env, offset);
2408 }
2409
2410 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2411 {
2412 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2413 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2414 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2415 int mem_index = s->mem_index;
2416 TCGv_i128 t = tcg_temp_new_i128();
2417
2418 tcg_gen_ld_i128(t, tcg_env, offset);
2419 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2420 }
2421
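/*
 * 256-bit accesses are split into two 16-byte halves; the 32-byte
 * alignment check, when requested, is applied to the low half only.
 */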
2422 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2423 {
2424 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2425 int mem_index = s->mem_index;
2426 TCGv_i128 t0 = tcg_temp_new_i128();
2427 TCGv_i128 t1 = tcg_temp_new_i128();
2428 TCGv a0_hi = tcg_temp_new();
2429
2430 tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2431 tcg_gen_addi_tl(a0_hi, s->A0, 16);
2432 tcg_gen_qemu_ld_i128(t1, a0_hi, mem_index, mop);
2433
2434 tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2435 tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2436 }
2437
2438 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2439 {
2440 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2441 int mem_index = s->mem_index;
2442 TCGv_i128 t = tcg_temp_new_i128();
2443 TCGv a0_hi = tcg_temp_new();
2444
2445 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2446 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2447 tcg_gen_addi_tl(a0_hi, s->A0, 16);
2448 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2449 tcg_gen_qemu_st_i128(t, a0_hi, mem_index, mop);
2450 }
2451
2452 #include "emit.c.inc"
2453
2454 static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
2455 {
2456 bool update_fip = true;
2457 int b = decode->b;
2458 int modrm = s->modrm;
2459 int mod, rm, op;
2460
2461 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2462 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
2463 /* XXX: what to do if illegal op ? */
2464 gen_exception(s, EXCP07_PREX);
2465 return;
2466 }
2467 mod = (modrm >> 6) & 3;
2468 rm = modrm & 7;
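    /*
     * op combines the low three bits of the opcode byte (D8..DF) with the
     * modrm reg field, giving a 6-bit index that selects the x87 operation.
     */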
2469 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2470 if (mod != 3) {
2471 /* memory op */
2472 TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2473 TCGv last_addr = tcg_temp_new();
2474 bool update_fdp = true;
2475
2476 tcg_gen_mov_tl(last_addr, ea);
2477 gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
2478
2479 switch (op) {
2480 case 0x00 ... 0x07: /* fxxxs */
2481 case 0x10 ... 0x17: /* fixxxl */
2482 case 0x20 ... 0x27: /* fxxxl */
2483 case 0x30 ... 0x37: /* fixxx */
2484 {
2485 int op1;
2486 op1 = op & 7;
2487
2488 switch (op >> 4) {
2489 case 0:
2490 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2491 s->mem_index, MO_LEUL);
2492 gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2493 break;
2494 case 1:
2495 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2496 s->mem_index, MO_LEUL);
2497 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2498 break;
2499 case 2:
2500 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2501 s->mem_index, MO_LEUQ);
2502 gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2503 break;
2504 case 3:
2505 default:
2506 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2507 s->mem_index, MO_LESW);
2508 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2509 break;
2510 }
2511
2512 gen_helper_fp_arith_ST0_FT0(op1);
2513 if (op1 == 3) {
2514 /* fcomp needs pop */
2515 gen_helper_fpop(tcg_env);
2516 }
2517 }
2518 break;
2519 case 0x08: /* flds */
2520 case 0x0a: /* fsts */
2521 case 0x0b: /* fstps */
2522 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2523 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2524 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2525 switch (op & 7) {
2526 case 0:
2527 switch (op >> 4) {
2528 case 0:
2529 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2530 s->mem_index, MO_LEUL);
2531 gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2532 break;
2533 case 1:
2534 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2535 s->mem_index, MO_LEUL);
2536 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2537 break;
2538 case 2:
2539 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2540 s->mem_index, MO_LEUQ);
2541 gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2542 break;
2543 case 3:
2544 default:
2545 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2546 s->mem_index, MO_LESW);
2547 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2548 break;
2549 }
2550 break;
2551 case 1:
2552 /* XXX: the corresponding CPUID bit must be tested ! */
2553 switch (op >> 4) {
2554 case 1:
2555 gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2556 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2557 s->mem_index, MO_LEUL);
2558 break;
2559 case 2:
2560 gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2561 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2562 s->mem_index, MO_LEUQ);
2563 break;
2564 case 3:
2565 default:
2566 gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2567 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2568 s->mem_index, MO_LEUW);
2569 break;
2570 }
2571 gen_helper_fpop(tcg_env);
2572 break;
2573 default:
2574 switch (op >> 4) {
2575 case 0:
2576 gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2577 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2578 s->mem_index, MO_LEUL);
2579 break;
2580 case 1:
2581 gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2582 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2583 s->mem_index, MO_LEUL);
2584 break;
2585 case 2:
2586 gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2587 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2588 s->mem_index, MO_LEUQ);
2589 break;
2590 case 3:
2591 default:
2592 gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2593 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2594 s->mem_index, MO_LEUW);
2595 break;
2596 }
2597 if ((op & 7) == 3) {
2598 gen_helper_fpop(tcg_env);
2599 }
2600 break;
2601 }
2602 break;
2603 case 0x0c: /* fldenv mem */
2604 gen_helper_fldenv(tcg_env, s->A0,
2605 tcg_constant_i32(s->dflag - 1));
2606 update_fip = update_fdp = false;
2607 break;
2608 case 0x0d: /* fldcw mem */
2609 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2610 s->mem_index, MO_LEUW);
2611 gen_helper_fldcw(tcg_env, s->tmp2_i32);
2612 update_fip = update_fdp = false;
2613 break;
2614 case 0x0e: /* fnstenv mem */
2615 gen_helper_fstenv(tcg_env, s->A0,
2616 tcg_constant_i32(s->dflag - 1));
2617 update_fip = update_fdp = false;
2618 break;
2619 case 0x0f: /* fnstcw mem */
2620 gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2621 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2622 s->mem_index, MO_LEUW);
2623 update_fip = update_fdp = false;
2624 break;
2625 case 0x1d: /* fldt mem */
2626 gen_helper_fldt_ST0(tcg_env, s->A0);
2627 break;
2628 case 0x1f: /* fstpt mem */
2629 gen_helper_fstt_ST0(tcg_env, s->A0);
2630 gen_helper_fpop(tcg_env);
2631 break;
2632 case 0x2c: /* frstor mem */
2633 gen_helper_frstor(tcg_env, s->A0,
2634 tcg_constant_i32(s->dflag - 1));
2635 update_fip = update_fdp = false;
2636 break;
2637 case 0x2e: /* fnsave mem */
2638 gen_helper_fsave(tcg_env, s->A0,
2639 tcg_constant_i32(s->dflag - 1));
2640 update_fip = update_fdp = false;
2641 break;
2642 case 0x2f: /* fnstsw mem */
2643 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2644 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2645 s->mem_index, MO_LEUW);
2646 update_fip = update_fdp = false;
2647 break;
2648 case 0x3c: /* fbld */
2649 gen_helper_fbld_ST0(tcg_env, s->A0);
2650 break;
2651 case 0x3e: /* fbstp */
2652 gen_helper_fbst_ST0(tcg_env, s->A0);
2653 gen_helper_fpop(tcg_env);
2654 break;
2655 case 0x3d: /* fildll */
2656 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2657 s->mem_index, MO_LEUQ);
2658 gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2659 break;
2660 case 0x3f: /* fistpll */
2661 gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2662 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2663 s->mem_index, MO_LEUQ);
2664 gen_helper_fpop(tcg_env);
2665 break;
2666 default:
2667 goto illegal_op;
2668 }
2669
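        /*
         * Memory-operand instructions also record the last data pointer
         * (FDP/FDS), except for control instructions such as fldcw,
         * fldenv, fnstenv, fnstcw, frstor, fnsave and fnstsw.
         */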
2670 if (update_fdp) {
2671 int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
2672
2673 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2674 offsetof(CPUX86State,
2675 segs[last_seg].selector));
2676 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2677 offsetof(CPUX86State, fpds));
2678 tcg_gen_st_tl(last_addr, tcg_env,
2679 offsetof(CPUX86State, fpdp));
2680 }
2681 } else {
2682 /* register float ops */
2683 int opreg = rm;
2684
2685 switch (op) {
2686 case 0x08: /* fld sti */
2687 gen_helper_fpush(tcg_env);
2688 gen_helper_fmov_ST0_STN(tcg_env,
2689 tcg_constant_i32((opreg + 1) & 7));
2690 break;
2691 case 0x09: /* fxchg sti */
2692 case 0x29: /* fxchg4 sti, undocumented op */
2693 case 0x39: /* fxchg7 sti, undocumented op */
2694 gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2695 break;
2696 case 0x0a: /* grp d9/2 */
2697 switch (rm) {
2698 case 0: /* fnop */
2699 /*
2700 * check exceptions (FreeBSD FPU probe)
2701 * needs to be treated as I/O because of ferr_irq
2702 */
2703 translator_io_start(&s->base);
2704 gen_helper_fwait(tcg_env);
2705 update_fip = false;
2706 break;
2707 default:
2708 goto illegal_op;
2709 }
2710 break;
2711 case 0x0c: /* grp d9/4 */
2712 switch (rm) {
2713 case 0: /* fchs */
2714 gen_helper_fchs_ST0(tcg_env);
2715 break;
2716 case 1: /* fabs */
2717 gen_helper_fabs_ST0(tcg_env);
2718 break;
2719 case 4: /* ftst */
2720 gen_helper_fldz_FT0(tcg_env);
2721 gen_helper_fcom_ST0_FT0(tcg_env);
2722 break;
2723 case 5: /* fxam */
2724 gen_helper_fxam_ST0(tcg_env);
2725 break;
2726 default:
2727 goto illegal_op;
2728 }
2729 break;
2730 case 0x0d: /* grp d9/5 */
2731 {
2732 switch (rm) {
2733 case 0:
2734 gen_helper_fpush(tcg_env);
2735 gen_helper_fld1_ST0(tcg_env);
2736 break;
2737 case 1:
2738 gen_helper_fpush(tcg_env);
2739 gen_helper_fldl2t_ST0(tcg_env);
2740 break;
2741 case 2:
2742 gen_helper_fpush(tcg_env);
2743 gen_helper_fldl2e_ST0(tcg_env);
2744 break;
2745 case 3:
2746 gen_helper_fpush(tcg_env);
2747 gen_helper_fldpi_ST0(tcg_env);
2748 break;
2749 case 4:
2750 gen_helper_fpush(tcg_env);
2751 gen_helper_fldlg2_ST0(tcg_env);
2752 break;
2753 case 5:
2754 gen_helper_fpush(tcg_env);
2755 gen_helper_fldln2_ST0(tcg_env);
2756 break;
2757 case 6:
2758 gen_helper_fpush(tcg_env);
2759 gen_helper_fldz_ST0(tcg_env);
2760 break;
2761 default:
2762 goto illegal_op;
2763 }
2764 }
2765 break;
2766 case 0x0e: /* grp d9/6 */
2767 switch (rm) {
2768 case 0: /* f2xm1 */
2769 gen_helper_f2xm1(tcg_env);
2770 break;
2771 case 1: /* fyl2x */
2772 gen_helper_fyl2x(tcg_env);
2773 break;
2774 case 2: /* fptan */
2775 gen_helper_fptan(tcg_env);
2776 break;
2777 case 3: /* fpatan */
2778 gen_helper_fpatan(tcg_env);
2779 break;
2780 case 4: /* fxtract */
2781 gen_helper_fxtract(tcg_env);
2782 break;
2783 case 5: /* fprem1 */
2784 gen_helper_fprem1(tcg_env);
2785 break;
2786 case 6: /* fdecstp */
2787 gen_helper_fdecstp(tcg_env);
2788 break;
2789 default:
2790 case 7: /* fincstp */
2791 gen_helper_fincstp(tcg_env);
2792 break;
2793 }
2794 break;
2795 case 0x0f: /* grp d9/7 */
2796 switch (rm) {
2797 case 0: /* fprem */
2798 gen_helper_fprem(tcg_env);
2799 break;
2800 case 1: /* fyl2xp1 */
2801 gen_helper_fyl2xp1(tcg_env);
2802 break;
2803 case 2: /* fsqrt */
2804 gen_helper_fsqrt(tcg_env);
2805 break;
2806 case 3: /* fsincos */
2807 gen_helper_fsincos(tcg_env);
2808 break;
2809 case 5: /* fscale */
2810 gen_helper_fscale(tcg_env);
2811 break;
2812 case 4: /* frndint */
2813 gen_helper_frndint(tcg_env);
2814 break;
2815 case 6: /* fsin */
2816 gen_helper_fsin(tcg_env);
2817 break;
2818 default:
2819 case 7: /* fcos */
2820 gen_helper_fcos(tcg_env);
2821 break;
2822 }
2823 break;
2824 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2825 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2826 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2827 {
2828 int op1;
2829
2830 op1 = op & 7;
2831 if (op >= 0x20) {
2832 gen_helper_fp_arith_STN_ST0(op1, opreg);
2833 if (op >= 0x30) {
2834 gen_helper_fpop(tcg_env);
2835 }
2836 } else {
2837 gen_helper_fmov_FT0_STN(tcg_env,
2838 tcg_constant_i32(opreg));
2839 gen_helper_fp_arith_ST0_FT0(op1);
2840 }
2841 }
2842 break;
2843 case 0x02: /* fcom */
2844 case 0x22: /* fcom2, undocumented op */
2845 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2846 gen_helper_fcom_ST0_FT0(tcg_env);
2847 break;
2848 case 0x03: /* fcomp */
2849 case 0x23: /* fcomp3, undocumented op */
2850 case 0x32: /* fcomp5, undocumented op */
2851 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2852 gen_helper_fcom_ST0_FT0(tcg_env);
2853 gen_helper_fpop(tcg_env);
2854 break;
2855 case 0x15: /* da/5 */
2856 switch (rm) {
2857 case 1: /* fucompp */
2858 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2859 gen_helper_fucom_ST0_FT0(tcg_env);
2860 gen_helper_fpop(tcg_env);
2861 gen_helper_fpop(tcg_env);
2862 break;
2863 default:
2864 goto illegal_op;
2865 }
2866 break;
2867 case 0x1c:
2868 switch (rm) {
2869 case 0: /* feni (287 only, just do nop here) */
2870 break;
2871 case 1: /* fdisi (287 only, just do nop here) */
2872 break;
2873 case 2: /* fclex */
2874 gen_helper_fclex(tcg_env);
2875 update_fip = false;
2876 break;
2877 case 3: /* fninit */
2878 gen_helper_fninit(tcg_env);
2879 update_fip = false;
2880 break;
2881 case 4: /* fsetpm (287 only, just do nop here) */
2882 break;
2883 default:
2884 goto illegal_op;
2885 }
2886 break;
2887 case 0x1d: /* fucomi */
2888 if (!(s->cpuid_features & CPUID_CMOV)) {
2889 goto illegal_op;
2890 }
2891 gen_update_cc_op(s);
2892 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2893 gen_helper_fucomi_ST0_FT0(tcg_env);
2894 assume_cc_op(s, CC_OP_EFLAGS);
2895 break;
2896 case 0x1e: /* fcomi */
2897 if (!(s->cpuid_features & CPUID_CMOV)) {
2898 goto illegal_op;
2899 }
2900 gen_update_cc_op(s);
2901 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2902 gen_helper_fcomi_ST0_FT0(tcg_env);
2903 assume_cc_op(s, CC_OP_EFLAGS);
2904 break;
2905 case 0x28: /* ffree sti */
2906 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2907 break;
2908 case 0x2a: /* fst sti */
2909 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2910 break;
2911 case 0x2b: /* fstp sti */
2912 case 0x0b: /* fstp1 sti, undocumented op */
2913 case 0x3a: /* fstp8 sti, undocumented op */
2914 case 0x3b: /* fstp9 sti, undocumented op */
2915 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2916 gen_helper_fpop(tcg_env);
2917 break;
2918 case 0x2c: /* fucom st(i) */
2919 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2920 gen_helper_fucom_ST0_FT0(tcg_env);
2921 break;
2922 case 0x2d: /* fucomp st(i) */
2923 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2924 gen_helper_fucom_ST0_FT0(tcg_env);
2925 gen_helper_fpop(tcg_env);
2926 break;
2927 case 0x33: /* de/3 */
2928 switch (rm) {
2929 case 1: /* fcompp */
2930 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2931 gen_helper_fcom_ST0_FT0(tcg_env);
2932 gen_helper_fpop(tcg_env);
2933 gen_helper_fpop(tcg_env);
2934 break;
2935 default:
2936 goto illegal_op;
2937 }
2938 break;
2939 case 0x38: /* ffreep sti, undocumented op */
2940 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2941 gen_helper_fpop(tcg_env);
2942 break;
2943 case 0x3c: /* df/4 */
2944 switch (rm) {
2945 case 0:
2946 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2947 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2948 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2949 break;
2950 default:
2951 goto illegal_op;
2952 }
2953 break;
2954 case 0x3d: /* fucomip */
2955 if (!(s->cpuid_features & CPUID_CMOV)) {
2956 goto illegal_op;
2957 }
2958 gen_update_cc_op(s);
2959 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2960 gen_helper_fucomi_ST0_FT0(tcg_env);
2961 gen_helper_fpop(tcg_env);
2962 assume_cc_op(s, CC_OP_EFLAGS);
2963 break;
2964 case 0x3e: /* fcomip */
2965 if (!(s->cpuid_features & CPUID_CMOV)) {
2966 goto illegal_op;
2967 }
2968 gen_update_cc_op(s);
2969 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2970 gen_helper_fcomi_ST0_FT0(tcg_env);
2971 gen_helper_fpop(tcg_env);
2972 assume_cc_op(s, CC_OP_EFLAGS);
2973 break;
2974 case 0x10 ... 0x13: /* fcmovxx */
2975 case 0x18 ... 0x1b:
2976 {
2977 int op1;
2978 TCGLabel *l1;
2979 static const uint8_t fcmov_cc[8] = {
2980 (JCC_B << 1),
2981 (JCC_Z << 1),
2982 (JCC_BE << 1),
2983 (JCC_P << 1),
2984 };
2985
2986 if (!(s->cpuid_features & CPUID_CMOV)) {
2987 goto illegal_op;
2988 }
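                /*
                 * The generated branch skips the fmov, so op1 holds the
                 * inverted condition: for fcmovb, for example, the jump is
                 * taken when "not below" and the move is skipped.
                 */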
2989 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2990 l1 = gen_new_label();
2991 gen_jcc_noeob(s, op1, l1);
2992 gen_helper_fmov_ST0_STN(tcg_env,
2993 tcg_constant_i32(opreg));
2994 gen_set_label(l1);
2995 }
2996 break;
2997 default:
2998 goto illegal_op;
2999 }
3000 }
3001
3002 if (update_fip) {
3003 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
3004 offsetof(CPUX86State, segs[R_CS].selector));
3005 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
3006 offsetof(CPUX86State, fpcs));
3007 tcg_gen_st_tl(eip_cur_tl(s),
3008 tcg_env, offsetof(CPUX86State, fpip));
3009 }
3010 return;
3011
3012 illegal_op:
3013 gen_illegal_opcode(s);
3014 }
3015
3016 static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
3017 {
3018 int prefixes = s->prefix;
3019 MemOp dflag = s->dflag;
3020 int b = decode->b + 0x100;
3021 int modrm = s->modrm;
3022 MemOp ot;
3023 int reg, rm, mod, op;
3024
3025 /* now check op code */
3026 switch (b) {
3027 case 0x1c7: /* RDSEED, RDPID with f3 prefix */
3028 mod = (modrm >> 6) & 3;
3029 switch ((modrm >> 3) & 7) {
3030 case 7:
3031 if (mod != 3 ||
3032 (s->prefix & PREFIX_REPNZ)) {
3033 goto illegal_op;
3034 }
3035 if (s->prefix & PREFIX_REPZ) {
3036 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
3037 goto illegal_op;
3038 }
3039 gen_helper_rdpid(s->T0, tcg_env);
3040 rm = (modrm & 7) | REX_B(s);
3041 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3042 break;
3043 } else {
3044 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3045 goto illegal_op;
3046 }
3047 goto do_rdrand;
3048 }
3049
3050 case 6: /* RDRAND */
3051 if (mod != 3 ||
3052 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
3053 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3054 goto illegal_op;
3055 }
3056 do_rdrand:
3057 translator_io_start(&s->base);
3058 gen_helper_rdrand(s->T0, tcg_env);
3059 rm = (modrm & 7) | REX_B(s);
3060 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3061 assume_cc_op(s, CC_OP_EFLAGS);
3062 break;
3063
3064 default:
3065 goto illegal_op;
3066 }
3067 break;
3068
3069 case 0x100:
3070 mod = (modrm >> 6) & 3;
3071 op = (modrm >> 3) & 7;
3072 switch(op) {
3073 case 0: /* sldt */
3074 if (!PE(s) || VM86(s))
3075 goto illegal_op;
3076 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3077 break;
3078 }
3079 gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
3080 tcg_gen_ld32u_tl(s->T0, tcg_env,
3081 offsetof(CPUX86State, ldt.selector));
3082 ot = mod == 3 ? dflag : MO_16;
3083 gen_st_modrm(s, decode, ot);
3084 break;
3085 case 2: /* lldt */
3086 if (!PE(s) || VM86(s))
3087 goto illegal_op;
3088 if (check_cpl0(s)) {
3089 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
3090 gen_ld_modrm(s, decode, MO_16);
3091 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3092 gen_helper_lldt(tcg_env, s->tmp2_i32);
3093 }
3094 break;
3095 case 1: /* str */
3096 if (!PE(s) || VM86(s))
3097 goto illegal_op;
3098 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3099 break;
3100 }
3101 gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
3102 tcg_gen_ld32u_tl(s->T0, tcg_env,
3103 offsetof(CPUX86State, tr.selector));
3104 ot = mod == 3 ? dflag : MO_16;
3105 gen_st_modrm(s, decode, ot);
3106 break;
3107 case 3: /* ltr */
3108 if (!PE(s) || VM86(s))
3109 goto illegal_op;
3110 if (check_cpl0(s)) {
3111 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3112 gen_ld_modrm(s, decode, MO_16);
3113 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3114 gen_helper_ltr(tcg_env, s->tmp2_i32);
3115 }
3116 break;
3117 case 4: /* verr */
3118 case 5: /* verw */
3119 if (!PE(s) || VM86(s))
3120 goto illegal_op;
3121 gen_ld_modrm(s, decode, MO_16);
3122 gen_update_cc_op(s);
3123 if (op == 4) {
3124 gen_helper_verr(tcg_env, s->T0);
3125 } else {
3126 gen_helper_verw(tcg_env, s->T0);
3127 }
3128 assume_cc_op(s, CC_OP_EFLAGS);
3129 break;
3130 default:
3131 goto illegal_op;
3132 }
3133 break;
3134
3135 case 0x101:
3136 switch (modrm) {
3137 CASE_MODRM_MEM_OP(0): /* sgdt */
3138 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3139 break;
3140 }
3141 gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3142 gen_lea_modrm(s, decode);
3143 tcg_gen_ld32u_tl(s->T0,
3144 tcg_env, offsetof(CPUX86State, gdt.limit));
3145 gen_op_st_v(s, MO_16, s->T0, s->A0);
3146 gen_add_A0_im(s, 2);
3147 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3148 /*
3149 * NB: Despite a confusing description in Intel CPU documentation,
3150 * all 32 bits are written regardless of operand size.
3151 */
3152 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3153 break;
3154
3155 case 0xc8: /* monitor */
3156 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3157 goto illegal_op;
3158 }
3159 gen_update_cc_op(s);
3160 gen_update_eip_cur(s);
3161 gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3162 gen_helper_monitor(tcg_env, s->A0);
3163 break;
3164
3165 case 0xc9: /* mwait */
3166 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3167 goto illegal_op;
3168 }
3169 gen_update_cc_op(s);
3170 gen_update_eip_cur(s);
3171 gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3172 s->base.is_jmp = DISAS_NORETURN;
3173 break;
3174
3175 case 0xca: /* clac */
3176 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3177 || CPL(s) != 0) {
3178 goto illegal_op;
3179 }
3180 gen_reset_eflags(s, AC_MASK);
3181 s->base.is_jmp = DISAS_EOB_NEXT;
3182 break;
3183
3184 case 0xcb: /* stac */
3185 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3186 || CPL(s) != 0) {
3187 goto illegal_op;
3188 }
3189 gen_set_eflags(s, AC_MASK);
3190 s->base.is_jmp = DISAS_EOB_NEXT;
3191 break;
3192
3193 CASE_MODRM_MEM_OP(1): /* sidt */
3194 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3195 break;
3196 }
3197 gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3198 gen_lea_modrm(s, decode);
3199 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3200 gen_op_st_v(s, MO_16, s->T0, s->A0);
3201 gen_add_A0_im(s, 2);
3202 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3203 /*
3204 * NB: Despite a confusing description in Intel CPU documentation,
3205 * all 32 bits are written regardless of operand size.
3206 */
3207 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3208 break;
3209
3210 case 0xd0: /* xgetbv */
3211 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3212 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3213 goto illegal_op;
3214 }
3215 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3216 gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3217 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3218 break;
3219
3220 case 0xd1: /* xsetbv */
3221 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3222 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3223 goto illegal_op;
3224 }
3225 gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3226 if (!check_cpl0(s)) {
3227 break;
3228 }
3229 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3230 cpu_regs[R_EDX]);
3231 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3232 gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3233 /* End TB because translation flags may change. */
3234 s->base.is_jmp = DISAS_EOB_NEXT;
3235 break;
3236
3237 case 0xd8: /* VMRUN */
3238 if (!SVME(s) || !PE(s)) {
3239 goto illegal_op;
3240 }
3241 if (!check_cpl0(s)) {
3242 break;
3243 }
3244 gen_update_cc_op(s);
3245 gen_update_eip_cur(s);
3246 /*
3247 * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
3248 * The usual gen_eob() handling is performed on vmexit after
3249 * host state is reloaded.
3250 */
3251 gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3252 cur_insn_len_i32(s));
3253 tcg_gen_exit_tb(NULL, 0);
3254 s->base.is_jmp = DISAS_NORETURN;
3255 break;
3256
3257 case 0xd9: /* VMMCALL */
3258 if (!SVME(s)) {
3259 goto illegal_op;
3260 }
3261 gen_update_cc_op(s);
3262 gen_update_eip_cur(s);
3263 gen_helper_vmmcall(tcg_env);
3264 break;
3265
3266 case 0xda: /* VMLOAD */
3267 if (!SVME(s) || !PE(s)) {
3268 goto illegal_op;
3269 }
3270 if (!check_cpl0(s)) {
3271 break;
3272 }
3273 gen_update_cc_op(s);
3274 gen_update_eip_cur(s);
3275 gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3276 break;
3277
3278 case 0xdb: /* VMSAVE */
3279 if (!SVME(s) || !PE(s)) {
3280 goto illegal_op;
3281 }
3282 if (!check_cpl0(s)) {
3283 break;
3284 }
3285 gen_update_cc_op(s);
3286 gen_update_eip_cur(s);
3287 gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3288 break;
3289
3290 case 0xdc: /* STGI */
3291 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3292 || !PE(s)) {
3293 goto illegal_op;
3294 }
3295 if (!check_cpl0(s)) {
3296 break;
3297 }
3298 gen_update_cc_op(s);
3299 gen_helper_stgi(tcg_env);
3300 s->base.is_jmp = DISAS_EOB_NEXT;
3301 break;
3302
3303 case 0xdd: /* CLGI */
3304 if (!SVME(s) || !PE(s)) {
3305 goto illegal_op;
3306 }
3307 if (!check_cpl0(s)) {
3308 break;
3309 }
3310 gen_update_cc_op(s);
3311 gen_update_eip_cur(s);
3312 gen_helper_clgi(tcg_env);
3313 break;
3314
3315 case 0xde: /* SKINIT */
3316 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3317 || !PE(s)) {
3318 goto illegal_op;
3319 }
3320 gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3321 /* If not intercepted, not implemented -- raise #UD. */
3322 goto illegal_op;
3323
3324 case 0xdf: /* INVLPGA */
3325 if (!SVME(s) || !PE(s)) {
3326 goto illegal_op;
3327 }
3328 if (!check_cpl0(s)) {
3329 break;
3330 }
3331 gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
3332 if (s->aflag == MO_64) {
3333 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3334 } else {
3335 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3336 }
3337 gen_helper_flush_page(tcg_env, s->A0);
3338 s->base.is_jmp = DISAS_EOB_NEXT;
3339 break;
3340
3341 CASE_MODRM_MEM_OP(2): /* lgdt */
3342 if (!check_cpl0(s)) {
3343 break;
3344 }
3345 gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3346 gen_lea_modrm(s, decode);
3347 gen_op_ld_v(s, MO_16, s->T1, s->A0);
3348 gen_add_A0_im(s, 2);
3349 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3350 if (dflag == MO_16) {
3351 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3352 }
3353 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3354 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3355 break;
3356
3357 CASE_MODRM_MEM_OP(3): /* lidt */
3358 if (!check_cpl0(s)) {
3359 break;
3360 }
3361 gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3362 gen_lea_modrm(s, decode);
3363 gen_op_ld_v(s, MO_16, s->T1, s->A0);
3364 gen_add_A0_im(s, 2);
3365 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3366 if (dflag == MO_16) {
3367 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3368 }
3369 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3370 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3371 break;
3372
3373 CASE_MODRM_OP(4): /* smsw */
3374 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3375 break;
3376 }
3377 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3378 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3379 /*
3380 * In 32-bit mode, the higher 16 bits of the destination
3381 * register are undefined. In practice CR0[31:0] is stored
3382 * just like in 64-bit mode.
3383 */
3384 mod = (modrm >> 6) & 3;
3385 ot = (mod != 3 ? MO_16 : s->dflag);
3386 gen_st_modrm(s, decode, ot);
3387 break;
3388 case 0xee: /* rdpkru */
3389 if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
3390 goto illegal_op;
3391 }
3392 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3393 gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3394 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3395 break;
3396 case 0xef: /* wrpkru */
3397 if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
3398 goto illegal_op;
3399 }
3400 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3401 cpu_regs[R_EDX]);
3402 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3403 gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3404 break;
3405
3406 CASE_MODRM_OP(6): /* lmsw */
3407 if (!check_cpl0(s)) {
3408 break;
3409 }
3410 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3411 gen_ld_modrm(s, decode, MO_16);
3412 /*
3413 * Only the 4 lower bits of CR0 are modified.
3414 * PE cannot be set to zero if already set to one.
3415 */
3416 tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
3417 tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3418 tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
3419 tcg_gen_or_tl(s->T0, s->T0, s->T1);
3420 gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3421 s->base.is_jmp = DISAS_EOB_NEXT;
3422 break;
3423
3424 CASE_MODRM_MEM_OP(7): /* invlpg */
3425 if (!check_cpl0(s)) {
3426 break;
3427 }
3428 gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3429 gen_lea_modrm(s, decode);
3430 gen_helper_flush_page(tcg_env, s->A0);
3431 s->base.is_jmp = DISAS_EOB_NEXT;
3432 break;
3433
3434 case 0xf8: /* swapgs */
3435 #ifdef TARGET_X86_64
3436 if (CODE64(s)) {
3437 if (check_cpl0(s)) {
3438 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3439 tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3440 offsetof(CPUX86State, kernelgsbase));
3441 tcg_gen_st_tl(s->T0, tcg_env,
3442 offsetof(CPUX86State, kernelgsbase));
3443 }
3444 break;
3445 }
3446 #endif
3447 goto illegal_op;
3448
3449 case 0xf9: /* rdtscp */
3450 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3451 goto illegal_op;
3452 }
3453 gen_update_cc_op(s);
3454 gen_update_eip_cur(s);
3455 translator_io_start(&s->base);
3456 gen_helper_rdtsc(tcg_env);
3457 gen_helper_rdpid(s->T0, tcg_env);
3458 gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3459 break;
3460
3461 default:
3462 goto illegal_op;
3463 }
3464 break;
3465
3466 case 0x11a:
3467 if (s->flags & HF_MPX_EN_MASK) {
3468 mod = (modrm >> 6) & 3;
3469 reg = ((modrm >> 3) & 7) | REX_R(s);
3470 if (prefixes & PREFIX_REPZ) {
3471 /* bndcl */
3472 if (reg >= 4
3473 || s->aflag == MO_16) {
3474 goto illegal_op;
3475 }
3476 gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
3477 } else if (prefixes & PREFIX_REPNZ) {
3478 /* bndcu */
3479 if (reg >= 4
3480 || s->aflag == MO_16) {
3481 goto illegal_op;
3482 }
3483 TCGv_i64 notu = tcg_temp_new_i64();
3484 tcg_gen_not_i64(notu, cpu_bndu[reg]);
3485 gen_bndck(s, decode, TCG_COND_GTU, notu);
3486 } else if (prefixes & PREFIX_DATA) {
3487 /* bndmov -- from reg/mem */
3488 if (reg >= 4 || s->aflag == MO_16) {
3489 goto illegal_op;
3490 }
3491 if (mod == 3) {
3492 int reg2 = (modrm & 7) | REX_B(s);
3493 if (reg2 >= 4) {
3494 goto illegal_op;
3495 }
3496 if (s->flags & HF_MPX_IU_MASK) {
3497 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
3498 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
3499 }
3500 } else {
3501 gen_lea_modrm(s, decode);
3502 if (CODE64(s)) {
3503 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3504 s->mem_index, MO_LEUQ);
3505 tcg_gen_addi_tl(s->A0, s->A0, 8);
3506 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3507 s->mem_index, MO_LEUQ);
3508 } else {
3509 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3510 s->mem_index, MO_LEUL);
3511 tcg_gen_addi_tl(s->A0, s->A0, 4);
3512 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3513 s->mem_index, MO_LEUL);
3514 }
3515 /* bnd registers are now in-use */
3516 gen_set_hflag(s, HF_MPX_IU_MASK);
3517 }
3518 } else if (mod != 3) {
3519 /* bndldx */
3520 AddressParts a = decode->mem;
3521 if (reg >= 4
3522 || s->aflag == MO_16
3523 || a.base < -1) {
3524 goto illegal_op;
3525 }
3526 if (a.base >= 0) {
3527 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3528 } else {
3529 tcg_gen_movi_tl(s->A0, 0);
3530 }
3531 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3532 if (a.index >= 0) {
3533 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3534 } else {
3535 tcg_gen_movi_tl(s->T0, 0);
3536 }
3537 if (CODE64(s)) {
3538 gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
3539 tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
3540 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
3541 } else {
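                    /*
                     * The 32-bit helper packs both bounds into one i64:
                     * bits [31:0] hold the lower bound, bits [63:32] the
                     * upper bound.
                     */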
3542 gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
3543 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
3544 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
3545 }
3546 gen_set_hflag(s, HF_MPX_IU_MASK);
3547 }
3548 }
3549 break;
3550 case 0x11b:
3551 if (s->flags & HF_MPX_EN_MASK) {
3552 mod = (modrm >> 6) & 3;
3553 reg = ((modrm >> 3) & 7) | REX_R(s);
3554 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
3555 /* bndmk */
3556 if (reg >= 4
3557 || s->aflag == MO_16) {
3558 goto illegal_op;
3559 }
3560 AddressParts a = decode->mem;
3561 if (a.base >= 0) {
3562 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
3563 if (!CODE64(s)) {
3564 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
3565 }
3566 } else if (a.base == -1) {
3567 /* no base register: the lower bound is 0 */
3568 tcg_gen_movi_i64(cpu_bndl[reg], 0);
3569 } else {
3570 /* rip-relative generates #ud */
3571 goto illegal_op;
3572 }
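                /*
                 * cpu_bndu holds the upper bound in one's-complement form
                 * (cf. the bndcu handling above), hence the tcg_gen_not_tl
                 * on the computed limit.
                 */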
3573 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
3574 if (!CODE64(s)) {
3575 tcg_gen_ext32u_tl(s->A0, s->A0);
3576 }
3577 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
3578 /* bnd registers are now in-use */
3579 gen_set_hflag(s, HF_MPX_IU_MASK);
3580 break;
3581 } else if (prefixes & PREFIX_REPNZ) {
3582 /* bndcn */
3583 if (reg >= 4
3584 || s->aflag == MO_16) {
3585 goto illegal_op;
3586 }
3587 gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
3588 } else if (prefixes & PREFIX_DATA) {
3589 /* bndmov -- to reg/mem */
3590 if (reg >= 4 || s->aflag == MO_16) {
3591 goto illegal_op;
3592 }
3593 if (mod == 3) {
3594 int reg2 = (modrm & 7) | REX_B(s);
3595 if (reg2 >= 4) {
3596 goto illegal_op;
3597 }
3598 if (s->flags & HF_MPX_IU_MASK) {
3599 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
3600 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
3601 }
3602 } else {
3603 gen_lea_modrm(s, decode);
3604 if (CODE64(s)) {
3605 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3606 s->mem_index, MO_LEUQ);
3607 tcg_gen_addi_tl(s->A0, s->A0, 8);
3608 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3609 s->mem_index, MO_LEUQ);
3610 } else {
3611 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3612 s->mem_index, MO_LEUL);
3613 tcg_gen_addi_tl(s->A0, s->A0, 4);
3614 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3615 s->mem_index, MO_LEUL);
3616 }
3617 }
3618 } else if (mod != 3) {
3619 /* bndstx */
3620 AddressParts a = decode->mem;
3621 if (reg >= 4
3622 || s->aflag == MO_16
3623 || a.base < -1) {
3624 goto illegal_op;
3625 }
3626 if (a.base >= 0) {
3627 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3628 } else {
3629 tcg_gen_movi_tl(s->A0, 0);
3630 }
3631 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3632 if (a.index >= 0) {
3633 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3634 } else {
3635 tcg_gen_movi_tl(s->T0, 0);
3636 }
3637 if (CODE64(s)) {
3638 gen_helper_bndstx64(tcg_env, s->A0, s->T0,
3639 cpu_bndl[reg], cpu_bndu[reg]);
3640 } else {
3641 gen_helper_bndstx32(tcg_env, s->A0, s->T0,
3642 cpu_bndl[reg], cpu_bndu[reg]);
3643 }
3644 }
3645 }
3646 break;
3647 default:
3648 g_assert_not_reached();
3649 }
3650 return;
3651 illegal_op:
3652 gen_illegal_opcode(s);
3653 }
3654
3655 #include "decode-new.c.inc"
3656
3657 void tcg_x86_init(void)
3658 {
3659 static const char reg_names[CPU_NB_REGS][4] = {
3660 #ifdef TARGET_X86_64
3661 [R_EAX] = "rax",
3662 [R_EBX] = "rbx",
3663 [R_ECX] = "rcx",
3664 [R_EDX] = "rdx",
3665 [R_ESI] = "rsi",
3666 [R_EDI] = "rdi",
3667 [R_EBP] = "rbp",
3668 [R_ESP] = "rsp",
3669 [8] = "r8",
3670 [9] = "r9",
3671 [10] = "r10",
3672 [11] = "r11",
3673 [12] = "r12",
3674 [13] = "r13",
3675 [14] = "r14",
3676 [15] = "r15",
3677 #else
3678 [R_EAX] = "eax",
3679 [R_EBX] = "ebx",
3680 [R_ECX] = "ecx",
3681 [R_EDX] = "edx",
3682 [R_ESI] = "esi",
3683 [R_EDI] = "edi",
3684 [R_EBP] = "ebp",
3685 [R_ESP] = "esp",
3686 #endif
3687 };
3688 static const char eip_name[] = {
3689 #ifdef TARGET_X86_64
3690 "rip"
3691 #else
3692 "eip"
3693 #endif
3694 };
3695 static const char seg_base_names[6][8] = {
3696 [R_CS] = "cs_base",
3697 [R_DS] = "ds_base",
3698 [R_ES] = "es_base",
3699 [R_FS] = "fs_base",
3700 [R_GS] = "gs_base",
3701 [R_SS] = "ss_base",
3702 };
3703 static const char bnd_regl_names[4][8] = {
3704 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
3705 };
3706 static const char bnd_regu_names[4][8] = {
3707 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
3708 };
3709 int i;
3710
3711 cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
3712 offsetof(CPUX86State, cc_op), "cc_op");
3713 cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
3714 "cc_dst");
3715 cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
3716 "cc_src");
3717 cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
3718 "cc_src2");
3719 cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
3720
3721 for (i = 0; i < CPU_NB_REGS; ++i) {
3722 cpu_regs[i] = tcg_global_mem_new(tcg_env,
3723 offsetof(CPUX86State, regs[i]),
3724 reg_names[i]);
3725 }
3726
3727 for (i = 0; i < 6; ++i) {
3728 cpu_seg_base[i]
3729 = tcg_global_mem_new(tcg_env,
3730 offsetof(CPUX86State, segs[i].base),
3731 seg_base_names[i]);
3732 }
3733
3734 for (i = 0; i < 4; ++i) {
3735 cpu_bndl[i]
3736 = tcg_global_mem_new_i64(tcg_env,
3737 offsetof(CPUX86State, bnd_regs[i].lb),
3738 bnd_regl_names[i]);
3739 cpu_bndu[i]
3740 = tcg_global_mem_new_i64(tcg_env,
3741 offsetof(CPUX86State, bnd_regs[i].ub),
3742 bnd_regu_names[i]);
3743 }
3744 }
3745
3746 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
3747 {
3748 DisasContext *dc = container_of(dcbase, DisasContext, base);
3749 CPUX86State *env = cpu_env(cpu);
3750 uint32_t flags = dc->base.tb->flags;
3751 uint32_t cflags = tb_cflags(dc->base.tb);
3752 int cpl = (flags >> HF_CPL_SHIFT) & 3;
3753 int iopl = (flags >> IOPL_SHIFT) & 3;
3754
3755 dc->cs_base = dc->base.tb->cs_base;
3756 dc->pc_save = dc->base.pc_next;
3757 dc->flags = flags;
3758 #ifndef CONFIG_USER_ONLY
3759 dc->cpl = cpl;
3760 dc->iopl = iopl;
3761 #endif
3762
3763 /* We make some simplifying assumptions; validate they're correct. */
3764 g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
3765 g_assert(CPL(dc) == cpl);
3766 g_assert(IOPL(dc) == iopl);
3767 g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
3768 g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
3769 g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
3770 g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
3771 g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
3772 g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
3773 g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
3774 g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
3775
3776 dc->cc_op = CC_OP_DYNAMIC;
3777 dc->cc_op_dirty = false;
3778 /* select memory access functions */
3779 dc->mem_index = cpu_mmu_index(cpu, false);
3780 dc->cpuid_features = env->features[FEAT_1_EDX];
3781 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
3782 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
3783 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
3784 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
3785 dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
3786 dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
3787 dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
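    /* Direct jump chaining is disabled by CF_NO_GOTO_TB and by RF/TF/IRQ inhibition. */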
3788 dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
3789 (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
3790
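    /* TCG temporaries reused throughout the translation block. */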
3791 dc->T0 = tcg_temp_new();
3792 dc->T1 = tcg_temp_new();
3793 dc->A0 = tcg_temp_new();
3794
3795 dc->tmp1_i64 = tcg_temp_new_i64();
3796 dc->tmp2_i32 = tcg_temp_new_i32();
3797 dc->cc_srcT = tcg_temp_new();
3798 }
3799
3800 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
3801 {
3802 }
3803
3804 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
3805 {
3806 DisasContext *dc = container_of(dcbase, DisasContext, base);
3807 target_ulong pc_arg = dc->base.pc_next;
3808
3809 dc->prev_insn_start = dc->base.insn_start;
3810 dc->prev_insn_end = tcg_last_op();
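    /* With PC-relative TBs (CF_PCREL), record only the offset within the page. */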
3811 if (tb_cflags(dcbase->tb) & CF_PCREL) {
3812 pc_arg &= ~TARGET_PAGE_MASK;
3813 }
3814 tcg_gen_insn_start(pc_arg, dc->cc_op);
3815 }
3816
3817 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
3818 {
3819 DisasContext *dc = container_of(dcbase, DisasContext, base);
3820 bool orig_cc_op_dirty = dc->cc_op_dirty;
3821 CCOp orig_cc_op = dc->cc_op;
3822 target_ulong orig_pc_save = dc->pc_save;
3823
3824 #ifdef TARGET_VSYSCALL_PAGE
3825 /*
3826 * Detect entry into the vsyscall page and invoke the syscall.
3827 */
3828 if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
3829 gen_exception(dc, EXCP_VSYSCALL);
3830 dc->base.pc_next = dc->pc + 1;
3831 return;
3832 }
3833 #endif
3834
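    /*
     * disas_insn() exits abnormally via siglongjmp: value 1 if the
     * instruction exceeded the 15-byte length limit (raise #GP), value 2
     * if the instruction could not be completed in this TB (e.g. it
     * crosses a page boundary) and must be retried in a new TB.
     */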
3835 switch (sigsetjmp(dc->jmpbuf, 0)) {
3836 case 0:
3837 disas_insn(dc, cpu);
3838 break;
3839 case 1:
3840 gen_exception_gpf(dc);
3841 break;
3842 case 2:
3843 /* Restore state that may affect the next instruction. */
3844 dc->pc = dc->base.pc_next;
3845 assert(dc->cc_op_dirty == orig_cc_op_dirty);
3846 assert(dc->cc_op == orig_cc_op);
3847 assert(dc->pc_save == orig_pc_save);
3848 dc->base.num_insns--;
3849 tcg_remove_ops_after(dc->prev_insn_end);
3850 dc->base.insn_start = dc->prev_insn_start;
3851 dc->base.is_jmp = DISAS_TOO_MANY;
3852 return;
3853 default:
3854 g_assert_not_reached();
3855 }
3856
3857 /*
3858 * Instruction decoding completed (possibly with #GP if the
3859 * 15-byte instruction length limit was exceeded).
3860 */
3861 dc->base.pc_next = dc->pc;
3862 if (dc->base.is_jmp == DISAS_NEXT) {
3863 if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
3864 /*
3865 * In single-step mode, we translate only one instruction and
3866 * then raise the debug exception.
3867 * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
3868 * the flag and end the translation to give pending IRQs a
3869 * chance to be delivered.
3870 */
3871 dc->base.is_jmp = DISAS_EOB_NEXT;
3872 } else if (!translator_is_same_page(&dc->base, dc->base.pc_next)) {
3873 dc->base.is_jmp = DISAS_TOO_MANY;
3874 }
3875 }
3876 }
3877
3878 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
3879 {
3880 DisasContext *dc = container_of(dcbase, DisasContext, base);
3881
3882 switch (dc->base.is_jmp) {
3883 case DISAS_NORETURN:
3884 /*
3885 * Most instructions should not use DISAS_NORETURN, as that suppresses
3886 * the handling of hflags normally done by gen_eob(). We can
3887 * get here:
3888 * - for exceptions and interrupts
3889 * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
3890 * - for VMRUN because RF/TF handling for the host is done after vmexit,
3891 * and INHIBIT_IRQ is loaded from the VMCB
3892 * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
3893 * the helpers themselves handle the tasks normally done by gen_eob().
3894 */
3895 break;
3896 case DISAS_TOO_MANY:
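        /* Translation stopped early; jump to the next instruction. */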
3897 gen_update_cc_op(dc);
3898 gen_jmp_rel_csize(dc, 0, 0);
3899 break;
3900 case DISAS_EOB_NEXT:
3901 case DISAS_EOB_INHIBIT_IRQ:
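        /* For these cases, EIP must point at the next insn before gen_eob(). */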
3902 assert(dc->base.pc_next == dc->pc);
3903 gen_update_eip_cur(dc);
3904 /* fall through */
3905 case DISAS_EOB_ONLY:
3906 case DISAS_EOB_RECHECK_TF:
3907 case DISAS_JUMP:
3908 gen_eob(dc, dc->base.is_jmp);
3909 break;
3910 default:
3911 g_assert_not_reached();
3912 }
3913 }
3914
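/* Hooks invoked by the generic translator_loop() driver. */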
3915 static const TranslatorOps i386_tr_ops = {
3916 .init_disas_context = i386_tr_init_disas_context,
3917 .tb_start = i386_tr_tb_start,
3918 .insn_start = i386_tr_insn_start,
3919 .translate_insn = i386_tr_translate_insn,
3920 .tb_stop = i386_tr_tb_stop,
3921 };
3922
3923 void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
3924 int *max_insns, vaddr pc, void *host_pc)
3925 {
3926 DisasContext dc;
3927
3928 translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
3929 }
3930