xref: /qemu/target/i386/tcg/seg_helper.c (revision ad441b8b7913a26b18edbc076c74ca0cdbfa4ee5)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "accel/tcg/cpu-ldst.h"
26 #include "accel/tcg/probe.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 #include "access.h"
31 #include "tcg-cpu.h"
32 
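/*
 * SET_ESP writes only the portion of ESP selected by sp_mask: the low 16
 * bits for a 16-bit stack, a zero-extended 32-bit value for a 32-bit stack,
 * and the full value otherwise (64-bit stacks on TARGET_X86_64 builds).
 */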
33 #ifdef TARGET_X86_64
34 #define SET_ESP(val, sp_mask)                                   \
35     do {                                                        \
36         if ((sp_mask) == 0xffff) {                              \
37             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
38                 ((val) & 0xffff);                               \
39         } else if ((sp_mask) == 0xffffffffLL) {                 \
40             env->regs[R_ESP] = (uint32_t)(val);                 \
41         } else {                                                \
42             env->regs[R_ESP] = (val);                           \
43         }                                                       \
44     } while (0)
45 #else
46 #define SET_ESP(val, sp_mask)                                   \
47     do {                                                        \
48         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
49             ((val) & (sp_mask));                                \
50     } while (0)
51 #endif
52 
53 /* XXX: use mmu_index to have proper DPL support */
54 typedef struct StackAccess
55 {
56     CPUX86State *env;
57     uintptr_t ra;
58     target_ulong ss_base;
59     target_ulong sp;
60     target_ulong sp_mask;
61     int mmu_index;
62 } StackAccess;
63 
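/*
 * Helpers that push to / pop from the stack described by a StackAccess:
 * the address is ss_base + (sp & sp_mask) and the access uses the
 * StackAccess's mmu_index, so it honours the privilege level of the
 * target stack, with ra used for precise exception unwinding.
 */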
64 static void pushw(StackAccess *sa, uint16_t val)
65 {
66     sa->sp -= 2;
67     cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
68                       val, sa->mmu_index, sa->ra);
69 }
70 
71 static void pushl(StackAccess *sa, uint32_t val)
72 {
73     sa->sp -= 4;
74     cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
75                       val, sa->mmu_index, sa->ra);
76 }
77 
78 static uint16_t popw(StackAccess *sa)
79 {
80     uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
81                                       sa->ss_base + (sa->sp & sa->sp_mask),
82                                       sa->mmu_index, sa->ra);
83     sa->sp += 2;
84     return ret;
85 }
86 
87 static uint32_t popl(StackAccess *sa)
88 {
89     uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
90                                      sa->ss_base + (sa->sp & sa->sp_mask),
91                                      sa->mmu_index, sa->ra);
92     sa->sp += 4;
93     return ret;
94 }
95 
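/*
 * Collect the PG_MODE_* flags describing the current paging configuration
 * from CR0 (PG, WP), CR4 (PAE, PSE, SMEP, PKE, PKS, LA57), EFER (NXE)
 * and long-mode state.
 */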
96 int get_pg_mode(CPUX86State *env)
97 {
98     int pg_mode = PG_MODE_PG;
99     if (!(env->cr[0] & CR0_PG_MASK)) {
100         return 0;
101     }
102     if (env->cr[0] & CR0_WP_MASK) {
103         pg_mode |= PG_MODE_WP;
104     }
105     if (env->cr[4] & CR4_PAE_MASK) {
106         pg_mode |= PG_MODE_PAE;
107         if (env->efer & MSR_EFER_NXE) {
108             pg_mode |= PG_MODE_NXE;
109         }
110     }
111     if (env->cr[4] & CR4_PSE_MASK) {
112         pg_mode |= PG_MODE_PSE;
113     }
114     if (env->cr[4] & CR4_SMEP_MASK) {
115         pg_mode |= PG_MODE_SMEP;
116     }
117     if (env->hflags & HF_LMA_MASK) {
118         pg_mode |= PG_MODE_LMA;
119         if (env->cr[4] & CR4_PKE_MASK) {
120             pg_mode |= PG_MODE_PKE;
121         }
122         if (env->cr[4] & CR4_PKS_MASK) {
123             pg_mode |= PG_MODE_PKS;
124         }
125         if (env->cr[4] & CR4_LA57_MASK) {
126             pg_mode |= PG_MODE_LA57;
127         }
128     }
129     return pg_mode;
130 }
131 
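/*
 * MMU index used for implicit supervisor accesses (descriptor tables,
 * TSS, ...): SMAP is bypassed when it is disabled, or when EFLAGS.AC is
 * set for an access on behalf of pl < 3; the +1 variant selects the
 * 32-bit (non-LMA) index.
 */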
132 static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
133 {
134     int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
135     int mmu_index_base =
136         !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
137         (pl < 3 && (env->eflags & AC_MASK)
138          ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
139 
140     return mmu_index_base + mmu_index_32;
141 }
142 
143 int cpu_mmu_index_kernel(CPUX86State *env)
144 {
145     return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
146 }
147 
148 /* return non-zero on error */
149 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
150                                uint32_t *e2_ptr, int selector,
151                                uintptr_t retaddr)
152 {
153     SegmentCache *dt;
154     int index;
155     target_ulong ptr;
156 
157     if (selector & 0x4) {
158         dt = &env->ldt;
159     } else {
160         dt = &env->gdt;
161     }
162     index = selector & ~7;
163     if ((index + 7) > dt->limit) {
164         return -1;
165     }
166     ptr = dt->base + index;
167     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
168     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
169     return 0;
170 }
171 
172 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
173                                uint32_t *e2_ptr, int selector)
174 {
175     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
176 }
177 
178 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
179 {
180     unsigned int limit;
181 
182     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
183     if (e2 & DESC_G_MASK) {
184         limit = (limit << 12) | 0xfff;
185     }
186     return limit;
187 }
188 
189 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
190 {
191     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
192 }
193 
194 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
195                                          uint32_t e2)
196 {
197     sc->base = get_seg_base(e1, e2);
198     sc->limit = get_seg_limit(e1, e2);
199     sc->flags = e2;
200 }
201 
202 /* init the segment cache in vm86 mode. */
203 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
204 {
205     selector &= 0xffff;
206 
207     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
208                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
209                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
210 }
211 
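/*
 * Fetch the ring-dpl stack pointer and stack segment (SS:ESP, or SS:SP
 * for a 16-bit TSS) from the current TSS, raising #TS if the TSS is too
 * small to contain them.
 */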
212 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
213                                        uint32_t *esp_ptr, int dpl,
214                                        uintptr_t retaddr)
215 {
216     X86CPU *cpu = env_archcpu(env);
217     int type, index, shift;
218 
219 #if 0
220     {
221         int i;
222         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
223         for (i = 0; i < env->tr.limit; i++) {
224             printf("%02x ", env->tr.base[i]);
225             if ((i & 7) == 7) {
226                 printf("\n");
227             }
228         }
229         printf("\n");
230     }
231 #endif
232 
233     if (!(env->tr.flags & DESC_P_MASK)) {
234         cpu_abort(CPU(cpu), "invalid tss");
235     }
236     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
237     if ((type & 7) != 1) {
238         cpu_abort(CPU(cpu), "invalid tss type");
239     }
240     shift = type >> 3;
241     index = (dpl * 4 + 2) << shift;
242     if (index + (4 << shift) - 1 > env->tr.limit) {
243         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
244     }
245     if (shift == 0) {
246         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
247         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
248     } else {
249         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
250         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
251     }
252 }
253 
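/*
 * Validate and load one segment register as part of a task switch.
 * Invalid selectors or access rights raise #TS; a segment that is merely
 * not present raises #NP.
 */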
254 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
255                          int cpl, uintptr_t retaddr)
256 {
257     uint32_t e1, e2;
258     int rpl, dpl;
259 
260     if ((selector & 0xfffc) != 0) {
261         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
262             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
263         }
264         if (!(e2 & DESC_S_MASK)) {
265             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
266         }
267         rpl = selector & 3;
268         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
269         if (seg_reg == R_CS) {
270             if (!(e2 & DESC_CS_MASK)) {
271                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
272             }
273             if (dpl != rpl) {
274                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
275             }
276         } else if (seg_reg == R_SS) {
277             /* SS must be writable data */
278             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
279                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
280             }
281             if (dpl != cpl || dpl != rpl) {
282                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
283             }
284         } else {
285             /* not readable code */
286             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
287                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
288             }
289             /* if data or non-conforming code, check the rights */
290             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
291                 if (dpl < cpl || dpl < rpl) {
292                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
293                 }
294             }
295         }
296         if (!(e2 & DESC_P_MASK)) {
297             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
298         }
299         cpu_x86_load_seg_cache(env, seg_reg, selector,
300                                get_seg_base(e1, e2),
301                                get_seg_limit(e1, e2),
302                                e2);
303     } else {
304         if (seg_reg == R_SS || seg_reg == R_CS) {
305             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
306         }
307     }
308 }
309 
310 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
311                          uintptr_t retaddr)
312 {
313     target_ulong ptr = env->gdt.base + (tss_selector & ~7);
314     uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
315 
316     if (value) {
317         e2 |= DESC_TSS_BUSY_MASK;
318     } else {
319         e2 &= ~DESC_TSS_BUSY_MASK;
320     }
321 
322     cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
323 }
324 
325 #define SWITCH_TSS_JMP  0
326 #define SWITCH_TSS_IRET 1
327 #define SWITCH_TSS_CALL 2
328 
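/*
 * Perform a hardware task switch: validate the new TSS descriptor, save
 * the outgoing CPU state into the current TSS (via X86Access so the
 * stores cannot fault half-way), load the new register and segment state,
 * update the busy bits and back link, and finally push an error code on
 * the new stack if the switch was caused by an exception that carries one.
 */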
329 static void switch_tss_ra(CPUX86State *env, int tss_selector,
330                           uint32_t e1, uint32_t e2, int source,
331                           uint32_t next_eip, bool has_error_code,
332                           uint32_t error_code, uintptr_t retaddr)
333 {
334     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
335     target_ulong tss_base;
336     uint32_t new_regs[8], new_segs[6];
337     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
338     uint32_t old_eflags, eflags_mask;
339     SegmentCache *dt;
340     int mmu_index, index;
341     target_ulong ptr;
342     X86Access old, new;
343 
344     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
345     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
346               source);
347 
348     /* if task gate, we read the TSS segment and we load it */
349     if (type == 5) {
350         if (!(e2 & DESC_P_MASK)) {
351             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
352         }
353         tss_selector = e1 >> 16;
354         if (tss_selector & 4) {
355             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
356         }
357         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
358             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
359         }
360         if (e2 & DESC_S_MASK) {
361             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
362         }
363         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
364         if ((type & 7) != 1) {
365             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
366         }
367     }
368 
369     if (!(e2 & DESC_P_MASK)) {
370         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
371     }
372 
373     if (type & 8) {
374         tss_limit_max = 103;
375     } else {
376         tss_limit_max = 43;
377     }
378     tss_limit = get_seg_limit(e1, e2);
379     tss_base = get_seg_base(e1, e2);
380     if ((tss_selector & 4) != 0 ||
381         tss_limit < tss_limit_max) {
382         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
383     }
384     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
385     if (old_type & 8) {
386         old_tss_limit_max = 103;
387     } else {
388         old_tss_limit_max = 43;
389     }
390 
391     /* new TSS must be busy iff the source is an IRET instruction  */
392     if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
393         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
394     }
395 
396     /* X86Access avoids memory exceptions during the task switch */
397     mmu_index = cpu_mmu_index_kernel(env);
398     access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
399                        MMU_DATA_STORE, mmu_index, retaddr);
400 
401     if (source == SWITCH_TSS_CALL) {
402         /* Probe for future write of parent task */
403         probe_access(env, tss_base, 2, MMU_DATA_STORE,
404                      mmu_index, retaddr);
405     }
406     /* While the true tss_limit may be larger, we don't access the iopb here. */
407     access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
408                        MMU_DATA_LOAD, mmu_index, retaddr);
409 
410     /* save the current state in the old TSS */
411     old_eflags = cpu_compute_eflags(env);
412     if (old_type & 8) {
413         /* 32 bit */
414         access_stl(&old, env->tr.base + 0x20, next_eip);
415         access_stl(&old, env->tr.base + 0x24, old_eflags);
416         access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
417         access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
418         access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
419         access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
420         access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
421         access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
422         access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
423         access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
424         for (i = 0; i < 6; i++) {
425             access_stw(&old, env->tr.base + (0x48 + i * 4),
426                        env->segs[i].selector);
427         }
428     } else {
429         /* 16 bit */
430         access_stw(&old, env->tr.base + 0x0e, next_eip);
431         access_stw(&old, env->tr.base + 0x10, old_eflags);
432         access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
433         access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
434         access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
435         access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
436         access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
437         access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
438         access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
439         access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
440         for (i = 0; i < 4; i++) {
441             access_stw(&old, env->tr.base + (0x22 + i * 2),
442                        env->segs[i].selector);
443         }
444     }
445 
446     /* read all the registers from the new TSS */
447     if (type & 8) {
448         /* 32 bit */
449         new_cr3 = access_ldl(&new, tss_base + 0x1c);
450         new_eip = access_ldl(&new, tss_base + 0x20);
451         new_eflags = access_ldl(&new, tss_base + 0x24);
452         for (i = 0; i < 8; i++) {
453             new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
454         }
455         for (i = 0; i < 6; i++) {
456             new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
457         }
458         new_ldt = access_ldw(&new, tss_base + 0x60);
459         new_trap = access_ldl(&new, tss_base + 0x64);
460     } else {
461         /* 16 bit */
462         new_cr3 = 0;
463         new_eip = access_ldw(&new, tss_base + 0x0e);
464         new_eflags = access_ldw(&new, tss_base + 0x10);
465         for (i = 0; i < 8; i++) {
466             new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
467         }
468         for (i = 0; i < 4; i++) {
469             new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
470         }
471         new_ldt = access_ldw(&new, tss_base + 0x2a);
472         new_segs[R_FS] = 0;
473         new_segs[R_GS] = 0;
474         new_trap = 0;
475     }
476 
477     /* clear busy bit (it is restartable) */
478     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
479         tss_set_busy(env, env->tr.selector, 0, retaddr);
480     }
481 
482     if (source == SWITCH_TSS_IRET) {
483         old_eflags &= ~NT_MASK;
484         if (old_type & 8) {
485             access_stl(&old, env->tr.base + 0x24, old_eflags);
486         } else {
487             access_stw(&old, env->tr.base + 0x10, old_eflags);
488         }
489     }
490 
491     if (source == SWITCH_TSS_CALL) {
492         /*
493          * Thanks to the probe_access above, we know the first two
494          * bytes addressed by &new are writable too.
495          */
496         access_stw(&new, tss_base, env->tr.selector);
497         new_eflags |= NT_MASK;
498     }
499 
500     /* set busy bit */
501     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
502         tss_set_busy(env, tss_selector, 1, retaddr);
503     }
504 
505     /* set the new CPU state */
506 
507     /* now if an exception occurs, it will occur in the next task context */
508 
509     env->cr[0] |= CR0_TS_MASK;
510     env->hflags |= HF_TS_MASK;
511     env->tr.selector = tss_selector;
512     env->tr.base = tss_base;
513     env->tr.limit = tss_limit;
514     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
515 
516     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
517         cpu_x86_update_cr3(env, new_cr3);
518     }
519 
520     /* load all registers without an exception, then reload them with
521        possible exception */
522     env->eip = new_eip;
523     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
524         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
525     if (type & 8) {
526         cpu_load_eflags(env, new_eflags, eflags_mask);
527         for (i = 0; i < 8; i++) {
528             env->regs[i] = new_regs[i];
529         }
530     } else {
531         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
532         for (i = 0; i < 8; i++) {
533             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
534         }
535     }
536     if (new_eflags & VM_MASK) {
537         for (i = 0; i < 6; i++) {
538             load_seg_vm(env, i, new_segs[i]);
539         }
540     } else {
541         /* first just selectors as the rest may trigger exceptions */
542         for (i = 0; i < 6; i++) {
543             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
544         }
545     }
546 
547     env->ldt.selector = new_ldt & ~4;
548     env->ldt.base = 0;
549     env->ldt.limit = 0;
550     env->ldt.flags = 0;
551 
552     /* load the LDT */
553     if (new_ldt & 4) {
554         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
555     }
556 
557     if ((new_ldt & 0xfffc) != 0) {
558         dt = &env->gdt;
559         index = new_ldt & ~7;
560         if ((index + 7) > dt->limit) {
561             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
562         }
563         ptr = dt->base + index;
564         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
565         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
566         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
567             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
568         }
569         if (!(e2 & DESC_P_MASK)) {
570             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
571         }
572         load_seg_cache_raw_dt(&env->ldt, e1, e2);
573     }
574 
575     /* load the segments */
576     if (!(new_eflags & VM_MASK)) {
577         int cpl = new_segs[R_CS] & 3;
578         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
579         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
580         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
581         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
582         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
583         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
584     }
585 
586     /* check that env->eip is in the CS segment limits */
587     if (new_eip > env->segs[R_CS].limit) {
588         /* XXX: different exception if CALL? */
589         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
590     }
591 
592 #ifndef CONFIG_USER_ONLY
593     /* reset local breakpoints */
594     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
595         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
596     }
597 #endif
598 
599     if (has_error_code) {
600         int cpl = env->hflags & HF_CPL_MASK;
601         StackAccess sa;
602 
603         /* push the error code */
604         sa.env = env;
605         sa.ra = retaddr;
606         sa.mmu_index = x86_mmu_index_pl(env, cpl);
607         sa.sp = env->regs[R_ESP];
608         if (env->segs[R_SS].flags & DESC_B_MASK) {
609             sa.sp_mask = 0xffffffff;
610         } else {
611             sa.sp_mask = 0xffff;
612         }
613         sa.ss_base = env->segs[R_SS].base;
614         if (type & 8) {
615             pushl(&sa, error_code);
616         } else {
617             pushw(&sa, error_code);
618         }
619         SET_ESP(sa.sp, sa.sp_mask);
620     }
621 
622     if (new_trap) {
623         env->dr[6] |= DR6_BT;
624         raise_exception_ra(env, EXCP01_DB, retaddr);
625     }
626 }
627 
628 static void switch_tss(CPUX86State *env, int tss_selector,
629                        uint32_t e1, uint32_t e2, int source,
630                        uint32_t next_eip, bool has_error_code,
631                        int error_code)
632 {
633     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip,
634                   has_error_code, error_code, 0);
635 }
636 
637 static inline unsigned int get_sp_mask(unsigned int e2)
638 {
639 #ifdef TARGET_X86_64
640     if (e2 & DESC_L_MASK) {
641         return 0;
642     } else
643 #endif
644     if (e2 & DESC_B_MASK) {
645         return 0xffffffff;
646     } else {
647         return 0xffff;
648     }
649 }
650 
651 static int exception_is_fault(int intno)
652 {
653     switch (intno) {
654         /*
655          * #DB can be both fault- and trap-like, but it never sets RF=1
656          * in the RFLAGS value pushed on the stack.
657          */
658     case EXCP01_DB:
659     case EXCP03_INT3:
660     case EXCP04_INTO:
661     case EXCP08_DBLE:
662     case EXCP12_MCHK:
663         return 0;
664     }
665     /* Everything else, including reserved exceptions, is a fault. */
666     return 1;
667 }
668 
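/*
 * Vectors that push an error code: #DF (8), #TS (10), #NP (11), #SS (12),
 * #GP (13), #PF (14) and #AC (17).
 */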
669 int exception_has_error_code(int intno)
670 {
671     switch (intno) {
672     case 8:
673     case 10:
674     case 11:
675     case 12:
676     case 13:
677     case 14:
678     case 17:
679         return 1;
680     }
681     return 0;
682 }
683 
684 /* protected mode interrupt */
685 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
686                                    int error_code, unsigned int next_eip,
687                                    int is_hw)
688 {
689     SegmentCache *dt;
690     target_ulong ptr;
691     int type, dpl, selector, ss_dpl, cpl;
692     int has_error_code, new_stack, shift;
693     uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
694     uint32_t old_eip, eflags;
695     int vm86 = env->eflags & VM_MASK;
696     StackAccess sa;
697     bool set_rf;
698 
699     has_error_code = 0;
700     if (!is_int && !is_hw) {
701         has_error_code = exception_has_error_code(intno);
702     }
703     if (is_int) {
704         old_eip = next_eip;
705         set_rf = false;
706     } else {
707         old_eip = env->eip;
708         set_rf = exception_is_fault(intno);
709     }
710 
711     dt = &env->idt;
712     if (intno * 8 + 7 > dt->limit) {
713         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
714     }
715     ptr = dt->base + intno * 8;
716     e1 = cpu_ldl_kernel(env, ptr);
717     e2 = cpu_ldl_kernel(env, ptr + 4);
718     /* check gate type */
719     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
720     switch (type) {
721     case 5: /* task gate */
722     case 6: /* 286 interrupt gate */
723     case 7: /* 286 trap gate */
724     case 14: /* 386 interrupt gate */
725     case 15: /* 386 trap gate */
726         break;
727     default:
728         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
729         break;
730     }
731     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
732     cpl = env->hflags & HF_CPL_MASK;
733     /* check privilege if software int */
734     if (is_int && dpl < cpl) {
735         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
736     }
737 
738     sa.env = env;
739     sa.ra = 0;
740 
741     if (type == 5) {
742         /* task gate */
743         /* must do that check here to return the correct error code */
744         if (!(e2 & DESC_P_MASK)) {
745             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
746         }
747         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip,
748                    has_error_code, error_code);
749         return;
750     }
751 
752     /* Otherwise, trap or interrupt gate */
753 
754     /* check valid bit */
755     if (!(e2 & DESC_P_MASK)) {
756         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
757     }
758     selector = e1 >> 16;
759     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
760     if ((selector & 0xfffc) == 0) {
761         raise_exception_err(env, EXCP0D_GPF, 0);
762     }
763     if (load_segment(env, &e1, &e2, selector) != 0) {
764         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
765     }
766     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
767         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
768     }
769     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
770     if (dpl > cpl) {
771         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
772     }
773     if (!(e2 & DESC_P_MASK)) {
774         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
775     }
776     if (e2 & DESC_C_MASK) {
777         dpl = cpl;
778     }
779     sa.mmu_index = x86_mmu_index_pl(env, dpl);
780     if (dpl < cpl) {
781         /* to inner privilege */
782         uint32_t esp;
783         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
784         if ((ss & 0xfffc) == 0) {
785             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
786         }
787         if ((ss & 3) != dpl) {
788             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
789         }
790         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
791             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
792         }
793         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
794         if (ss_dpl != dpl) {
795             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
796         }
797         if (!(ss_e2 & DESC_S_MASK) ||
798             (ss_e2 & DESC_CS_MASK) ||
799             !(ss_e2 & DESC_W_MASK)) {
800             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
801         }
802         if (!(ss_e2 & DESC_P_MASK)) {
803             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
804         }
805         new_stack = 1;
806         sa.sp = esp;
807         sa.sp_mask = get_sp_mask(ss_e2);
808         sa.ss_base = get_seg_base(ss_e1, ss_e2);
809     } else  {
810         /* to same privilege */
811         if (vm86) {
812             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
813         }
814         new_stack = 0;
815         sa.sp = env->regs[R_ESP];
816         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
817         sa.ss_base = env->segs[R_SS].base;
818     }
819 
820     shift = type >> 3;
821 
822 #if 0
823     /* XXX: check that enough room is available */
824     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
825     if (vm86) {
826         push_size += 8;
827     }
828     push_size <<= shift;
829 #endif
830     eflags = cpu_compute_eflags(env);
831     /*
832      * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
833      * as is.  AMD behavior could be implemented in check_hw_breakpoints().
834      */
835     if (set_rf) {
836         eflags |= RF_MASK;
837     }
838 
839     if (shift == 1) {
840         if (new_stack) {
841             if (vm86) {
842                 pushl(&sa, env->segs[R_GS].selector);
843                 pushl(&sa, env->segs[R_FS].selector);
844                 pushl(&sa, env->segs[R_DS].selector);
845                 pushl(&sa, env->segs[R_ES].selector);
846             }
847             pushl(&sa, env->segs[R_SS].selector);
848             pushl(&sa, env->regs[R_ESP]);
849         }
850         pushl(&sa, eflags);
851         pushl(&sa, env->segs[R_CS].selector);
852         pushl(&sa, old_eip);
853         if (has_error_code) {
854             pushl(&sa, error_code);
855         }
856     } else {
857         if (new_stack) {
858             if (vm86) {
859                 pushw(&sa, env->segs[R_GS].selector);
860                 pushw(&sa, env->segs[R_FS].selector);
861                 pushw(&sa, env->segs[R_DS].selector);
862                 pushw(&sa, env->segs[R_ES].selector);
863             }
864             pushw(&sa, env->segs[R_SS].selector);
865             pushw(&sa, env->regs[R_ESP]);
866         }
867         pushw(&sa, eflags);
868         pushw(&sa, env->segs[R_CS].selector);
869         pushw(&sa, old_eip);
870         if (has_error_code) {
871             pushw(&sa, error_code);
872         }
873     }
874 
875     /* interrupt gates clear the IF flag */
876     if ((type & 1) == 0) {
877         env->eflags &= ~IF_MASK;
878     }
879     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
880 
881     if (new_stack) {
882         if (vm86) {
883             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
884             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
885             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
886             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
887         }
888         ss = (ss & ~3) | dpl;
889         cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
890                                get_seg_limit(ss_e1, ss_e2), ss_e2);
891     }
892     SET_ESP(sa.sp, sa.sp_mask);
893 
894     selector = (selector & ~3) | dpl;
895     cpu_x86_load_seg_cache(env, R_CS, selector,
896                    get_seg_base(e1, e2),
897                    get_seg_limit(e1, e2),
898                    e2);
899     env->eip = offset;
900 }
901 
902 #ifdef TARGET_X86_64
903 
904 static void pushq(StackAccess *sa, uint64_t val)
905 {
906     sa->sp -= 8;
907     cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
908 }
909 
910 static uint64_t popq(StackAccess *sa)
911 {
912     uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
913     sa->sp += 8;
914     return ret;
915 }
916 
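/*
 * Read a stack pointer from the 64-bit TSS: levels 0-2 are RSP0-RSP2,
 * levels 4-10 are IST1-IST7.  A non-canonical value raises #SS(0).
 */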
917 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
918 {
919     X86CPU *cpu = env_archcpu(env);
920     int index, pg_mode;
921     target_ulong rsp;
922     int32_t sext;
923 
924 #if 0
925     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
926            env->tr.base, env->tr.limit);
927 #endif
928 
929     if (!(env->tr.flags & DESC_P_MASK)) {
930         cpu_abort(CPU(cpu), "invalid tss");
931     }
932     index = 8 * level + 4;
933     if ((index + 7) > env->tr.limit) {
934         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
935     }
936 
937     rsp = cpu_ldq_kernel(env, env->tr.base + index);
938 
939     /* test virtual address sign extension */
940     pg_mode = get_pg_mode(env);
941     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
942     if (sext != 0 && sext != -1) {
943         raise_exception_err(env, EXCP0C_STACK, 0);
944     }
945 
946     return rsp;
947 }
948 
949 /* 64 bit interrupt */
950 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
951                            int error_code, target_ulong next_eip, int is_hw)
952 {
953     SegmentCache *dt;
954     target_ulong ptr;
955     int type, dpl, selector, cpl, ist;
956     int has_error_code, new_stack;
957     uint32_t e1, e2, e3, eflags;
958     target_ulong old_eip, offset;
959     bool set_rf;
960     StackAccess sa;
961 
962     has_error_code = 0;
963     if (!is_int && !is_hw) {
964         has_error_code = exception_has_error_code(intno);
965     }
966     if (is_int) {
967         old_eip = next_eip;
968         set_rf = false;
969     } else {
970         old_eip = env->eip;
971         set_rf = exception_is_fault(intno);
972     }
973 
974     dt = &env->idt;
975     if (intno * 16 + 15 > dt->limit) {
976         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
977     }
978     ptr = dt->base + intno * 16;
979     e1 = cpu_ldl_kernel(env, ptr);
980     e2 = cpu_ldl_kernel(env, ptr + 4);
981     e3 = cpu_ldl_kernel(env, ptr + 8);
982     /* check gate type */
983     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
984     switch (type) {
985     case 14: /* 386 interrupt gate */
986     case 15: /* 386 trap gate */
987         break;
988     default:
989         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
990         break;
991     }
992     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
993     cpl = env->hflags & HF_CPL_MASK;
994     /* check privilege if software int */
995     if (is_int && dpl < cpl) {
996         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
997     }
998     /* check valid bit */
999     if (!(e2 & DESC_P_MASK)) {
1000         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
1001     }
1002     selector = e1 >> 16;
1003     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1004     ist = e2 & 7;
1005     if ((selector & 0xfffc) == 0) {
1006         raise_exception_err(env, EXCP0D_GPF, 0);
1007     }
1008 
1009     if (load_segment(env, &e1, &e2, selector) != 0) {
1010         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1011     }
1012     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1013         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1014     }
1015     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1016     if (dpl > cpl) {
1017         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1018     }
1019     if (!(e2 & DESC_P_MASK)) {
1020         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1021     }
1022     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
1023         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1024     }
1025     if (e2 & DESC_C_MASK) {
1026         dpl = cpl;
1027     }
1028 
1029     sa.env = env;
1030     sa.ra = 0;
1031     sa.mmu_index = x86_mmu_index_pl(env, dpl);
1032     sa.sp_mask = -1;
1033     sa.ss_base = 0;
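    /*
     * The stack is switched either on a privilege change or whenever the
     * gate specifies an IST; "ist + 3" maps IST1..IST7 to levels 4..10 of
     * get_rsp_from_tss() above.
     */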
1034     if (dpl < cpl || ist != 0) {
1035         /* to inner privilege */
1036         new_stack = 1;
1037         sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
1038     } else {
1039         /* to same privilege */
1040         if (env->eflags & VM_MASK) {
1041             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1042         }
1043         new_stack = 0;
1044         sa.sp = env->regs[R_ESP];
1045     }
1046     sa.sp &= ~0xfLL; /* align stack */
1047 
1048     /* See do_interrupt_protected.  */
1049     eflags = cpu_compute_eflags(env);
1050     if (set_rf) {
1051         eflags |= RF_MASK;
1052     }
1053 
1054     pushq(&sa, env->segs[R_SS].selector);
1055     pushq(&sa, env->regs[R_ESP]);
1056     pushq(&sa, eflags);
1057     pushq(&sa, env->segs[R_CS].selector);
1058     pushq(&sa, old_eip);
1059     if (has_error_code) {
1060         pushq(&sa, error_code);
1061     }
1062 
1063     /* interrupt gates clear the IF flag */
1064     if ((type & 1) == 0) {
1065         env->eflags &= ~IF_MASK;
1066     }
1067     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1068 
1069     if (new_stack) {
1070         uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
1071         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1072     }
1073     env->regs[R_ESP] = sa.sp;
1074 
1075     selector = (selector & ~3) | dpl;
1076     cpu_x86_load_seg_cache(env, R_CS, selector,
1077                    get_seg_base(e1, e2),
1078                    get_seg_limit(e1, e2),
1079                    e2);
1080     env->eip = offset;
1081 }
1082 #endif /* TARGET_X86_64 */
1083 
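/*
 * SYSRET: return to user mode.  In long mode the CS/SS selectors are
 * derived from MSR_STAR[63:48] (+16 for a 64-bit target, +8 for SS),
 * RFLAGS is restored from R11 and RIP from RCX; outside long mode only
 * IF is set and execution resumes at ECX.  Raises #UD if EFER.SCE is
 * clear and #GP if not executed from CPL 0 protected mode.
 */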
1084 void helper_sysret(CPUX86State *env, int dflag)
1085 {
1086     int cpl, selector;
1087 
1088     if (!(env->efer & MSR_EFER_SCE)) {
1089         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1090     }
1091     cpl = env->hflags & HF_CPL_MASK;
1092     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1093         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1094     }
1095     selector = (env->star >> 48) & 0xffff;
1096 #ifdef TARGET_X86_64
1097     if (env->hflags & HF_LMA_MASK) {
1098         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1099                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1100                         NT_MASK);
1101         if (dflag == 2) {
1102             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1103                                    0, 0xffffffff,
1104                                    DESC_G_MASK | DESC_P_MASK |
1105                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1106                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1107                                    DESC_L_MASK);
1108             env->eip = env->regs[R_ECX];
1109         } else {
1110             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1111                                    0, 0xffffffff,
1112                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1113                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1114                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1115             env->eip = (uint32_t)env->regs[R_ECX];
1116         }
1117         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1118                                0, 0xffffffff,
1119                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1120                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1121                                DESC_W_MASK | DESC_A_MASK);
1122     } else
1123 #endif
1124     {
1125         env->eflags |= IF_MASK;
1126         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1127                                0, 0xffffffff,
1128                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1129                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1130                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1131         env->eip = (uint32_t)env->regs[R_ECX];
1132         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1133                                0, 0xffffffff,
1134                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1135                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1136                                DESC_W_MASK | DESC_A_MASK);
1137     }
1138 }
1139 
1140 /* real mode interrupt */
1141 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1142                               int error_code, unsigned int next_eip)
1143 {
1144     SegmentCache *dt;
1145     target_ulong ptr;
1146     int selector;
1147     uint32_t offset;
1148     uint32_t old_cs, old_eip;
1149     StackAccess sa;
1150 
1151     /* real mode (simpler!) */
1152     dt = &env->idt;
1153     if (intno * 4 + 3 > dt->limit) {
1154         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1155     }
1156     ptr = dt->base + intno * 4;
1157     offset = cpu_lduw_kernel(env, ptr);
1158     selector = cpu_lduw_kernel(env, ptr + 2);
1159 
1160     sa.env = env;
1161     sa.ra = 0;
1162     sa.sp = env->regs[R_ESP];
1163     sa.sp_mask = 0xffff;
1164     sa.ss_base = env->segs[R_SS].base;
1165     sa.mmu_index = x86_mmu_index_pl(env, 0);
1166 
1167     if (is_int) {
1168         old_eip = next_eip;
1169     } else {
1170         old_eip = env->eip;
1171     }
1172     old_cs = env->segs[R_CS].selector;
1173     /* XXX: use SS segment size? */
1174     pushw(&sa, cpu_compute_eflags(env));
1175     pushw(&sa, old_cs);
1176     pushw(&sa, old_eip);
1177 
1178     /* update processor state */
1179     SET_ESP(sa.sp, sa.sp_mask);
1180     env->eip = offset;
1181     env->segs[R_CS].selector = selector;
1182     env->segs[R_CS].base = (selector << 4);
1183     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1184 }
1185 
1186 /*
1187  * Begin execution of an interrupt. is_int is TRUE if coming from
1188  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1189  * instruction. It is only relevant if is_int is TRUE.
1190  */
1191 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1192                       int error_code, target_ulong next_eip, int is_hw)
1193 {
1194     CPUX86State *env = &cpu->env;
1195 
1196     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1197         if ((env->cr[0] & CR0_PE_MASK)) {
1198             static int count;
1199 
1200             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1201                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1202                      count, intno, error_code, is_int,
1203                      env->hflags & HF_CPL_MASK,
1204                      env->segs[R_CS].selector, env->eip,
1205                      (int)env->segs[R_CS].base + env->eip,
1206                      env->segs[R_SS].selector, env->regs[R_ESP]);
1207             if (intno == 0x0e) {
1208                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1209             } else {
1210                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1211             }
1212             qemu_log("\n");
1213             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1214 #if 0
1215             {
1216                 int i;
1217                 target_ulong ptr;
1218 
1219                 qemu_log("       code=");
1220                 ptr = env->segs[R_CS].base + env->eip;
1221                 for (i = 0; i < 16; i++) {
1222                     qemu_log(" %02x", ldub(ptr + i));
1223                 }
1224                 qemu_log("\n");
1225             }
1226 #endif
1227             count++;
1228         }
1229     }
1230     if (env->cr[0] & CR0_PE_MASK) {
1231 #if !defined(CONFIG_USER_ONLY)
1232         if (env->hflags & HF_GUEST_MASK) {
1233             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1234         }
1235 #endif
1236 #ifdef TARGET_X86_64
1237         if (env->hflags & HF_LMA_MASK) {
1238             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1239         } else
1240 #endif
1241         {
1242             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1243                                    is_hw);
1244         }
1245     } else {
1246 #if !defined(CONFIG_USER_ONLY)
1247         if (env->hflags & HF_GUEST_MASK) {
1248             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1249         }
1250 #endif
1251         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1252     }
1253 
1254 #if !defined(CONFIG_USER_ONLY)
1255     if (env->hflags & HF_GUEST_MASK) {
1256         CPUState *cs = CPU(cpu);
1257         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1258                                       offsetof(struct vmcb,
1259                                                control.event_inj));
1260 
1261         x86_stl_phys(cs,
1262                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1263                  event_inj & ~SVM_EVTINJ_VALID);
1264     }
1265 #endif
1266 }
1267 
1268 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1269 {
1270     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1271 }
1272 
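/*
 * LLDT: load the LDT register.  A null selector leaves an empty LDT;
 * otherwise the descriptor must come from the GDT, be of LDT type (2)
 * and be present.  In long mode the descriptor is 16 bytes and supplies
 * the upper half of the base address.
 */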
1273 void helper_lldt(CPUX86State *env, int selector)
1274 {
1275     SegmentCache *dt;
1276     uint32_t e1, e2;
1277     int index, entry_limit;
1278     target_ulong ptr;
1279 
1280     selector &= 0xffff;
1281     if ((selector & 0xfffc) == 0) {
1282         /* XXX: NULL selector case: invalid LDT */
1283         env->ldt.base = 0;
1284         env->ldt.limit = 0;
1285     } else {
1286         if (selector & 0x4) {
1287             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1288         }
1289         dt = &env->gdt;
1290         index = selector & ~7;
1291 #ifdef TARGET_X86_64
1292         if (env->hflags & HF_LMA_MASK) {
1293             entry_limit = 15;
1294         } else
1295 #endif
1296         {
1297             entry_limit = 7;
1298         }
1299         if ((index + entry_limit) > dt->limit) {
1300             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1301         }
1302         ptr = dt->base + index;
1303         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1304         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1305         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1306             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1307         }
1308         if (!(e2 & DESC_P_MASK)) {
1309             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1310         }
1311 #ifdef TARGET_X86_64
1312         if (env->hflags & HF_LMA_MASK) {
1313             uint32_t e3;
1314 
1315             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1316             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1317             env->ldt.base |= (target_ulong)e3 << 32;
1318         } else
1319 #endif
1320         {
1321             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1322         }
1323     }
1324     env->ldt.selector = selector;
1325 }
1326 
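/*
 * LTR: load the task register.  The selector must reference a GDT
 * descriptor for an available 16-bit or 32-bit TSS (type 1 or 9), which
 * is then marked busy.  In long mode the 16-byte descriptor supplies a
 * 64-bit base and its upper type field must be zero.
 */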
1327 void helper_ltr(CPUX86State *env, int selector)
1328 {
1329     SegmentCache *dt;
1330     uint32_t e1, e2;
1331     int index, type, entry_limit;
1332     target_ulong ptr;
1333 
1334     selector &= 0xffff;
1335     if ((selector & 0xfffc) == 0) {
1336         /* NULL selector case: invalid TR */
1337         env->tr.base = 0;
1338         env->tr.limit = 0;
1339         env->tr.flags = 0;
1340     } else {
1341         if (selector & 0x4) {
1342             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1343         }
1344         dt = &env->gdt;
1345         index = selector & ~7;
1346 #ifdef TARGET_X86_64
1347         if (env->hflags & HF_LMA_MASK) {
1348             entry_limit = 15;
1349         } else
1350 #endif
1351         {
1352             entry_limit = 7;
1353         }
1354         if ((index + entry_limit) > dt->limit) {
1355             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1356         }
1357         ptr = dt->base + index;
1358         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1359         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1360         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1361         if ((e2 & DESC_S_MASK) ||
1362             (type != 1 && type != 9)) {
1363             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1364         }
1365         if (!(e2 & DESC_P_MASK)) {
1366             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1367         }
1368 #ifdef TARGET_X86_64
1369         if (env->hflags & HF_LMA_MASK) {
1370             uint32_t e3, e4;
1371 
1372             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1373             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1374             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1375                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1376             }
1377             load_seg_cache_raw_dt(&env->tr, e1, e2);
1378             env->tr.base |= (target_ulong)e3 << 32;
1379         } else
1380 #endif
1381         {
1382             load_seg_cache_raw_dt(&env->tr, e1, e2);
1383         }
1384         e2 |= DESC_TSS_BUSY_MASK;
1385         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1386     }
1387     env->tr.selector = selector;
1388 }
1389 
1390 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1391 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1392 {
1393     uint32_t e1, e2;
1394     int cpl, dpl, rpl;
1395     SegmentCache *dt;
1396     int index;
1397     target_ulong ptr;
1398 
1399     selector &= 0xffff;
1400     cpl = env->hflags & HF_CPL_MASK;
1401     if ((selector & 0xfffc) == 0) {
1402         /* null selector case */
1403         if (seg_reg == R_SS
1404 #ifdef TARGET_X86_64
1405             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1406 #endif
1407             ) {
1408             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1409         }
1410         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1411     } else {
1412 
1413         if (selector & 0x4) {
1414             dt = &env->ldt;
1415         } else {
1416             dt = &env->gdt;
1417         }
1418         index = selector & ~7;
1419         if ((index + 7) > dt->limit) {
1420             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1421         }
1422         ptr = dt->base + index;
1423         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1424         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1425 
1426         if (!(e2 & DESC_S_MASK)) {
1427             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1428         }
1429         rpl = selector & 3;
1430         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1431         if (seg_reg == R_SS) {
1432             /* must be writable segment */
1433             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1434                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1435             }
1436             if (rpl != cpl || dpl != cpl) {
1437                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1438             }
1439         } else {
1440             /* must be readable segment */
1441             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1442                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1443             }
1444 
1445             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1446                 /* if not conforming code, test rights */
1447                 if (dpl < cpl || dpl < rpl) {
1448                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1449                 }
1450             }
1451         }
1452 
1453         if (!(e2 & DESC_P_MASK)) {
1454             if (seg_reg == R_SS) {
1455                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1456             } else {
1457                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1458             }
1459         }
1460 
1461         /* set the access bit if not already set */
1462         if (!(e2 & DESC_A_MASK)) {
1463             e2 |= DESC_A_MASK;
1464             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1465         }
1466 
1467         cpu_x86_load_seg_cache(env, seg_reg, selector,
1468                        get_seg_base(e1, e2),
1469                        get_seg_limit(e1, e2),
1470                        e2);
1471 #if 0
1472         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1473                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1474 #endif
1475     }
1476 }
1477 
1478 /* protected mode jump */
1479 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1480                            target_ulong next_eip)
1481 {
1482     int gate_cs, type;
1483     uint32_t e1, e2, cpl, dpl, rpl, limit;
1484 
1485     if ((new_cs & 0xfffc) == 0) {
1486         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1487     }
1488     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1489         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1490     }
1491     cpl = env->hflags & HF_CPL_MASK;
1492     if (e2 & DESC_S_MASK) {
1493         if (!(e2 & DESC_CS_MASK)) {
1494             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1495         }
1496         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1497         if (e2 & DESC_C_MASK) {
1498             /* conforming code segment */
1499             if (dpl > cpl) {
1500                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1501             }
1502         } else {
1503             /* non conforming code segment */
1504             rpl = new_cs & 3;
1505             if (rpl > cpl) {
1506                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1507             }
1508             if (dpl != cpl) {
1509                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1510             }
1511         }
1512         if (!(e2 & DESC_P_MASK)) {
1513             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1514         }
1515         limit = get_seg_limit(e1, e2);
1516         if (new_eip > limit &&
1517             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1518             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1519         }
1520         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1521                        get_seg_base(e1, e2), limit, e2);
1522         env->eip = new_eip;
1523     } else {
1524         /* jump to call or task gate */
1525         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1526         rpl = new_cs & 3;
1527         cpl = env->hflags & HF_CPL_MASK;
1528         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1529 
1530 #ifdef TARGET_X86_64
1531         if (env->efer & MSR_EFER_LMA) {
1532             if (type != 12) {
1533                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1534             }
1535         }
1536 #endif
1537         switch (type) {
1538         case 1: /* 286 TSS */
1539         case 9: /* 386 TSS */
1540         case 5: /* task gate */
1541             if (dpl < cpl || dpl < rpl) {
1542                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1543             }
1544             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip,
1545                           false, 0, GETPC());
1546             break;
1547         case 4: /* 286 call gate */
1548         case 12: /* 386 call gate */
1549             if ((dpl < cpl) || (dpl < rpl)) {
1550                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1551             }
1552             if (!(e2 & DESC_P_MASK)) {
1553                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1554             }
1555             gate_cs = e1 >> 16;
1556             new_eip = (e1 & 0xffff);
1557             if (type == 12) {
1558                 new_eip |= (e2 & 0xffff0000);
1559             }
1560 
1561 #ifdef TARGET_X86_64
1562             if (env->efer & MSR_EFER_LMA) {
1563                 /* load the upper 8 bytes of the 64-bit call gate */
1564                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1565                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1566                                            GETPC());
1567                 }
1568                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1569                 if (type != 0) {
1570                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1571                                            GETPC());
1572                 }
1573                 new_eip |= ((target_ulong)e1) << 32;
1574             }
1575 #endif
1576 
1577             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1578                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1579             }
1580             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1581             /* must be code segment */
1582             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1583                  (DESC_S_MASK | DESC_CS_MASK))) {
1584                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1585             }
1586             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1587                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1588                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1589             }
1590 #ifdef TARGET_X86_64
1591             if (env->efer & MSR_EFER_LMA) {
1592                 if (!(e2 & DESC_L_MASK)) {
1593                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1594                 }
1595                 if (e2 & DESC_B_MASK) {
1596                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1597                 }
1598             }
1599 #endif
1600             if (!(e2 & DESC_P_MASK)) {
1601                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1602             }
1603             limit = get_seg_limit(e1, e2);
1604             if (new_eip > limit &&
1605                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1606                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1607             }
1608             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1609                                    get_seg_base(e1, e2), limit, e2);
1610             env->eip = new_eip;
1611             break;
1612         default:
1613             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1614             break;
1615         }
1616     }
1617 }
1618 
1619 /* real mode call */
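     /*
      * In real mode no descriptor checks apply: the return CS:IP is pushed
      * and the new CS base is simply the selector shifted left by four
      * (e.g. lcall 0x1234:0x0010 ends up with CS.base = 0x12340,
      * EIP = 0x0010).
      */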
1620 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1621                        int shift, uint32_t next_eip)
1622 {
1623     StackAccess sa;
1624 
1625     sa.env = env;
1626     sa.ra = GETPC();
1627     sa.sp = env->regs[R_ESP];
1628     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1629     sa.ss_base = env->segs[R_SS].base;
1630     sa.mmu_index = x86_mmu_index_pl(env, 0);
1631 
1632     if (shift) {
1633         pushl(&sa, env->segs[R_CS].selector);
1634         pushl(&sa, next_eip);
1635     } else {
1636         pushw(&sa, env->segs[R_CS].selector);
1637         pushw(&sa, next_eip);
1638     }
1639 
1640     SET_ESP(sa.sp, sa.sp_mask);
1641     env->eip = new_eip;
1642     env->segs[R_CS].selector = new_cs;
1643     env->segs[R_CS].base = (new_cs << 4);
1644 }
1645 
1646 /* protected mode call */
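     /*
      * Far CALL with a protected-mode selector.  Three cases are handled
      * below: a plain code segment (push the return CS:EIP on the current
      * stack), a TSS/task gate (task switch), or a call gate.  A call gate
      * that leads to a more privileged non-conforming segment additionally
      * switches to the inner stack taken from the TSS and copies the
      * gate's param_count parameters across.
      */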
1647 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1648                             int shift, target_ulong next_eip)
1649 {
1650     int new_stack, i;
1651     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1652     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1653     uint32_t val, limit, old_sp_mask;
1654     target_ulong old_ssp, offset;
1655     StackAccess sa;
1656 
1657     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1658     LOG_PCALL_STATE(env_cpu(env));
1659     if ((new_cs & 0xfffc) == 0) {
1660         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1661     }
1662     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1663         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1664     }
1665     cpl = env->hflags & HF_CPL_MASK;
1666     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1667 
1668     sa.env = env;
1669     sa.ra = GETPC();
1670 
1671     if (e2 & DESC_S_MASK) {
1672         /* "normal" far call, no stack switch possible */
1673         if (!(e2 & DESC_CS_MASK)) {
1674             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1675         }
1676         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1677         if (e2 & DESC_C_MASK) {
1678             /* conforming code segment */
1679             if (dpl > cpl) {
1680                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1681             }
1682         } else {
1683             /* non conforming code segment */
1684             rpl = new_cs & 3;
1685             if (rpl > cpl) {
1686                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1687             }
1688             if (dpl != cpl) {
1689                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1690             }
1691         }
1692         if (!(e2 & DESC_P_MASK)) {
1693             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1694         }
1695 
1696         sa.mmu_index = x86_mmu_index_pl(env, cpl);
1697 #ifdef TARGET_X86_64
1698         /* XXX: check 16/32 bit cases in long mode */
1699         if (shift == 2) {
1700             /* 64 bit case */
1701             sa.sp = env->regs[R_ESP];
1702             sa.sp_mask = -1;
1703             sa.ss_base = 0;
1704             pushq(&sa, env->segs[R_CS].selector);
1705             pushq(&sa, next_eip);
1706             /* from this point, not restartable */
1707             env->regs[R_ESP] = sa.sp;
1708             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1709                                    get_seg_base(e1, e2),
1710                                    get_seg_limit(e1, e2), e2);
1711             env->eip = new_eip;
1712         } else
1713 #endif
1714         {
1715             sa.sp = env->regs[R_ESP];
1716             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1717             sa.ss_base = env->segs[R_SS].base;
1718             if (shift) {
1719                 pushl(&sa, env->segs[R_CS].selector);
1720                 pushl(&sa, next_eip);
1721             } else {
1722                 pushw(&sa, env->segs[R_CS].selector);
1723                 pushw(&sa, next_eip);
1724             }
1725 
1726             limit = get_seg_limit(e1, e2);
1727             if (new_eip > limit) {
1728                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1729             }
1730             /* from this point, not restartable */
1731             SET_ESP(sa.sp, sa.sp_mask);
1732             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1733                                    get_seg_base(e1, e2), limit, e2);
1734             env->eip = new_eip;
1735         }
1736     } else {
1737         /* check gate type */
1738         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1739         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1740         rpl = new_cs & 3;
1741 
1742 #ifdef TARGET_X86_64
1743         if (env->efer & MSR_EFER_LMA) {
1744             if (type != 12) {
1745                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1746             }
1747         }
1748 #endif
1749 
1750         switch (type) {
1751         case 1: /* available 286 TSS */
1752         case 9: /* available 386 TSS */
1753         case 5: /* task gate */
1754             if (dpl < cpl || dpl < rpl) {
1755                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1756             }
1757             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip,
1758                           false, 0, GETPC());
1759             return;
1760         case 4: /* 286 call gate */
1761         case 12: /* 386 call gate */
1762             break;
1763         default:
1764             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1765             break;
1766         }
1767         shift = type >> 3;
1768 
1769         if (dpl < cpl || dpl < rpl) {
1770             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1771         }
1772         /* check valid bit */
1773         if (!(e2 & DESC_P_MASK)) {
1774             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1775         }
1776         selector = e1 >> 16;
1777         param_count = e2 & 0x1f;
1778         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1779 #ifdef TARGET_X86_64
1780         if (env->efer & MSR_EFER_LMA) {
1781             /* load the upper 8 bytes of the 64-bit call gate */
1782             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1783                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1784                                        GETPC());
1785             }
1786             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1787             if (type != 0) {
1788                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1789                                        GETPC());
1790             }
1791             offset |= ((target_ulong)e1) << 32;
1792         }
1793 #endif
1794         if ((selector & 0xfffc) == 0) {
1795             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1796         }
1797 
1798         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1799             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1800         }
1801         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1802             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1803         }
1804         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1805         if (dpl > cpl) {
1806             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1807         }
1808 #ifdef TARGET_X86_64
1809         if (env->efer & MSR_EFER_LMA) {
1810             if (!(e2 & DESC_L_MASK)) {
1811                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1812             }
1813             if (e2 & DESC_B_MASK) {
1814                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1815             }
1816             shift++;
1817         }
1818 #endif
1819         if (!(e2 & DESC_P_MASK)) {
1820             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1821         }
1822 
1823         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1824             /* to inner privilege */
1825             sa.mmu_index = x86_mmu_index_pl(env, dpl);
1826 #ifdef TARGET_X86_64
1827             if (shift == 2) {
1828                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1829                 new_stack = 1;
1830                 sa.sp = get_rsp_from_tss(env, dpl);
1831                 sa.sp_mask = -1;
1832                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1833                 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1834                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1835             } else
1836 #endif
1837             {
1838                 uint32_t sp32;
1839                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1840                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1841                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1842                           env->regs[R_ESP]);
1843                 if ((ss & 0xfffc) == 0) {
1844                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1845                 }
1846                 if ((ss & 3) != dpl) {
1847                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1848                 }
1849                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1850                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1851                 }
1852                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1853                 if (ss_dpl != dpl) {
1854                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1855                 }
1856                 if (!(ss_e2 & DESC_S_MASK) ||
1857                     (ss_e2 & DESC_CS_MASK) ||
1858                     !(ss_e2 & DESC_W_MASK)) {
1859                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1860                 }
1861                 if (!(ss_e2 & DESC_P_MASK)) {
1862                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1863                 }
1864 
1865                 sa.sp = sp32;
1866                 sa.sp_mask = get_sp_mask(ss_e2);
1867                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1868             }
1869 
1870             /* push_size = ((param_count * 2) + 8) << shift; */
1871             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1872             old_ssp = env->segs[R_SS].base;
1873 
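                 /*
                  * Push the outer SS:ESP on the inner stack, then copy the
                  * call gate's param_count parameters (16- or 32-bit words
                  * depending on the gate size) from the old stack; 64-bit
                  * call gates carry no parameters.
                  */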
1874 #ifdef TARGET_X86_64
1875             if (shift == 2) {
1876                 /* XXX: verify if new stack address is canonical */
1877                 pushq(&sa, env->segs[R_SS].selector);
1878                 pushq(&sa, env->regs[R_ESP]);
1879                 /* parameters aren't supported for 64-bit call gates */
1880             } else
1881 #endif
1882             if (shift == 1) {
1883                 pushl(&sa, env->segs[R_SS].selector);
1884                 pushl(&sa, env->regs[R_ESP]);
1885                 for (i = param_count - 1; i >= 0; i--) {
1886                     val = cpu_ldl_data_ra(env,
1887                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1888                                           GETPC());
1889                     pushl(&sa, val);
1890                 }
1891             } else {
1892                 pushw(&sa, env->segs[R_SS].selector);
1893                 pushw(&sa, env->regs[R_ESP]);
1894                 for (i = param_count - 1; i >= 0; i--) {
1895                     val = cpu_lduw_data_ra(env,
1896                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1897                                            GETPC());
1898                     pushw(&sa, val);
1899                 }
1900             }
1901             new_stack = 1;
1902         } else {
1903             /* to same privilege */
1904             sa.mmu_index = x86_mmu_index_pl(env, cpl);
1905             sa.sp = env->regs[R_ESP];
1906             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1907             sa.ss_base = env->segs[R_SS].base;
1908             /* push_size = (4 << shift); */
1909             new_stack = 0;
1910         }
1911 
1912 #ifdef TARGET_X86_64
1913         if (shift == 2) {
1914             pushq(&sa, env->segs[R_CS].selector);
1915             pushq(&sa, next_eip);
1916         } else
1917 #endif
1918         if (shift == 1) {
1919             pushl(&sa, env->segs[R_CS].selector);
1920             pushl(&sa, next_eip);
1921         } else {
1922             pushw(&sa, env->segs[R_CS].selector);
1923             pushw(&sa, next_eip);
1924         }
1925 
1926         /* from this point, not restartable */
1927 
1928         if (new_stack) {
1929 #ifdef TARGET_X86_64
1930             if (shift == 2) {
1931                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1932             } else
1933 #endif
1934             {
1935                 ss = (ss & ~3) | dpl;
1936                 cpu_x86_load_seg_cache(env, R_SS, ss,
1937                                        sa.ss_base,
1938                                        get_seg_limit(ss_e1, ss_e2),
1939                                        ss_e2);
1940             }
1941         }
1942 
1943         selector = (selector & ~3) | dpl;
1944         cpu_x86_load_seg_cache(env, R_CS, selector,
1945                        get_seg_base(e1, e2),
1946                        get_seg_limit(e1, e2),
1947                        e2);
1948         SET_ESP(sa.sp, sa.sp_mask);
1949         env->eip = offset;
1950     }
1951 }
1952 
1953 /* real and vm86 mode iret */
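     /*
      * IRET outside protected mode: pop IP, CS and FLAGS (16- or 32-bit
      * depending on the operand size), reload CS as a real-mode segment
      * and update only the EFLAGS bits the current mode may change
      * (IOPL stays fixed in vm86 mode).
      */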
1954 void helper_iret_real(CPUX86State *env, int shift)
1955 {
1956     uint32_t new_cs, new_eip, new_eflags;
1957     int eflags_mask;
1958     StackAccess sa;
1959 
1960     sa.env = env;
1961     sa.ra = GETPC();
1962     sa.mmu_index = x86_mmu_index_pl(env, 0);
1963     sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */
1964     sa.sp = env->regs[R_ESP];
1965     sa.ss_base = env->segs[R_SS].base;
1966 
1967     if (shift == 1) {
1968         /* 32 bits */
1969         new_eip = popl(&sa);
1970         new_cs = popl(&sa) & 0xffff;
1971         new_eflags = popl(&sa);
1972     } else {
1973         /* 16 bits */
1974         new_eip = popw(&sa);
1975         new_cs = popw(&sa);
1976         new_eflags = popw(&sa);
1977     }
1978     SET_ESP(sa.sp, sa.sp_mask);
1979     env->segs[R_CS].selector = new_cs;
1980     env->segs[R_CS].base = (new_cs << 4);
1981     env->eip = new_eip;
1982     if (env->eflags & VM_MASK) {
1983         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1984             NT_MASK;
1985     } else {
1986         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1987             RF_MASK | NT_MASK;
1988     }
1989     if (shift == 0) {
1990         eflags_mask &= 0xffff;
1991     }
1992     cpu_load_eflags(env, new_eflags, eflags_mask);
1993     env->hflags2 &= ~HF2_NMI_MASK;
1994 }
1995 
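     /*
      * On a return to an outer privilege level, data segment registers that
      * are not accessible at the new CPL must not remain usable.  The helper
      * below emulates this by loading a null selector and clearing the
      * present bit in the cached descriptor of a data or non-conforming
      * code segment whose DPL is below the new CPL.
      */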
1996 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1997 {
1998     int dpl;
1999     uint32_t e2;
2000 
2001     /* XXX: on x86_64, we do not want to nullify FS and GS because
2002        they may still contain a valid base. I would be interested to
2003        know how a real x86_64 CPU behaves */
2004     if ((seg_reg == R_FS || seg_reg == R_GS) &&
2005         (env->segs[seg_reg].selector & 0xfffc) == 0) {
2006         return;
2007     }
2008 
2009     e2 = env->segs[seg_reg].flags;
2010     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2011     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2012         /* data or non conforming code segment */
2013         if (dpl < cpl) {
2014             cpu_x86_load_seg_cache(env, seg_reg, 0,
2015                                    env->segs[seg_reg].base,
2016                                    env->segs[seg_reg].limit,
2017                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
2018         }
2019     }
2020 }
2021 
2022 /* protected mode lret/iret (is_iret distinguishes IRET from far RET) */
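     /*
      * Common tail for far RET and IRET in protected mode: pop EIP, CS
      * (and EFLAGS for IRET), check that the return CS RPL is not more
      * privileged than the current CPL, then either stay on the current
      * stack (same privilege) or also pop ESP and SS (return to an outer
      * privilege level) and revalidate the data segment registers.
      */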
2023 static inline void helper_ret_protected(CPUX86State *env, int shift,
2024                                         int is_iret, int addend,
2025                                         uintptr_t retaddr)
2026 {
2027     uint32_t new_cs, new_eflags, new_ss;
2028     uint32_t new_es, new_ds, new_fs, new_gs;
2029     uint32_t e1, e2, ss_e1, ss_e2;
2030     int cpl, dpl, rpl, eflags_mask, iopl;
2031     target_ulong new_eip, new_esp;
2032     StackAccess sa;
2033 
2034     cpl = env->hflags & HF_CPL_MASK;
2035 
2036     sa.env = env;
2037     sa.ra = retaddr;
2038     sa.mmu_index = x86_mmu_index_pl(env, cpl);
2039 
2040 #ifdef TARGET_X86_64
2041     if (shift == 2) {
2042         sa.sp_mask = -1;
2043     } else
2044 #endif
2045     {
2046         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
2047     }
2048     sa.sp = env->regs[R_ESP];
2049     sa.ss_base = env->segs[R_SS].base;
2050     new_eflags = 0; /* avoid warning */
2051 #ifdef TARGET_X86_64
2052     if (shift == 2) {
2053         new_eip = popq(&sa);
2054         new_cs = popq(&sa) & 0xffff;
2055         if (is_iret) {
2056             new_eflags = popq(&sa);
2057         }
2058     } else
2059 #endif
2060     {
2061         if (shift == 1) {
2062             /* 32 bits */
2063             new_eip = popl(&sa);
2064             new_cs = popl(&sa) & 0xffff;
2065             if (is_iret) {
2066                 new_eflags = popl(&sa);
2067                 if (new_eflags & VM_MASK) {
2068                     goto return_to_vm86;
2069                 }
2070             }
2071         } else {
2072             /* 16 bits */
2073             new_eip = popw(&sa);
2074             new_cs = popw(&sa);
2075             if (is_iret) {
2076                 new_eflags = popw(&sa);
2077             }
2078         }
2079     }
2080     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2081               new_cs, new_eip, shift, addend);
2082     LOG_PCALL_STATE(env_cpu(env));
2083     if ((new_cs & 0xfffc) == 0) {
2084         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2085     }
2086     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2087         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2088     }
2089     if (!(e2 & DESC_S_MASK) ||
2090         !(e2 & DESC_CS_MASK)) {
2091         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2092     }
2093     rpl = new_cs & 3;
2094     if (rpl < cpl) {
2095         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2096     }
2097     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2098     if (e2 & DESC_C_MASK) {
2099         if (dpl > rpl) {
2100             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2101         }
2102     } else {
2103         if (dpl != rpl) {
2104             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2105         }
2106     }
2107     if (!(e2 & DESC_P_MASK)) {
2108         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2109     }
2110 
2111     sa.sp += addend;
2112     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2113                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2114         /* return to same privilege level */
2115         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2116                        get_seg_base(e1, e2),
2117                        get_seg_limit(e1, e2),
2118                        e2);
2119     } else {
2120         /* return to different privilege level */
2121 #ifdef TARGET_X86_64
2122         if (shift == 2) {
2123             new_esp = popq(&sa);
2124             new_ss = popq(&sa) & 0xffff;
2125         } else
2126 #endif
2127         {
2128             if (shift == 1) {
2129                 /* 32 bits */
2130                 new_esp = popl(&sa);
2131                 new_ss = popl(&sa) & 0xffff;
2132             } else {
2133                 /* 16 bits */
2134                 new_esp = popw(&sa);
2135                 new_ss = popw(&sa);
2136             }
2137         }
2138         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2139                   new_ss, new_esp);
2140         if ((new_ss & 0xfffc) == 0) {
2141 #ifdef TARGET_X86_64
2142             /* NULL ss is allowed in long mode if cpl != 3 */
2143             /* XXX: test CS64? */
2144             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2145                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2146                                        0, 0xffffffff,
2147                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2148                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2149                                        DESC_W_MASK | DESC_A_MASK);
2150                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2151             } else
2152 #endif
2153             {
2154                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2155             }
2156         } else {
2157             if ((new_ss & 3) != rpl) {
2158                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2159             }
2160             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2161                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2162             }
2163             if (!(ss_e2 & DESC_S_MASK) ||
2164                 (ss_e2 & DESC_CS_MASK) ||
2165                 !(ss_e2 & DESC_W_MASK)) {
2166                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2167             }
2168             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2169             if (dpl != rpl) {
2170                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2171             }
2172             if (!(ss_e2 & DESC_P_MASK)) {
2173                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2174             }
2175             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2176                                    get_seg_base(ss_e1, ss_e2),
2177                                    get_seg_limit(ss_e1, ss_e2),
2178                                    ss_e2);
2179         }
2180 
2181         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2182                        get_seg_base(e1, e2),
2183                        get_seg_limit(e1, e2),
2184                        e2);
2185         sa.sp = new_esp;
2186 #ifdef TARGET_X86_64
2187         if (env->hflags & HF_CS64_MASK) {
2188             sa.sp_mask = -1;
2189         } else
2190 #endif
2191         {
2192             sa.sp_mask = get_sp_mask(ss_e2);
2193         }
2194 
2195         /* validate data segments */
2196         validate_seg(env, R_ES, rpl);
2197         validate_seg(env, R_DS, rpl);
2198         validate_seg(env, R_FS, rpl);
2199         validate_seg(env, R_GS, rpl);
2200 
2201         sa.sp += addend;
2202     }
2203     SET_ESP(sa.sp, sa.sp_mask);
2204     env->eip = new_eip;
2205     if (is_iret) {
2206         /* NOTE: 'cpl' is the _old_ CPL */
2207         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2208         if (cpl == 0) {
2209             eflags_mask |= IOPL_MASK;
2210         }
2211         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2212         if (cpl <= iopl) {
2213             eflags_mask |= IF_MASK;
2214         }
2215         if (shift == 0) {
2216             eflags_mask &= 0xffff;
2217         }
2218         cpu_load_eflags(env, new_eflags, eflags_mask);
2219     }
2220     return;
2221 
2222  return_to_vm86:
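         /*
          * IRET back to vm86 mode: EIP, CS and EFLAGS (with VM set) were
          * already popped above; the 32-bit frame also holds ESP, SS, ES,
          * DS, FS and GS, which are reloaded as vm86 segments
          * (base = selector << 4).
          */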
2223     new_esp = popl(&sa);
2224     new_ss = popl(&sa);
2225     new_es = popl(&sa);
2226     new_ds = popl(&sa);
2227     new_fs = popl(&sa);
2228     new_gs = popl(&sa);
2229 
2230     /* modify processor state */
2231     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2232                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2233                     VIP_MASK);
2234     load_seg_vm(env, R_CS, new_cs & 0xffff);
2235     load_seg_vm(env, R_SS, new_ss & 0xffff);
2236     load_seg_vm(env, R_ES, new_es & 0xffff);
2237     load_seg_vm(env, R_DS, new_ds & 0xffff);
2238     load_seg_vm(env, R_FS, new_fs & 0xffff);
2239     load_seg_vm(env, R_GS, new_gs & 0xffff);
2240 
2241     env->eip = new_eip & 0xffff;
2242     env->regs[R_ESP] = new_esp;
2243 }
2244 
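     /*
      * IRET with NT set undoes a previous task switch: the selector of the
      * previous TSS is read from the link field of the current TSS and its
      * descriptor must be a busy TSS; otherwise this is a normal protected
      * mode IRET.
      */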
2245 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2246 {
2247     int tss_selector, type;
2248     uint32_t e1, e2;
2249 
2250     /* specific case for TSS */
2251     if (env->eflags & NT_MASK) {
2252 #ifdef TARGET_X86_64
2253         if (env->hflags & HF_LMA_MASK) {
2254             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2255         }
2256 #endif
2257         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2258         if (tss_selector & 4) {
2259             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2260         }
2261         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2262             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2263         }
2264         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2265         /* NOTE: we check both segment and busy TSS */
2266         if (type != 3) {
2267             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2268         }
2269         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip,
2270                       false, 0, GETPC());
2271     } else {
2272         helper_ret_protected(env, shift, 1, 0, GETPC());
2273     }
2274     env->hflags2 &= ~HF2_NMI_MASK;
2275 }
2276 
2277 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2278 {
2279     helper_ret_protected(env, shift, 0, addend, GETPC());
2280 }
2281 
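     /*
      * SYSENTER loads flat CPL0 code and stack segments derived from the
      * IA32_SYSENTER_CS MSR (SS is CS + 8) and jumps to IA32_SYSENTER_EIP
      * with the stack from IA32_SYSENTER_ESP; a zero SYSENTER_CS selector
      * raises #GP(0).
      */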
2282 void helper_sysenter(CPUX86State *env)
2283 {
2284     if (env->sysenter_cs == 0) {
2285         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2286     }
2287     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2288 
2289 #ifdef TARGET_X86_64
2290     if (env->hflags & HF_LMA_MASK) {
2291         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2292                                0, 0xffffffff,
2293                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2294                                DESC_S_MASK |
2295                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2296                                DESC_L_MASK);
2297     } else
2298 #endif
2299     {
2300         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2301                                0, 0xffffffff,
2302                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2303                                DESC_S_MASK |
2304                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2305     }
2306     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2307                            0, 0xffffffff,
2308                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2309                            DESC_S_MASK |
2310                            DESC_W_MASK | DESC_A_MASK);
2311     env->regs[R_ESP] = env->sysenter_esp;
2312     env->eip = env->sysenter_eip;
2313 }
2314 
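     /*
      * SYSEXIT returns to CPL3 with fixed flat segments: CS is
      * SYSENTER_CS + 16 (legacy) or + 32 (64-bit), SS is CS + 8, and
      * ESP/EIP are taken from ECX/EDX.  It faults with #GP(0) outside
      * CPL0 or when SYSENTER_CS is zero.
      */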
2315 void helper_sysexit(CPUX86State *env, int dflag)
2316 {
2317     int cpl;
2318 
2319     cpl = env->hflags & HF_CPL_MASK;
2320     if (env->sysenter_cs == 0 || cpl != 0) {
2321         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2322     }
2323 #ifdef TARGET_X86_64
2324     if (dflag == 2) {
2325         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2326                                3, 0, 0xffffffff,
2327                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2328                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2329                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2330                                DESC_L_MASK);
2331         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2332                                3, 0, 0xffffffff,
2333                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2334                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2335                                DESC_W_MASK | DESC_A_MASK);
2336     } else
2337 #endif
2338     {
2339         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2340                                3, 0, 0xffffffff,
2341                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2342                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2343                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2344         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2345                                3, 0, 0xffffffff,
2346                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2347                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2348                                DESC_W_MASK | DESC_A_MASK);
2349     }
2350     env->regs[R_ESP] = env->regs[R_ECX];
2351     env->eip = env->regs[R_EDX];
2352 }
2353 
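     /*
      * LSL: return the segment limit of the selector and set ZF when the
      * descriptor is visible at the current CPL/RPL; only TSS and LDT
      * system descriptor types are accepted.  On any failure ZF is cleared
      * and 0 is returned.
      */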
2354 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2355 {
2356     unsigned int limit;
2357     uint32_t e1, e2, selector;
2358     int rpl, dpl, cpl, type;
2359 
2360     selector = selector1 & 0xffff;
2361     assert(CC_OP == CC_OP_EFLAGS);
2362     if ((selector & 0xfffc) == 0) {
2363         goto fail;
2364     }
2365     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2366         goto fail;
2367     }
2368     rpl = selector & 3;
2369     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2370     cpl = env->hflags & HF_CPL_MASK;
2371     if (e2 & DESC_S_MASK) {
2372         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2373             /* conforming */
2374         } else {
2375             if (dpl < cpl || dpl < rpl) {
2376                 goto fail;
2377             }
2378         }
2379     } else {
2380         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2381         switch (type) {
2382         case 1:
2383         case 2:
2384         case 3:
2385         case 9:
2386         case 11:
2387             break;
2388         default:
2389             goto fail;
2390         }
2391         if (dpl < cpl || dpl < rpl) {
2392         fail:
2393             CC_SRC &= ~CC_Z;
2394             return 0;
2395         }
2396     }
2397     limit = get_seg_limit(e1, e2);
2398     CC_SRC |= CC_Z;
2399     return limit;
2400 }
2401 
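     /*
      * LAR: like LSL but returns the access rights bytes of the descriptor
      * (masked to 0x00f0ff00); call and task gates are also accepted as
      * system types.
      */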
2402 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2403 {
2404     uint32_t e1, e2, selector;
2405     int rpl, dpl, cpl, type;
2406 
2407     selector = selector1 & 0xffff;
2408     assert(CC_OP == CC_OP_EFLAGS);
2409     if ((selector & 0xfffc) == 0) {
2410         goto fail;
2411     }
2412     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2413         goto fail;
2414     }
2415     rpl = selector & 3;
2416     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2417     cpl = env->hflags & HF_CPL_MASK;
2418     if (e2 & DESC_S_MASK) {
2419         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2420             /* conforming */
2421         } else {
2422             if (dpl < cpl || dpl < rpl) {
2423                 goto fail;
2424             }
2425         }
2426     } else {
2427         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2428         switch (type) {
2429         case 1:
2430         case 2:
2431         case 3:
2432         case 4:
2433         case 5:
2434         case 9:
2435         case 11:
2436         case 12:
2437             break;
2438         default:
2439             goto fail;
2440         }
2441         if (dpl < cpl || dpl < rpl) {
2442         fail:
2443             CC_SRC &= ~CC_Z;
2444             return 0;
2445         }
2446     }
2447     CC_SRC |= CC_Z;
2448     return e2 & 0x00f0ff00;
2449 }
2450 
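     /*
      * VERR/VERW set ZF when the selector refers to a segment that is
      * readable (VERR) or writable (VERW) at the current CPL/RPL without
      * faulting; system segments and inaccessible segments clear ZF.
      */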
2451 void helper_verr(CPUX86State *env, target_ulong selector1)
2452 {
2453     uint32_t e1, e2, eflags, selector;
2454     int rpl, dpl, cpl;
2455 
2456     selector = selector1 & 0xffff;
2457     eflags = cpu_cc_compute_all(env) | CC_Z;
2458     if ((selector & 0xfffc) == 0) {
2459         goto fail;
2460     }
2461     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2462         goto fail;
2463     }
2464     if (!(e2 & DESC_S_MASK)) {
2465         goto fail;
2466     }
2467     rpl = selector & 3;
2468     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2469     cpl = env->hflags & HF_CPL_MASK;
2470     if (e2 & DESC_CS_MASK) {
2471         if (!(e2 & DESC_R_MASK)) {
2472             goto fail;
2473         }
2474         if (!(e2 & DESC_C_MASK)) {
2475             if (dpl < cpl || dpl < rpl) {
2476                 goto fail;
2477             }
2478         }
2479     } else {
2480         if (dpl < cpl || dpl < rpl) {
2481         fail:
2482             eflags &= ~CC_Z;
2483         }
2484     }
2485     CC_SRC = eflags;
2486     CC_OP = CC_OP_EFLAGS;
2487 }
2488 
2489 void helper_verw(CPUX86State *env, target_ulong selector1)
2490 {
2491     uint32_t e1, e2, eflags, selector;
2492     int rpl, dpl, cpl;
2493 
2494     selector = selector1 & 0xffff;
2495     eflags = cpu_cc_compute_all(env) | CC_Z;
2496     if ((selector & 0xfffc) == 0) {
2497         goto fail;
2498     }
2499     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2500         goto fail;
2501     }
2502     if (!(e2 & DESC_S_MASK)) {
2503         goto fail;
2504     }
2505     rpl = selector & 3;
2506     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2507     cpl = env->hflags & HF_CPL_MASK;
2508     if (e2 & DESC_CS_MASK) {
2509         goto fail;
2510     } else {
2511         if (dpl < cpl || dpl < rpl) {
2512             goto fail;
2513         }
2514         if (!(e2 & DESC_W_MASK)) {
2515         fail:
2516             eflags &= ~CC_Z;
2517         }
2518     }
2519     CC_SRC = eflags;
2520     CC_OP = CC_OP_EFLAGS;
2521 }
2522