xref: /qemu/target/i386/tcg/seg_helper.c (revision 8053862af969a934dca67da9b38992e48fa1a95d)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 
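/*
 * Write back a new stack pointer value, respecting the stack-segment size.
 * On 64-bit builds the cases are handled separately: a 16-bit stack keeps
 * the upper bits of RSP, a 32-bit stack zero-extends the new value, and
 * any other mask (used for 64-bit stacks) stores the full value.
 */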
31 #ifdef TARGET_X86_64
32 #define SET_ESP(val, sp_mask)                                   \
33     do {                                                        \
34         if ((sp_mask) == 0xffff) {                              \
35             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
36                 ((val) & 0xffff);                               \
37         } else if ((sp_mask) == 0xffffffffLL) {                 \
38             env->regs[R_ESP] = (uint32_t)(val);                 \
39         } else {                                                \
40             env->regs[R_ESP] = (val);                           \
41         }                                                       \
42     } while (0)
43 #else
44 #define SET_ESP(val, sp_mask)                                   \
45     do {                                                        \
46         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
47             ((val) & (sp_mask));                                \
48     } while (0)
49 #endif
50 
51 /* XXX: use mmu_index to have proper DPL support */
52 typedef struct StackAccess
53 {
54     CPUX86State *env;
55     uintptr_t ra;
56     target_ulong ss_base;
57     target_ulong sp;
58     target_ulong sp_mask;
59     int mmu_index;
60 } StackAccess;
61 
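/*
 * Stack push/pop helpers for 16/32-bit stacks.  Every access applies the
 * SS base and the stack-pointer mask and uses the MMU index recorded in
 * the StackAccess; sa->sp is updated locally and only written back to
 * ESP/RSP by the caller (typically via SET_ESP).
 */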
62 static void pushw(StackAccess *sa, uint16_t val)
63 {
64     sa->sp -= 2;
65     cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
66                       val, sa->mmu_index, sa->ra);
67 }
68 
69 static void pushl(StackAccess *sa, uint32_t val)
70 {
71     sa->sp -= 4;
72     cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
73                       val, sa->mmu_index, sa->ra);
74 }
75 
76 static uint16_t popw(StackAccess *sa)
77 {
78     uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
79                                       sa->ss_base + (sa->sp & sa->sp_mask),
80                                       sa->mmu_index, sa->ra);
81     sa->sp += 2;
82     return ret;
83 }
84 
85 static uint32_t popl(StackAccess *sa)
86 {
87     uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
88                                      sa->ss_base + (sa->sp & sa->sp_mask),
89                                      sa->mmu_index, sa->ra);
90     sa->sp += 4;
91     return ret;
92 }
93 
94 int get_pg_mode(CPUX86State *env)
95 {
96     int pg_mode = 0;
97     if (!(env->cr[0] & CR0_PG_MASK)) {
98         return 0;
99     }
100     if (env->cr[0] & CR0_WP_MASK) {
101         pg_mode |= PG_MODE_WP;
102     }
103     if (env->cr[4] & CR4_PAE_MASK) {
104         pg_mode |= PG_MODE_PAE;
105         if (env->efer & MSR_EFER_NXE) {
106             pg_mode |= PG_MODE_NXE;
107         }
108     }
109     if (env->cr[4] & CR4_PSE_MASK) {
110         pg_mode |= PG_MODE_PSE;
111     }
112     if (env->cr[4] & CR4_SMEP_MASK) {
113         pg_mode |= PG_MODE_SMEP;
114     }
115     if (env->hflags & HF_LMA_MASK) {
116         pg_mode |= PG_MODE_LMA;
117         if (env->cr[4] & CR4_PKE_MASK) {
118             pg_mode |= PG_MODE_PKE;
119         }
120         if (env->cr[4] & CR4_PKS_MASK) {
121             pg_mode |= PG_MODE_PKS;
122         }
123         if (env->cr[4] & CR4_LA57_MASK) {
124             pg_mode |= PG_MODE_LA57;
125         }
126     }
127     return pg_mode;
128 }
129 
130 /* return non-zero on error */
131 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
132                                uint32_t *e2_ptr, int selector,
133                                uintptr_t retaddr)
134 {
135     SegmentCache *dt;
136     int index;
137     target_ulong ptr;
138 
139     if (selector & 0x4) {
140         dt = &env->ldt;
141     } else {
142         dt = &env->gdt;
143     }
144     index = selector & ~7;
145     if ((index + 7) > dt->limit) {
146         return -1;
147     }
148     ptr = dt->base + index;
149     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
150     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
151     return 0;
152 }
153 
154 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
155                                uint32_t *e2_ptr, int selector)
156 {
157     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
158 }
159 
160 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
161 {
162     unsigned int limit;
163 
164     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
165     if (e2 & DESC_G_MASK) {
166         limit = (limit << 12) | 0xfff;
167     }
168     return limit;
169 }
170 
171 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
172 {
173     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
174 }
175 
176 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
177                                          uint32_t e2)
178 {
179     sc->base = get_seg_base(e1, e2);
180     sc->limit = get_seg_limit(e1, e2);
181     sc->flags = e2;
182 }
183 
184 /* init the segment cache in vm86 mode. */
185 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
186 {
187     selector &= 0xffff;
188 
189     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
190                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
191                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
192 }
193 
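/*
 * Fetch the inner-level SS:ESP pair for privilege level 'dpl' from the
 * current TSS; the 16-bit and 32-bit TSS formats use different entry
 * sizes, selected by the TSS type.
 */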
194 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
195                                        uint32_t *esp_ptr, int dpl,
196                                        uintptr_t retaddr)
197 {
198     X86CPU *cpu = env_archcpu(env);
199     int type, index, shift;
200 
201 #if 0
202     {
203         int i;
204         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
205         for (i = 0; i < env->tr.limit; i++) {
206             printf("%02x ", env->tr.base[i]);
207             if ((i & 7) == 7) {
208                 printf("\n");
209             }
210         }
211         printf("\n");
212     }
213 #endif
214 
215     if (!(env->tr.flags & DESC_P_MASK)) {
216         cpu_abort(CPU(cpu), "invalid tss");
217     }
218     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
219     if ((type & 7) != 1) {
220         cpu_abort(CPU(cpu), "invalid tss type");
221     }
222     shift = type >> 3;
223     index = (dpl * 4 + 2) << shift;
224     if (index + (4 << shift) - 1 > env->tr.limit) {
225         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
226     }
227     if (shift == 0) {
228         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
229         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
230     } else {
231         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
232         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
233     }
234 }
235 
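/*
 * Load one segment register as part of a task switch, applying the
 * protection checks required for the incoming task's CS, SS and data
 * segments.  Most failures raise #TS; a not-present segment raises #NP.
 */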
236 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
237                          int cpl, uintptr_t retaddr)
238 {
239     uint32_t e1, e2;
240     int rpl, dpl;
241 
242     if ((selector & 0xfffc) != 0) {
243         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
244             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
245         }
246         if (!(e2 & DESC_S_MASK)) {
247             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
248         }
249         rpl = selector & 3;
250         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
251         if (seg_reg == R_CS) {
252             if (!(e2 & DESC_CS_MASK)) {
253                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
254             }
255             if (dpl != rpl) {
256                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
257             }
258         } else if (seg_reg == R_SS) {
259             /* SS must be writable data */
260             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
261                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
262             }
263             if (dpl != cpl || dpl != rpl) {
264                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
265             }
266         } else {
267             /* code segments must be readable */
268             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
269                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
270             }
271             /* if data or non-conforming code, check the access rights */
272             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
273                 if (dpl < cpl || dpl < rpl) {
274                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
275                 }
276             }
277         }
278         if (!(e2 & DESC_P_MASK)) {
279             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
280         }
281         cpu_x86_load_seg_cache(env, seg_reg, selector,
282                                get_seg_base(e1, e2),
283                                get_seg_limit(e1, e2),
284                                e2);
285     } else {
286         if (seg_reg == R_SS || seg_reg == R_CS) {
287             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
288         }
289     }
290 }
291 
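/* Set or clear the busy bit of a TSS descriptor located in the GDT. */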
292 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
293                          uintptr_t retaddr)
294 {
295     target_ulong ptr = env->gdt.base + (tss_selector & ~7);
296     uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
297 
298     if (value) {
299         e2 |= DESC_TSS_BUSY_MASK;
300     } else {
301         e2 &= ~DESC_TSS_BUSY_MASK;
302     }
303 
304     cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
305 }
306 
307 #define SWITCH_TSS_JMP  0
308 #define SWITCH_TSS_IRET 1
309 #define SWITCH_TSS_CALL 2
310 
311 /* return 0 if switching to a 16-bit TSS, 1 if switching to a 32-bit TSS */
312 static int switch_tss_ra(CPUX86State *env, int tss_selector,
313                          uint32_t e1, uint32_t e2, int source,
314                          uint32_t next_eip, uintptr_t retaddr)
315 {
316     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
317     target_ulong tss_base;
318     uint32_t new_regs[8], new_segs[6];
319     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
320     uint32_t old_eflags, eflags_mask;
321     SegmentCache *dt;
322     int index;
323     target_ulong ptr;
324 
325     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
326     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
327               source);
328 
329     /* if it is a task gate, read the TSS segment it points to and load it */
330     if (type == 5) {
331         if (!(e2 & DESC_P_MASK)) {
332             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
333         }
334         tss_selector = e1 >> 16;
335         if (tss_selector & 4) {
336             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
337         }
338         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
339             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
340         }
341         if (e2 & DESC_S_MASK) {
342             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
343         }
344         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
345         if ((type & 7) != 1) {
346             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
347         }
348     }
349 
350     if (!(e2 & DESC_P_MASK)) {
351         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
352     }
353 
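    /* a 32-bit TSS must be at least 104 bytes, a 16-bit TSS at least 44 */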
354     if (type & 8) {
355         tss_limit_max = 103;
356     } else {
357         tss_limit_max = 43;
358     }
359     tss_limit = get_seg_limit(e1, e2);
360     tss_base = get_seg_base(e1, e2);
361     if ((tss_selector & 4) != 0 ||
362         tss_limit < tss_limit_max) {
363         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
364     }
365     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
366     if (old_type & 8) {
367         old_tss_limit_max = 103;
368     } else {
369         old_tss_limit_max = 43;
370     }
371 
372     /* read all the registers from the new TSS */
373     if (type & 8) {
374         /* 32 bit */
375         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
376         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
377         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
378         for (i = 0; i < 8; i++) {
379             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
380                                             retaddr);
381         }
382         for (i = 0; i < 6; i++) {
383             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
384                                              retaddr);
385         }
386         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
387         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
388     } else {
389         /* 16 bit */
390         new_cr3 = 0;
391         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
392         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
393         for (i = 0; i < 8; i++) {
394             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
395         }
396         for (i = 0; i < 4; i++) {
397             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
398                                              retaddr);
399         }
400         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
401         new_segs[R_FS] = 0;
402         new_segs[R_GS] = 0;
403         new_trap = 0;
404     }
405     /* XXX: avoid a compiler warning, see
406      http://support.amd.com/us/Processor_TechDocs/24593.pdf
407      chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
408     (void)new_trap;
409 
410     /* NOTE: we must avoid memory exceptions during the task switch,
411        so we make dummy accesses beforehand */
412     /* XXX: it can still fail in some cases, so a bigger hack is
413        necessary to validate the TLB after having done the accesses */
414 
415     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
416     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
417     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
418     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
419 
420     /* clear the busy bit of the old TSS (the task switch is restartable) */
421     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
422         tss_set_busy(env, env->tr.selector, 0, retaddr);
423     }
424     old_eflags = cpu_compute_eflags(env);
425     if (source == SWITCH_TSS_IRET) {
426         old_eflags &= ~NT_MASK;
427     }
428 
429     /* save the current state in the old TSS */
430     if (old_type & 8) {
431         /* 32 bit */
432         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
433         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
434         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
435         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
436         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
437         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
438         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
439         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
440         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
441         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
442         for (i = 0; i < 6; i++) {
443             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
444                               env->segs[i].selector, retaddr);
445         }
446     } else {
447         /* 16 bit */
448         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
449         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
450         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
451         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
452         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
453         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
454         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
455         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
456         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
457         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
458         for (i = 0; i < 4; i++) {
459             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
460                               env->segs[i].selector, retaddr);
461         }
462     }
463 
464     /* now if an exception occurs, it will occur in the next task's
465        context */
466 
467     if (source == SWITCH_TSS_CALL) {
468         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
469         new_eflags |= NT_MASK;
470     }
471 
472     /* set busy bit */
473     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
474         tss_set_busy(env, tss_selector, 1, retaddr);
475     }
476 
477     /* set the new CPU state */
478     /* from this point, any exception that occurs can cause problems */
479     env->cr[0] |= CR0_TS_MASK;
480     env->hflags |= HF_TS_MASK;
481     env->tr.selector = tss_selector;
482     env->tr.base = tss_base;
483     env->tr.limit = tss_limit;
484     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
485 
486     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
487         cpu_x86_update_cr3(env, new_cr3);
488     }
489 
490     /* load all the registers that cannot fault first, then reload the
491        ones that may raise an exception */
492     env->eip = new_eip;
493     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
494         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
495     if (type & 8) {
496         cpu_load_eflags(env, new_eflags, eflags_mask);
497         for (i = 0; i < 8; i++) {
498             env->regs[i] = new_regs[i];
499         }
500     } else {
501         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
502         for (i = 0; i < 8; i++) {
503             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
504         }
505     }
506     if (new_eflags & VM_MASK) {
507         for (i = 0; i < 6; i++) {
508             load_seg_vm(env, i, new_segs[i]);
509         }
510     } else {
511         /* load just the selectors first, as loading the descriptors may trigger exceptions */
512         for (i = 0; i < 6; i++) {
513             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
514         }
515     }
516 
517     env->ldt.selector = new_ldt & ~4;
518     env->ldt.base = 0;
519     env->ldt.limit = 0;
520     env->ldt.flags = 0;
521 
522     /* load the LDT */
523     if (new_ldt & 4) {
524         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
525     }
526 
527     if ((new_ldt & 0xfffc) != 0) {
528         dt = &env->gdt;
529         index = new_ldt & ~7;
530         if ((index + 7) > dt->limit) {
531             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
532         }
533         ptr = dt->base + index;
534         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
535         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
536         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
537             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
538         }
539         if (!(e2 & DESC_P_MASK)) {
540             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
541         }
542         load_seg_cache_raw_dt(&env->ldt, e1, e2);
543     }
544 
545     /* load the segments */
546     if (!(new_eflags & VM_MASK)) {
547         int cpl = new_segs[R_CS] & 3;
548         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
549         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
550         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
551         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
552         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
553         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
554     }
555 
556     /* check that the new EIP is within the CS segment limit */
557     if (new_eip > env->segs[R_CS].limit) {
558         /* XXX: different exception if CALL? */
559         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
560     }
561 
562 #ifndef CONFIG_USER_ONLY
563     /* reset local breakpoints */
564     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
565         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
566     }
567 #endif
568     return type >> 3;
569 }
570 
571 static int switch_tss(CPUX86State *env, int tss_selector,
572                       uint32_t e1, uint32_t e2, int source,
573                       uint32_t next_eip)
574 {
575     return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
576 }
577 
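/*
 * Return the stack-pointer mask for a stack segment with descriptor flags
 * 'e2': 0xffff for a 16-bit stack, 0xffffffff for a 32-bit stack, and 0
 * for a 64-bit stack, which is never masked.
 */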
578 static inline unsigned int get_sp_mask(unsigned int e2)
579 {
580 #ifdef TARGET_X86_64
581     if (e2 & DESC_L_MASK) {
582         return 0;
583     } else
584 #endif
585     if (e2 & DESC_B_MASK) {
586         return 0xffffffff;
587     } else {
588         return 0xffff;
589     }
590 }
591 
592 static int exception_is_fault(int intno)
593 {
594     switch (intno) {
595         /*
596          * #DB can be both fault- and trap-like, but it never sets RF=1
597          * in the RFLAGS value pushed on the stack.
598          */
599     case EXCP01_DB:
600     case EXCP03_INT3:
601     case EXCP04_INTO:
602     case EXCP08_DBLE:
603     case EXCP12_MCHK:
604         return 0;
605     }
606     /* Everything else, including reserved exceptions, is a fault.  */
607     return 1;
608 }
609 
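/*
 * Exceptions that push an error code: #DF, #TS, #NP, #SS, #GP, #PF and #AC.
 */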
610 int exception_has_error_code(int intno)
611 {
612     switch (intno) {
613     case 8:
614     case 10:
615     case 11:
616     case 12:
617     case 13:
618     case 14:
619     case 17:
620         return 1;
621     }
622     return 0;
623 }
624 
625 /* protected mode interrupt */
626 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
627                                    int error_code, unsigned int next_eip,
628                                    int is_hw)
629 {
630     SegmentCache *dt;
631     target_ulong ptr;
632     int type, dpl, selector, ss_dpl, cpl;
633     int has_error_code, new_stack, shift;
634     uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
635     uint32_t old_eip, eflags;
636     int vm86 = env->eflags & VM_MASK;
637     StackAccess sa;
638     bool set_rf;
639 
640     has_error_code = 0;
641     if (!is_int && !is_hw) {
642         has_error_code = exception_has_error_code(intno);
643     }
644     if (is_int) {
645         old_eip = next_eip;
646         set_rf = false;
647     } else {
648         old_eip = env->eip;
649         set_rf = exception_is_fault(intno);
650     }
651 
652     dt = &env->idt;
653     if (intno * 8 + 7 > dt->limit) {
654         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
655     }
656     ptr = dt->base + intno * 8;
657     e1 = cpu_ldl_kernel(env, ptr);
658     e2 = cpu_ldl_kernel(env, ptr + 4);
659     /* check gate type */
660     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
661     switch (type) {
662     case 5: /* task gate */
663     case 6: /* 286 interrupt gate */
664     case 7: /* 286 trap gate */
665     case 14: /* 386 interrupt gate */
666     case 15: /* 386 trap gate */
667         break;
668     default:
669         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
670         break;
671     }
672     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
673     cpl = env->hflags & HF_CPL_MASK;
674     /* check privilege level for software interrupts */
675     if (is_int && dpl < cpl) {
676         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
677     }
678 
679     sa.env = env;
680     sa.ra = 0;
681     sa.mmu_index = cpu_mmu_index_kernel(env);
682 
683     if (type == 5) {
684         /* task gate */
685         /* this check must be done here so that the correct error code is reported */
686         if (!(e2 & DESC_P_MASK)) {
687             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
688         }
689         shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
690         if (has_error_code) {
691             /* push the error code */
692             if (env->segs[R_SS].flags & DESC_B_MASK) {
693                 sa.sp_mask = 0xffffffff;
694             } else {
695                 sa.sp_mask = 0xffff;
696             }
697             sa.sp = env->regs[R_ESP];
698             sa.ss_base = env->segs[R_SS].base;
699             if (shift) {
700                 pushl(&sa, error_code);
701             } else {
702                 pushw(&sa, error_code);
703             }
704             SET_ESP(sa.sp, sa.sp_mask);
705         }
706         return;
707     }
708 
709     /* Otherwise, trap or interrupt gate */
710 
711     /* check valid bit */
712     if (!(e2 & DESC_P_MASK)) {
713         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
714     }
715     selector = e1 >> 16;
716     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
717     if ((selector & 0xfffc) == 0) {
718         raise_exception_err(env, EXCP0D_GPF, 0);
719     }
720     if (load_segment(env, &e1, &e2, selector) != 0) {
721         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
722     }
723     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
724         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
725     }
726     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
727     if (dpl > cpl) {
728         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
729     }
730     if (!(e2 & DESC_P_MASK)) {
731         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
732     }
733     if (e2 & DESC_C_MASK) {
734         dpl = cpl;
735     }
736     if (dpl < cpl) {
737         /* to inner privilege */
738         uint32_t esp;
739         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
740         if ((ss & 0xfffc) == 0) {
741             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
742         }
743         if ((ss & 3) != dpl) {
744             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
745         }
746         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
747             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
748         }
749         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
750         if (ss_dpl != dpl) {
751             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
752         }
753         if (!(ss_e2 & DESC_S_MASK) ||
754             (ss_e2 & DESC_CS_MASK) ||
755             !(ss_e2 & DESC_W_MASK)) {
756             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
757         }
758         if (!(ss_e2 & DESC_P_MASK)) {
759             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
760         }
761         new_stack = 1;
762         sa.sp = esp;
763         sa.sp_mask = get_sp_mask(ss_e2);
764         sa.ss_base = get_seg_base(ss_e1, ss_e2);
765     } else  {
766         /* to same privilege */
767         if (vm86) {
768             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
769         }
770         new_stack = 0;
771         sa.sp = env->regs[R_ESP];
772         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
773         sa.ss_base = env->segs[R_SS].base;
774     }
775 
776     shift = type >> 3;
777 
778 #if 0
779     /* XXX: check that enough room is available */
780     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
781     if (vm86) {
782         push_size += 8;
783     }
784     push_size <<= shift;
785 #endif
786     eflags = cpu_compute_eflags(env);
787     /*
788      * AMD states that code breakpoint #DBs clear RF to 0, while Intel leaves it
789      * as is.  AMD behavior could be implemented in check_hw_breakpoints().
790      */
791     if (set_rf) {
792         eflags |= RF_MASK;
793     }
794 
795     if (shift == 1) {
796         if (new_stack) {
797             if (vm86) {
798                 pushl(&sa, env->segs[R_GS].selector);
799                 pushl(&sa, env->segs[R_FS].selector);
800                 pushl(&sa, env->segs[R_DS].selector);
801                 pushl(&sa, env->segs[R_ES].selector);
802             }
803             pushl(&sa, env->segs[R_SS].selector);
804             pushl(&sa, env->regs[R_ESP]);
805         }
806         pushl(&sa, eflags);
807         pushl(&sa, env->segs[R_CS].selector);
808         pushl(&sa, old_eip);
809         if (has_error_code) {
810             pushl(&sa, error_code);
811         }
812     } else {
813         if (new_stack) {
814             if (vm86) {
815                 pushw(&sa, env->segs[R_GS].selector);
816                 pushw(&sa, env->segs[R_FS].selector);
817                 pushw(&sa, env->segs[R_DS].selector);
818                 pushw(&sa, env->segs[R_ES].selector);
819             }
820             pushw(&sa, env->segs[R_SS].selector);
821             pushw(&sa, env->regs[R_ESP]);
822         }
823         pushw(&sa, eflags);
824         pushw(&sa, env->segs[R_CS].selector);
825         pushw(&sa, old_eip);
826         if (has_error_code) {
827             pushw(&sa, error_code);
828         }
829     }
830 
831     /* interrupt gates clear the IF flag */
832     if ((type & 1) == 0) {
833         env->eflags &= ~IF_MASK;
834     }
835     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
836 
837     if (new_stack) {
838         if (vm86) {
839             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
840             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
841             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
842             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
843         }
844         ss = (ss & ~3) | dpl;
845         cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
846                                get_seg_limit(ss_e1, ss_e2), ss_e2);
847     }
848     SET_ESP(sa.sp, sa.sp_mask);
849 
850     selector = (selector & ~3) | dpl;
851     cpu_x86_load_seg_cache(env, R_CS, selector,
852                    get_seg_base(e1, e2),
853                    get_seg_limit(e1, e2),
854                    e2);
855     env->eip = offset;
856 }
857 
858 #ifdef TARGET_X86_64
859 
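/*
 * 64-bit stack push/pop helpers: in 64-bit mode the SS base is zero and
 * the stack pointer is not masked, so only sa->sp is used.
 */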
860 static void pushq(StackAccess *sa, uint64_t val)
861 {
862     sa->sp -= 8;
863     cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
864 }
865 
866 static uint64_t popq(StackAccess *sa)
867 {
868     uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
869     sa->sp += 8;
870     return ret;
871 }
872 
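/*
 * Read a stack pointer from the 64-bit TSS: level 0-2 selects RSP0-RSP2,
 * level n+3 selects ISTn.  The value must be a canonical address,
 * otherwise #SS(0) is raised.
 */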
873 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
874 {
875     X86CPU *cpu = env_archcpu(env);
876     int index, pg_mode;
877     target_ulong rsp;
878     int32_t sext;
879 
880 #if 0
881     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
882            env->tr.base, env->tr.limit);
883 #endif
884 
885     if (!(env->tr.flags & DESC_P_MASK)) {
886         cpu_abort(CPU(cpu), "invalid tss");
887     }
888     index = 8 * level + 4;
889     if ((index + 7) > env->tr.limit) {
890         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
891     }
892 
893     rsp = cpu_ldq_kernel(env, env->tr.base + index);
894 
895     /* check that the virtual address is canonical (properly sign-extended) */
896     pg_mode = get_pg_mode(env);
897     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
898     if (sext != 0 && sext != -1) {
899         raise_exception_err(env, EXCP0C_STACK, 0);
900     }
901 
902     return rsp;
903 }
904 
905 /* 64 bit interrupt */
906 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
907                            int error_code, target_ulong next_eip, int is_hw)
908 {
909     SegmentCache *dt;
910     target_ulong ptr;
911     int type, dpl, selector, cpl, ist;
912     int has_error_code, new_stack;
913     uint32_t e1, e2, e3, ss, eflags;
914     target_ulong old_eip, offset;
915     bool set_rf;
916     StackAccess sa;
917 
918     has_error_code = 0;
919     if (!is_int && !is_hw) {
920         has_error_code = exception_has_error_code(intno);
921     }
922     if (is_int) {
923         old_eip = next_eip;
924         set_rf = false;
925     } else {
926         old_eip = env->eip;
927         set_rf = exception_is_fault(intno);
928     }
929 
930     dt = &env->idt;
931     if (intno * 16 + 15 > dt->limit) {
932         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
933     }
934     ptr = dt->base + intno * 16;
935     e1 = cpu_ldl_kernel(env, ptr);
936     e2 = cpu_ldl_kernel(env, ptr + 4);
937     e3 = cpu_ldl_kernel(env, ptr + 8);
938     /* check gate type */
939     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
940     switch (type) {
941     case 14: /* 386 interrupt gate */
942     case 15: /* 386 trap gate */
943         break;
944     default:
945         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
946         break;
947     }
948     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
949     cpl = env->hflags & HF_CPL_MASK;
950     /* check privilege level for software interrupts */
951     if (is_int && dpl < cpl) {
952         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
953     }
954     /* check valid bit */
955     if (!(e2 & DESC_P_MASK)) {
956         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
957     }
958     selector = e1 >> 16;
959     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
960     ist = e2 & 7;
961     if ((selector & 0xfffc) == 0) {
962         raise_exception_err(env, EXCP0D_GPF, 0);
963     }
964 
965     if (load_segment(env, &e1, &e2, selector) != 0) {
966         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
967     }
968     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
969         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
970     }
971     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
972     if (dpl > cpl) {
973         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
974     }
975     if (!(e2 & DESC_P_MASK)) {
976         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
977     }
978     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
979         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
980     }
981     if (e2 & DESC_C_MASK) {
982         dpl = cpl;
983     }
984 
985     sa.env = env;
986     sa.ra = 0;
987     sa.mmu_index = cpu_mmu_index_kernel(env);
988     sa.sp_mask = -1;
989     sa.ss_base = 0;
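    /* an IST entry forces a stack switch even without a privilege change */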
990     if (dpl < cpl || ist != 0) {
991         /* to inner privilege */
992         new_stack = 1;
993         sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
994         ss = 0;
995     } else {
996         /* to same privilege */
997         if (env->eflags & VM_MASK) {
998             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
999         }
1000         new_stack = 0;
1001         sa.sp = env->regs[R_ESP];
1002     }
1003     sa.sp &= ~0xfLL; /* align stack */
1004 
1005     /* See do_interrupt_protected.  */
1006     eflags = cpu_compute_eflags(env);
1007     if (set_rf) {
1008         eflags |= RF_MASK;
1009     }
1010 
1011     pushq(&sa, env->segs[R_SS].selector);
1012     pushq(&sa, env->regs[R_ESP]);
1013     pushq(&sa, eflags);
1014     pushq(&sa, env->segs[R_CS].selector);
1015     pushq(&sa, old_eip);
1016     if (has_error_code) {
1017         pushq(&sa, error_code);
1018     }
1019 
1020     /* interrupt gate clear IF mask */
1021     /* interrupt gates clear the IF flag */
1022         env->eflags &= ~IF_MASK;
1023     }
1024     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1025 
1026     if (new_stack) {
1027         ss = 0 | dpl;
1028         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1029     }
1030     env->regs[R_ESP] = sa.sp;
1031 
1032     selector = (selector & ~3) | dpl;
1033     cpu_x86_load_seg_cache(env, R_CS, selector,
1034                    get_seg_base(e1, e2),
1035                    get_seg_limit(e1, e2),
1036                    e2);
1037     env->eip = offset;
1038 }
1039 #endif /* TARGET_X86_64 */
1040 
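/*
 * SYSRET: return to user mode from a SYSCALL fast system call.  The target
 * CS and SS selectors are derived from MSR_STAR[63:48]; in long mode
 * RFLAGS is restored from R11 and RIP from RCX.
 */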
1041 void helper_sysret(CPUX86State *env, int dflag)
1042 {
1043     int cpl, selector;
1044 
1045     if (!(env->efer & MSR_EFER_SCE)) {
1046         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1047     }
1048     cpl = env->hflags & HF_CPL_MASK;
1049     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1050         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1051     }
1052     selector = (env->star >> 48) & 0xffff;
1053 #ifdef TARGET_X86_64
1054     if (env->hflags & HF_LMA_MASK) {
1055         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1056                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1057                         NT_MASK);
1058         if (dflag == 2) {
1059             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1060                                    0, 0xffffffff,
1061                                    DESC_G_MASK | DESC_P_MASK |
1062                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1063                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1064                                    DESC_L_MASK);
1065             env->eip = env->regs[R_ECX];
1066         } else {
1067             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1068                                    0, 0xffffffff,
1069                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1070                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1071                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1072             env->eip = (uint32_t)env->regs[R_ECX];
1073         }
1074         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1075                                0, 0xffffffff,
1076                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1077                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1078                                DESC_W_MASK | DESC_A_MASK);
1079     } else
1080 #endif
1081     {
1082         env->eflags |= IF_MASK;
1083         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1084                                0, 0xffffffff,
1085                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1086                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1087                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1088         env->eip = (uint32_t)env->regs[R_ECX];
1089         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1090                                0, 0xffffffff,
1091                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1092                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1093                                DESC_W_MASK | DESC_A_MASK);
1094     }
1095 }
1096 
1097 /* real mode interrupt */
1098 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1099                               int error_code, unsigned int next_eip)
1100 {
1101     SegmentCache *dt;
1102     target_ulong ptr;
1103     int selector;
1104     uint32_t offset;
1105     uint32_t old_cs, old_eip;
1106     StackAccess sa;
1107 
1108     /* real mode (simpler!) */
1109     dt = &env->idt;
1110     if (intno * 4 + 3 > dt->limit) {
1111         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1112     }
1113     ptr = dt->base + intno * 4;
1114     offset = cpu_lduw_kernel(env, ptr);
1115     selector = cpu_lduw_kernel(env, ptr + 2);
1116 
1117     sa.env = env;
1118     sa.ra = 0;
1119     sa.sp = env->regs[R_ESP];
1120     sa.sp_mask = 0xffff;
1121     sa.ss_base = env->segs[R_SS].base;
1122     sa.mmu_index = cpu_mmu_index_kernel(env);
1123 
1124     if (is_int) {
1125         old_eip = next_eip;
1126     } else {
1127         old_eip = env->eip;
1128     }
1129     old_cs = env->segs[R_CS].selector;
1130     /* XXX: use SS segment size? */
1131     pushw(&sa, cpu_compute_eflags(env));
1132     pushw(&sa, old_cs);
1133     pushw(&sa, old_eip);
1134 
1135     /* update processor state */
1136     SET_ESP(sa.sp, sa.sp_mask);
1137     env->eip = offset;
1138     env->segs[R_CS].selector = selector;
1139     env->segs[R_CS].base = (selector << 4);
1140     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1141 }
1142 
1143 /*
1144  * Begin execution of an interrupt. is_int is TRUE if coming from
1145  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1146  * instruction. It is only relevant if is_int is TRUE.
1147  */
1148 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1149                       int error_code, target_ulong next_eip, int is_hw)
1150 {
1151     CPUX86State *env = &cpu->env;
1152 
1153     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1154         if ((env->cr[0] & CR0_PE_MASK)) {
1155             static int count;
1156 
1157             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1158                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1159                      count, intno, error_code, is_int,
1160                      env->hflags & HF_CPL_MASK,
1161                      env->segs[R_CS].selector, env->eip,
1162                      (int)env->segs[R_CS].base + env->eip,
1163                      env->segs[R_SS].selector, env->regs[R_ESP]);
1164             if (intno == 0x0e) {
1165                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1166             } else {
1167                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1168             }
1169             qemu_log("\n");
1170             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1171 #if 0
1172             {
1173                 int i;
1174                 target_ulong ptr;
1175 
1176                 qemu_log("       code=");
1177                 ptr = env->segs[R_CS].base + env->eip;
1178                 for (i = 0; i < 16; i++) {
1179                     qemu_log(" %02x", ldub(ptr + i));
1180                 }
1181                 qemu_log("\n");
1182             }
1183 #endif
1184             count++;
1185         }
1186     }
1187     if (env->cr[0] & CR0_PE_MASK) {
1188 #if !defined(CONFIG_USER_ONLY)
1189         if (env->hflags & HF_GUEST_MASK) {
1190             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1191         }
1192 #endif
1193 #ifdef TARGET_X86_64
1194         if (env->hflags & HF_LMA_MASK) {
1195             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1196         } else
1197 #endif
1198         {
1199             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1200                                    is_hw);
1201         }
1202     } else {
1203 #if !defined(CONFIG_USER_ONLY)
1204         if (env->hflags & HF_GUEST_MASK) {
1205             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1206         }
1207 #endif
1208         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1209     }
1210 
1211 #if !defined(CONFIG_USER_ONLY)
1212     if (env->hflags & HF_GUEST_MASK) {
1213         CPUState *cs = CPU(cpu);
1214         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1215                                       offsetof(struct vmcb,
1216                                                control.event_inj));
1217 
1218         x86_stl_phys(cs,
1219                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1220                  event_inj & ~SVM_EVTINJ_VALID);
1221     }
1222 #endif
1223 }
1224 
1225 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1226 {
1227     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1228 }
1229 
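/*
 * LLDT: load the Local Descriptor Table register from a selector in the
 * GDT.  A null selector leaves the LDT with a zero base and limit.
 */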
1230 void helper_lldt(CPUX86State *env, int selector)
1231 {
1232     SegmentCache *dt;
1233     uint32_t e1, e2;
1234     int index, entry_limit;
1235     target_ulong ptr;
1236 
1237     selector &= 0xffff;
1238     if ((selector & 0xfffc) == 0) {
1239         /* XXX: NULL selector case: invalid LDT */
1240         env->ldt.base = 0;
1241         env->ldt.limit = 0;
1242     } else {
1243         if (selector & 0x4) {
1244             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1245         }
1246         dt = &env->gdt;
1247         index = selector & ~7;
1248 #ifdef TARGET_X86_64
1249         if (env->hflags & HF_LMA_MASK) {
1250             entry_limit = 15;
1251         } else
1252 #endif
1253         {
1254             entry_limit = 7;
1255         }
1256         if ((index + entry_limit) > dt->limit) {
1257             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1258         }
1259         ptr = dt->base + index;
1260         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1261         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1262         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1263             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1264         }
1265         if (!(e2 & DESC_P_MASK)) {
1266             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1267         }
1268 #ifdef TARGET_X86_64
1269         if (env->hflags & HF_LMA_MASK) {
1270             uint32_t e3;
1271 
1272             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1273             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1274             env->ldt.base |= (target_ulong)e3 << 32;
1275         } else
1276 #endif
1277         {
1278             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1279         }
1280     }
1281     env->ldt.selector = selector;
1282 }
1283 
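/*
 * LTR: load the Task Register from a GDT selector that refers to an
 * available TSS; the descriptor is then marked busy.
 */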
1284 void helper_ltr(CPUX86State *env, int selector)
1285 {
1286     SegmentCache *dt;
1287     uint32_t e1, e2;
1288     int index, type, entry_limit;
1289     target_ulong ptr;
1290 
1291     selector &= 0xffff;
1292     if ((selector & 0xfffc) == 0) {
1293         /* NULL selector case: invalid TR */
1294         env->tr.base = 0;
1295         env->tr.limit = 0;
1296         env->tr.flags = 0;
1297     } else {
1298         if (selector & 0x4) {
1299             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1300         }
1301         dt = &env->gdt;
1302         index = selector & ~7;
1303 #ifdef TARGET_X86_64
1304         if (env->hflags & HF_LMA_MASK) {
1305             entry_limit = 15;
1306         } else
1307 #endif
1308         {
1309             entry_limit = 7;
1310         }
1311         if ((index + entry_limit) > dt->limit) {
1312             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1313         }
1314         ptr = dt->base + index;
1315         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1316         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1317         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1318         if ((e2 & DESC_S_MASK) ||
1319             (type != 1 && type != 9)) {
1320             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1321         }
1322         if (!(e2 & DESC_P_MASK)) {
1323             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1324         }
1325 #ifdef TARGET_X86_64
1326         if (env->hflags & HF_LMA_MASK) {
1327             uint32_t e3, e4;
1328 
1329             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1330             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1331             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1332                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1333             }
1334             load_seg_cache_raw_dt(&env->tr, e1, e2);
1335             env->tr.base |= (target_ulong)e3 << 32;
1336         } else
1337 #endif
1338         {
1339             load_seg_cache_raw_dt(&env->tr, e1, e2);
1340         }
1341         e2 |= DESC_TSS_BUSY_MASK;
1342         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1343     }
1344     env->tr.selector = selector;
1345 }
1346 
1347 /* only valid in protected mode and not in VM86 mode; seg_reg must be != R_CS */
1348 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1349 {
1350     uint32_t e1, e2;
1351     int cpl, dpl, rpl;
1352     SegmentCache *dt;
1353     int index;
1354     target_ulong ptr;
1355 
1356     selector &= 0xffff;
1357     cpl = env->hflags & HF_CPL_MASK;
1358     if ((selector & 0xfffc) == 0) {
1359         /* null selector case */
1360         if (seg_reg == R_SS
1361 #ifdef TARGET_X86_64
1362             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1363 #endif
1364             ) {
1365             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1366         }
1367         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1368     } else {
1369 
1370         if (selector & 0x4) {
1371             dt = &env->ldt;
1372         } else {
1373             dt = &env->gdt;
1374         }
1375         index = selector & ~7;
1376         if ((index + 7) > dt->limit) {
1377             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1378         }
1379         ptr = dt->base + index;
1380         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1381         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1382 
1383         if (!(e2 & DESC_S_MASK)) {
1384             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1385         }
1386         rpl = selector & 3;
1387         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1388         if (seg_reg == R_SS) {
1389             /* must be writable segment */
1390             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1391                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1392             }
1393             if (rpl != cpl || dpl != cpl) {
1394                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1395             }
1396         } else {
1397             /* must be readable segment */
1398             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1399                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1400             }
1401 
1402             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1403             /* if not a conforming code segment, check the access rights */
1404                 if (dpl < cpl || dpl < rpl) {
1405                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1406                 }
1407             }
1408         }
1409 
1410         if (!(e2 & DESC_P_MASK)) {
1411             if (seg_reg == R_SS) {
1412                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1413             } else {
1414                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1415             }
1416         }
1417 
1418         /* set the access bit if not already set */
1419         if (!(e2 & DESC_A_MASK)) {
1420             e2 |= DESC_A_MASK;
1421             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1422         }
1423 
1424         cpu_x86_load_seg_cache(env, seg_reg, selector,
1425                        get_seg_base(e1, e2),
1426                        get_seg_limit(e1, e2),
1427                        e2);
1428 #if 0
1429         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1430                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1431 #endif
1432     }
1433 }
1434 
1435 /* protected mode jump */
1436 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1437                            target_ulong next_eip)
1438 {
1439     int gate_cs, type;
1440     uint32_t e1, e2, cpl, dpl, rpl, limit;
1441 
1442     if ((new_cs & 0xfffc) == 0) {
1443         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1444     }
1445     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1446         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1447     }
1448     cpl = env->hflags & HF_CPL_MASK;
1449     if (e2 & DESC_S_MASK) {
1450         if (!(e2 & DESC_CS_MASK)) {
1451             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1452         }
1453         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1454         if (e2 & DESC_C_MASK) {
1455             /* conforming code segment */
1456             if (dpl > cpl) {
1457                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1458             }
1459         } else {
1460             /* non-conforming code segment */
1461             rpl = new_cs & 3;
1462             if (rpl > cpl) {
1463                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1464             }
1465             if (dpl != cpl) {
1466                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1467             }
1468         }
1469         if (!(e2 & DESC_P_MASK)) {
1470             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1471         }
1472         limit = get_seg_limit(e1, e2);
1473         if (new_eip > limit &&
1474             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1475             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1476         }
1477         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1478                        get_seg_base(e1, e2), limit, e2);
1479         env->eip = new_eip;
1480     } else {
1481         /* jump through a TSS, task gate or call gate */
1482         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1483         rpl = new_cs & 3;
1484         cpl = env->hflags & HF_CPL_MASK;
1485         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1486 
1487 #ifdef TARGET_X86_64
1488         if (env->efer & MSR_EFER_LMA) {
1489             if (type != 12) {
1490                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1491             }
1492         }
1493 #endif
1494         switch (type) {
1495         case 1: /* 286 TSS */
1496         case 9: /* 386 TSS */
1497         case 5: /* task gate */
1498             if (dpl < cpl || dpl < rpl) {
1499                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1500             }
1501             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1502             break;
1503         case 4: /* 286 call gate */
1504         case 12: /* 386 call gate */
1505             if ((dpl < cpl) || (dpl < rpl)) {
1506                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1507             }
1508             if (!(e2 & DESC_P_MASK)) {
1509                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1510             }
1511             gate_cs = e1 >> 16;
1512             new_eip = (e1 & 0xffff);
1513             if (type == 12) {
1514                 new_eip |= (e2 & 0xffff0000);
1515             }
1516 
1517 #ifdef TARGET_X86_64
1518             if (env->efer & MSR_EFER_LMA) {
1519                 /* load the upper 8 bytes of the 64-bit call gate */
1520                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1521                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1522                                            GETPC());
1523                 }
1524                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1525                 if (type != 0) {
1526                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1527                                            GETPC());
1528                 }
1529                 new_eip |= ((target_ulong)e1) << 32;
1530             }
1531 #endif
1532 
1533             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1534                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1535             }
1536             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1537             /* must be code segment */
1538             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1539                  (DESC_S_MASK | DESC_CS_MASK))) {
1540                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1541             }
1542             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1543                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1544                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1545             }
1546 #ifdef TARGET_X86_64
1547             if (env->efer & MSR_EFER_LMA) {
1548                 if (!(e2 & DESC_L_MASK)) {
1549                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1550                 }
1551                 if (e2 & DESC_B_MASK) {
1552                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1553                 }
1554             }
1555 #endif
1556             if (!(e2 & DESC_P_MASK)) {
1557                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1558             }
1559             limit = get_seg_limit(e1, e2);
1560             if (new_eip > limit &&
1561                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1562                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1563             }
1564             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1565                                    get_seg_base(e1, e2), limit, e2);
1566             env->eip = new_eip;
1567             break;
1568         default:
1569             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1570             break;
1571         }
1572     }
1573 }
1574 
1575 /* real mode call */
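/*
 * No descriptor checks are performed here: the return CS:IP is pushed and
 * CS is reloaded real-mode style (base = selector << 4).  For example,
 * "lcall $0x1234, $0x5678" pushes the old CS:IP and resumes at linear
 * address 0x12340 + 0x5678 = 0x179b8.
 */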
1576 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1577                        int shift, uint32_t next_eip)
1578 {
1579     StackAccess sa;
1580 
1581     sa.env = env;
1582     sa.ra = GETPC();
1583     sa.sp = env->regs[R_ESP];
1584     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1585     sa.ss_base = env->segs[R_SS].base;
1586     sa.mmu_index = cpu_mmu_index_kernel(env);
1587 
1588     if (shift) {
1589         pushl(&sa, env->segs[R_CS].selector);
1590         pushl(&sa, next_eip);
1591     } else {
1592         pushw(&sa, env->segs[R_CS].selector);
1593         pushw(&sa, next_eip);
1594     }
1595 
1596     SET_ESP(sa.sp, sa.sp_mask);
1597     env->eip = new_eip;
1598     env->segs[R_CS].selector = new_cs;
1599     env->segs[R_CS].base = (new_cs << 4);
1600 }
1601 
1602 /* protected mode call */
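/*
 * The target may be a code segment, a TSS or task gate, or a call gate.
 * For a 386 call gate the descriptor words are consumed below as
 *     selector    = e1 >> 16
 *     offset      = (e2 & 0xffff0000) | (e1 & 0xffff)
 *     param_count = e2 & 0x1f
 * A guest typically gets here with "lcall $sel, $0" (illustrative); the
 * offset operand of the far call is ignored when the selector names a gate.
 */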
1603 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1604                             int shift, target_ulong next_eip)
1605 {
1606     int new_stack, i;
1607     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1608     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1609     uint32_t val, limit, old_sp_mask;
1610     target_ulong old_ssp, offset;
1611     StackAccess sa;
1612 
1613     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1614     LOG_PCALL_STATE(env_cpu(env));
1615     if ((new_cs & 0xfffc) == 0) {
1616         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1617     }
1618     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1619         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1620     }
1621     cpl = env->hflags & HF_CPL_MASK;
1622     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1623 
1624     sa.env = env;
1625     sa.ra = GETPC();
1626     sa.mmu_index = cpu_mmu_index_kernel(env);
1627 
1628     if (e2 & DESC_S_MASK) {
1629         if (!(e2 & DESC_CS_MASK)) {
1630             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1631         }
1632         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1633         if (e2 & DESC_C_MASK) {
1634             /* conforming code segment */
1635             if (dpl > cpl) {
1636                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1637             }
1638         } else {
1639             /* non-conforming code segment */
1640             rpl = new_cs & 3;
1641             if (rpl > cpl) {
1642                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1643             }
1644             if (dpl != cpl) {
1645                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1646             }
1647         }
1648         if (!(e2 & DESC_P_MASK)) {
1649             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1650         }
1651 
1652 #ifdef TARGET_X86_64
1653         /* XXX: check 16/32 bit cases in long mode */
1654         if (shift == 2) {
1655             /* 64 bit case */
1656             sa.sp = env->regs[R_ESP];
1657             sa.sp_mask = -1;
1658             sa.ss_base = 0;
1659             pushq(&sa, env->segs[R_CS].selector);
1660             pushq(&sa, next_eip);
1661             /* from this point, not restartable */
1662             env->regs[R_ESP] = sa.sp;
1663             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1664                                    get_seg_base(e1, e2),
1665                                    get_seg_limit(e1, e2), e2);
1666             env->eip = new_eip;
1667         } else
1668 #endif
1669         {
1670             sa.sp = env->regs[R_ESP];
1671             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1672             sa.ss_base = env->segs[R_SS].base;
1673             if (shift) {
1674                 pushl(&sa, env->segs[R_CS].selector);
1675                 pushl(&sa, next_eip);
1676             } else {
1677                 pushw(&sa, env->segs[R_CS].selector);
1678                 pushw(&sa, next_eip);
1679             }
1680 
1681             limit = get_seg_limit(e1, e2);
1682             if (new_eip > limit) {
1683                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1684             }
1685             /* from this point, not restartable */
1686             SET_ESP(sa.sp, sa.sp_mask);
1687             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1688                                    get_seg_base(e1, e2), limit, e2);
1689             env->eip = new_eip;
1690         }
1691     } else {
1692         /* check gate type */
1693         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1694         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1695         rpl = new_cs & 3;
1696 
1697 #ifdef TARGET_X86_64
1698         if (env->efer & MSR_EFER_LMA) {
1699             if (type != 12) {
1700                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1701             }
1702         }
1703 #endif
1704 
1705         switch (type) {
1706         case 1: /* available 286 TSS */
1707         case 9: /* available 386 TSS */
1708         case 5: /* task gate */
1709             if (dpl < cpl || dpl < rpl) {
1710                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1711             }
1712             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1713             return;
1714         case 4: /* 286 call gate */
1715         case 12: /* 386 call gate */
1716             break;
1717         default:
1718             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1719             break;
1720         }
1721         shift = type >> 3;
1722 
1723         if (dpl < cpl || dpl < rpl) {
1724             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1725         }
1726         /* check valid bit */
1727         if (!(e2 & DESC_P_MASK)) {
1728             raise_exception_err_ra(env, EXCP0B_NOSEG,  new_cs & 0xfffc, GETPC());
1729             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1730         selector = e1 >> 16;
1731         param_count = e2 & 0x1f;
1732         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1733 #ifdef TARGET_X86_64
1734         if (env->efer & MSR_EFER_LMA) {
1735             /* load the upper 8 bytes of the 64-bit call gate */
1736             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1737                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1738                                        GETPC());
1739             }
1740             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1741             if (type != 0) {
1742                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1743                                        GETPC());
1744             }
1745             offset |= ((target_ulong)e1) << 32;
1746         }
1747 #endif
1748         if ((selector & 0xfffc) == 0) {
1749             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1750         }
1751 
1752         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1753             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1754         }
1755         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1756             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1757         }
1758         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1759         if (dpl > cpl) {
1760             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1761         }
1762 #ifdef TARGET_X86_64
1763         if (env->efer & MSR_EFER_LMA) {
1764             if (!(e2 & DESC_L_MASK)) {
1765                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1766             }
1767             if (e2 & DESC_B_MASK) {
1768                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1769             }
1770             shift++;
1771         }
1772 #endif
1773         if (!(e2 & DESC_P_MASK)) {
1774             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1775         }
1776 
1777         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1778             /* to inner privilege */
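            /*
             * The target code segment is non-conforming and more
             * privileged than the caller, so the stack is switched to the
             * one recorded in the TSS for the target DPL.  In 64-bit mode
             * (shift == 2) only RSP comes from the TSS and SS becomes a
             * NULL selector carrying the new CPL; otherwise SS:ESP come
             * from the 32/16-bit TSS and the SS descriptor is checked.
             */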
1779 #ifdef TARGET_X86_64
1780             if (shift == 2) {
1781                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1782                 new_stack = 1;
1783                 sa.sp = get_rsp_from_tss(env, dpl);
1784                 sa.sp_mask = -1;
1785                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1786                 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1787                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1788             } else
1789 #endif
1790             {
1791                 uint32_t sp32;
1792                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1793                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1794                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1795                           env->regs[R_ESP]);
1796                 if ((ss & 0xfffc) == 0) {
1797                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1798                 }
1799                 if ((ss & 3) != dpl) {
1800                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1801                 }
1802                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1803                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1804                 }
1805                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1806                 if (ss_dpl != dpl) {
1807                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1808                 }
1809                 if (!(ss_e2 & DESC_S_MASK) ||
1810                     (ss_e2 & DESC_CS_MASK) ||
1811                     !(ss_e2 & DESC_W_MASK)) {
1812                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1813                 }
1814                 if (!(ss_e2 & DESC_P_MASK)) {
1815                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1816                 }
1817 
1818                 sa.sp = sp32;
1819                 sa.sp_mask = get_sp_mask(ss_e2);
1820                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1821             }
1822 
1823             /* push_size = ((param_count * 2) + 8) << shift; */
1824             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1825             old_ssp = env->segs[R_SS].base;
1826 
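            /*
             * Save the caller's SS:ESP on the new stack and, for 16/32-bit
             * call gates, copy param_count words or dwords across from the
             * old stack.  64-bit call gates carry no parameters.
             */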
1827 #ifdef TARGET_X86_64
1828             if (shift == 2) {
1829                 /* XXX: verify if new stack address is canonical */
1830                 pushq(&sa, env->segs[R_SS].selector);
1831                 pushq(&sa, env->regs[R_ESP]);
1832                 /* parameters aren't supported for 64-bit call gates */
1833             } else
1834 #endif
1835             if (shift == 1) {
1836                 pushl(&sa, env->segs[R_SS].selector);
1837                 pushl(&sa, env->regs[R_ESP]);
1838                 for (i = param_count - 1; i >= 0; i--) {
1839                     val = cpu_ldl_data_ra(env,
1840                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1841                                           GETPC());
1842                     pushl(&sa, val);
1843                 }
1844             } else {
1845                 pushw(&sa, env->segs[R_SS].selector);
1846                 pushw(&sa, env->regs[R_ESP]);
1847                 for (i = param_count - 1; i >= 0; i--) {
1848                     val = cpu_lduw_data_ra(env,
1849                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1850                                            GETPC());
1851                     pushw(&sa, val);
1852                 }
1853             }
1854             new_stack = 1;
1855         } else {
1856             /* to same privilege */
1857             sa.sp = env->regs[R_ESP];
1858             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1859             sa.ss_base = env->segs[R_SS].base;
1860             /* push_size = (4 << shift); */
1861             new_stack = 0;
1862         }
1863 
1864 #ifdef TARGET_X86_64
1865         if (shift == 2) {
1866             pushq(&sa, env->segs[R_CS].selector);
1867             pushq(&sa, next_eip);
1868         } else
1869 #endif
1870         if (shift == 1) {
1871             pushl(&sa, env->segs[R_CS].selector);
1872             pushl(&sa, next_eip);
1873         } else {
1874             pushw(&sa, env->segs[R_CS].selector);
1875             pushw(&sa, next_eip);
1876         }
1877 
1878         /* from this point, not restartable */
1879 
1880         if (new_stack) {
1881 #ifdef TARGET_X86_64
1882             if (shift == 2) {
1883                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1884             } else
1885 #endif
1886             {
1887                 ss = (ss & ~3) | dpl;
1888                 cpu_x86_load_seg_cache(env, R_SS, ss,
1889                                        sa.ss_base,
1890                                        get_seg_limit(ss_e1, ss_e2),
1891                                        ss_e2);
1892             }
1893         }
1894 
1895         selector = (selector & ~3) | dpl;
1896         cpu_x86_load_seg_cache(env, R_CS, selector,
1897                        get_seg_base(e1, e2),
1898                        get_seg_limit(e1, e2),
1899                        e2);
1900         SET_ESP(sa.sp, sa.sp_mask);
1901         env->eip = offset;
1902     }
1903 }
1904 
1905 /* real and vm86 mode iret */
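/*
 * Pops IP/EIP, CS and FLAGS/EFLAGS.  In vm86 mode the guest may not change
 * IOPL, so IOPL_MASK is left out of the set of writable flag bits below.
 */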
1906 void helper_iret_real(CPUX86State *env, int shift)
1907 {
1908     uint32_t new_cs, new_eip, new_eflags;
1909     int eflags_mask;
1910     StackAccess sa;
1911 
1912     sa.env = env;
1913     sa.ra = GETPC();
1914     sa.mmu_index = x86_mmu_index_pl(env, 0);
1915     sa.sp_mask = 0xffff; /* XXX: use SS segment size? */
1916     sa.sp = env->regs[R_ESP];
1917     sa.ss_base = env->segs[R_SS].base;
1918 
1919     if (shift == 1) {
1920         /* 32 bits */
1921         new_eip = popl(&sa);
1922         new_cs = popl(&sa) & 0xffff;
1923         new_eflags = popl(&sa);
1924     } else {
1925         /* 16 bits */
1926         new_eip = popw(&sa);
1927         new_cs = popw(&sa);
1928         new_eflags = popw(&sa);
1929     }
1930     SET_ESP(sa.sp, sa.sp_mask);
1931     env->segs[R_CS].selector = new_cs;
1932     env->segs[R_CS].base = (new_cs << 4);
1933     env->eip = new_eip;
1934     if (env->eflags & VM_MASK) {
1935         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1936             NT_MASK;
1937     } else {
1938         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1939             RF_MASK | NT_MASK;
1940     }
1941     if (shift == 0) {
1942         eflags_mask &= 0xffff;
1943     }
1944     cpu_load_eflags(env, new_eflags, eflags_mask);
1945     env->hflags2 &= ~HF2_NMI_MASK;
1946 }
1947 
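/*
 * When a return lowers the privilege level, data segment registers whose
 * DPL is numerically lower (more privileged) than the new CPL must not
 * remain usable; they are reloaded with a NULL selector.  Called for
 * ES/DS/FS/GS from helper_ret_protected().
 */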
1948 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1949 {
1950     int dpl;
1951     uint32_t e2;
1952 
1953     /* XXX: on x86_64, we do not want to nullify FS and GS because
1954        they may still contain a valid base. I would be interested to
1955        know how a real x86_64 CPU behaves */
1956     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1957         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1958         return;
1959     }
1960 
1961     e2 = env->segs[seg_reg].flags;
1962     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1963     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1964         /* data or non-conforming code segment */
1965         if (dpl < cpl) {
1966             cpu_x86_load_seg_cache(env, seg_reg, 0,
1967                                    env->segs[seg_reg].base,
1968                                    env->segs[seg_reg].limit,
1969                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
1970         }
1971     }
1972 }
1973 
1974 /* protected mode iret */
1975 static inline void helper_ret_protected(CPUX86State *env, int shift,
1976                                         int is_iret, int addend,
1977                                         uintptr_t retaddr)
1978 {
1979     uint32_t new_cs, new_eflags, new_ss;
1980     uint32_t new_es, new_ds, new_fs, new_gs;
1981     uint32_t e1, e2, ss_e1, ss_e2;
1982     int cpl, dpl, rpl, eflags_mask, iopl;
1983     target_ulong new_eip, new_esp;
1984     StackAccess sa;
1985 
1986     cpl = env->hflags & HF_CPL_MASK;
1987 
1988     sa.env = env;
1989     sa.ra = retaddr;
1990     sa.mmu_index = x86_mmu_index_pl(env, cpl);
1991 
1992 #ifdef TARGET_X86_64
1993     if (shift == 2) {
1994         sa.sp_mask = -1;
1995     } else
1996 #endif
1997     {
1998         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1999     }
2000     sa.sp = env->regs[R_ESP];
2001     sa.ss_base = env->segs[R_SS].base;
2002     new_eflags = 0; /* avoid warning */
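    /*
     * Stack frame being popped: EIP, CS and, for IRET, EFLAGS.  When the
     * return goes to an outer privilege level, ESP and SS follow.  The
     * 'addend' operand of "lret $imm" releases the caller's parameters and
     * is applied after the return address has been popped.
     */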
2003 #ifdef TARGET_X86_64
2004     if (shift == 2) {
2005         new_eip = popq(&sa);
2006         new_cs = popq(&sa) & 0xffff;
2007         if (is_iret) {
2008             new_eflags = popq(&sa);
2009         }
2010     } else
2011 #endif
2012     {
2013         if (shift == 1) {
2014             /* 32 bits */
2015             new_eip = popl(&sa);
2016             new_cs = popl(&sa) & 0xffff;
2017             if (is_iret) {
2018                 new_eflags = popl(&sa);
2019                 if (new_eflags & VM_MASK) {
2020                     goto return_to_vm86;
2021                 }
2022             }
2023         } else {
2024             /* 16 bits */
2025             new_eip = popw(&sa);
2026             new_cs = popw(&sa);
2027             if (is_iret) {
2028                 new_eflags = popw(&sa);
2029             }
2030         }
2031     }
2032     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2033               new_cs, new_eip, shift, addend);
2034     LOG_PCALL_STATE(env_cpu(env));
2035     if ((new_cs & 0xfffc) == 0) {
2036         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2037     }
2038     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2039         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2040     }
2041     if (!(e2 & DESC_S_MASK) ||
2042         !(e2 & DESC_CS_MASK)) {
2043         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2044     }
2045     rpl = new_cs & 3;
2046     if (rpl < cpl) {
2047         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2048     }
2049     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2050     if (e2 & DESC_C_MASK) {
2051         if (dpl > rpl) {
2052             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2053         }
2054     } else {
2055         if (dpl != rpl) {
2056             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2057         }
2058     }
2059     if (!(e2 & DESC_P_MASK)) {
2060         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2061     }
2062 
2063     sa.sp += addend;
2064     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2065                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2066         /* return to same privilege level */
2067         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2068                        get_seg_base(e1, e2),
2069                        get_seg_limit(e1, e2),
2070                        e2);
2071     } else {
2072         /* return to different privilege level */
2073 #ifdef TARGET_X86_64
2074         if (shift == 2) {
2075             new_esp = popq(&sa);
2076             new_ss = popq(&sa) & 0xffff;
2077         } else
2078 #endif
2079         {
2080             if (shift == 1) {
2081                 /* 32 bits */
2082                 new_esp = popl(&sa);
2083                 new_ss = popl(&sa) & 0xffff;
2084             } else {
2085                 /* 16 bits */
2086                 new_esp = popw(&sa);
2087                 new_ss = popw(&sa);
2088             }
2089         }
2090         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2091                   new_ss, new_esp);
2092         if ((new_ss & 0xfffc) == 0) {
2093 #ifdef TARGET_X86_64
2094             /* NULL SS is allowed in long mode if the new CPL (= CS RPL) != 3 */
2095             /* XXX: test CS64? */
2096             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2097                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2098                                        0, 0xffffffff,
2099                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2100                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2101                                        DESC_W_MASK | DESC_A_MASK);
2102                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2103             } else
2104 #endif
2105             {
2106                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2107             }
2108         } else {
2109             if ((new_ss & 3) != rpl) {
2110                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2111             }
2112             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2113                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2114             }
2115             if (!(ss_e2 & DESC_S_MASK) ||
2116                 (ss_e2 & DESC_CS_MASK) ||
2117                 !(ss_e2 & DESC_W_MASK)) {
2118                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2119             }
2120             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2121             if (dpl != rpl) {
2122                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2123             }
2124             if (!(ss_e2 & DESC_P_MASK)) {
2125                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2126             }
2127             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2128                                    get_seg_base(ss_e1, ss_e2),
2129                                    get_seg_limit(ss_e1, ss_e2),
2130                                    ss_e2);
2131         }
2132 
2133         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2134                        get_seg_base(e1, e2),
2135                        get_seg_limit(e1, e2),
2136                        e2);
2137         sa.sp = new_esp;
2138 #ifdef TARGET_X86_64
2139         if (env->hflags & HF_CS64_MASK) {
2140             sa.sp_mask = -1;
2141         } else
2142 #endif
2143         {
2144             sa.sp_mask = get_sp_mask(ss_e2);
2145         }
2146 
2147         /* validate data segments */
2148         validate_seg(env, R_ES, rpl);
2149         validate_seg(env, R_DS, rpl);
2150         validate_seg(env, R_FS, rpl);
2151         validate_seg(env, R_GS, rpl);
2152 
2153         sa.sp += addend;
2154     }
2155     SET_ESP(sa.sp, sa.sp_mask);
2156     env->eip = new_eip;
2157     if (is_iret) {
2158         /* NOTE: 'cpl' is the _old_ CPL */
2159         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2160         if (cpl == 0) {
2161             eflags_mask |= IOPL_MASK;
2162         }
2163         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2164         if (cpl <= iopl) {
2165             eflags_mask |= IF_MASK;
2166         }
2167         if (shift == 0) {
2168             eflags_mask &= 0xffff;
2169         }
2170         cpu_load_eflags(env, new_eflags, eflags_mask);
2171     }
2172     return;
2173 
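/*
 * IRET back to vm86 mode: the rest of the ring-0 frame is ESP, SS, ES, DS,
 * FS and GS.  load_seg_vm() reloads the segments real-mode style
 * (base = selector << 4).
 */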
2174  return_to_vm86:
2175     new_esp = popl(&sa);
2176     new_ss = popl(&sa);
2177     new_es = popl(&sa);
2178     new_ds = popl(&sa);
2179     new_fs = popl(&sa);
2180     new_gs = popl(&sa);
2181 
2182     /* modify processor state */
2183     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2184                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2185                     VIP_MASK);
2186     load_seg_vm(env, R_CS, new_cs & 0xffff);
2187     load_seg_vm(env, R_SS, new_ss & 0xffff);
2188     load_seg_vm(env, R_ES, new_es & 0xffff);
2189     load_seg_vm(env, R_DS, new_ds & 0xffff);
2190     load_seg_vm(env, R_FS, new_fs & 0xffff);
2191     load_seg_vm(env, R_GS, new_gs & 0xffff);
2192 
2193     env->eip = new_eip & 0xffff;
2194     env->regs[R_ESP] = new_esp;
2195 }
2196 
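/*
 * If NT is set, IRET returns from a nested task: the back-link selector at
 * offset 0 of the current TSS is used for a task switch.  Nested-task
 * returns do not exist in long mode, hence the #GP there.  NMI blocking is
 * cleared once the return has been carried out.
 */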
2197 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2198 {
2199     int tss_selector, type;
2200     uint32_t e1, e2;
2201 
2202     /* specific case for TSS */
2203     if (env->eflags & NT_MASK) {
2204 #ifdef TARGET_X86_64
2205         if (env->hflags & HF_LMA_MASK) {
2206             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2207         }
2208 #endif
2209         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2210         if (tss_selector & 4) {
2211             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2212         }
2213         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2214             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2215         }
2216         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2217         /* NOTE: the 0x17 mask keeps the S bit: only a busy 286/386 TSS passes */
2218         if (type != 3) {
2219             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2220         }
2221         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2222     } else {
2223         helper_ret_protected(env, shift, 1, 0, GETPC());
2224     }
2225     env->hflags2 &= ~HF2_NMI_MASK;
2226 }
2227 
2228 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2229 {
2230     helper_ret_protected(env, shift, 0, addend, GETPC());
2231 }
2232 
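/*
 * SYSENTER raises #GP if IA32_SYSENTER_CS (MSR 0x174) is zero, then loads
 * flat CS/SS descriptors derived from that MSR (SS selector = CS + 8) and
 * takes the new RSP/RIP from IA32_SYSENTER_ESP/EIP (0x175/0x176).  A rough
 * sketch of how a guest kernel arms it, using Linux-style helper names for
 * illustration only:
 *
 *     wrmsrl(MSR_IA32_SYSENTER_CS,  __KERNEL_CS);
 *     wrmsrl(MSR_IA32_SYSENTER_ESP, (u64)kernel_stack_top);
 *     wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)sysenter_entry_point);
 *
 * kernel_stack_top and sysenter_entry_point are placeholders for whatever
 * the guest uses as its per-CPU stack and entry point.
 */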
2233 void helper_sysenter(CPUX86State *env)
2234 {
2235     if (env->sysenter_cs == 0) {
2236         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2237     }
2238     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2239 
2240 #ifdef TARGET_X86_64
2241     if (env->hflags & HF_LMA_MASK) {
2242         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2243                                0, 0xffffffff,
2244                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2245                                DESC_S_MASK |
2246                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2247                                DESC_L_MASK);
2248     } else
2249 #endif
2250     {
2251         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2252                                0, 0xffffffff,
2253                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2254                                DESC_S_MASK |
2255                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2256     }
2257     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2258                            0, 0xffffffff,
2259                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2260                            DESC_S_MASK |
2261                            DESC_W_MASK | DESC_A_MASK);
2262     env->regs[R_ESP] = env->sysenter_esp;
2263     env->eip = env->sysenter_eip;
2264 }
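/*
 * SYSEXIT is only legal at CPL 0 with IA32_SYSENTER_CS programmed.  The
 * user CS/SS are derived from that MSR (+16/+24 for a 32-bit return,
 * +32/+40 for a 64-bit return) with RPL forced to 3, and ECX/EDX supply
 * the new ESP/EIP (RCX/RDX when dflag == 2).
 */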
2265 
2266 void helper_sysexit(CPUX86State *env, int dflag)
2267 {
2268     int cpl;
2269 
2270     cpl = env->hflags & HF_CPL_MASK;
2271     if (env->sysenter_cs == 0 || cpl != 0) {
2272         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2273     }
2274 #ifdef TARGET_X86_64
2275     if (dflag == 2) {
2276         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2277                                3, 0, 0xffffffff,
2278                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2279                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2280                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2281                                DESC_L_MASK);
2282         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2283                                3, 0, 0xffffffff,
2284                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2285                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2286                                DESC_W_MASK | DESC_A_MASK);
2287     } else
2288 #endif
2289     {
2290         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2291                                3, 0, 0xffffffff,
2292                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2293                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2294                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2295         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2296                                3, 0, 0xffffffff,
2297                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2298                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2299                                DESC_W_MASK | DESC_A_MASK);
2300     }
2301     env->regs[R_ESP] = env->regs[R_ECX];
2302     env->eip = env->regs[R_EDX];
2303 }
2304 
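/*
 * LSL returns the segment limit and reports success through ZF: ZF is set
 * when the descriptor is accessible at the current CPL/RPL, otherwise ZF
 * is cleared and the returned value must be ignored (architecturally the
 * destination register is left unchanged).  helper_lar() below follows the
 * same pattern but returns the access-rights bytes instead.  Typical guest
 * usage (AT&T syntax, illustrative label):
 *
 *     lsl  %ax, %ebx      # limit of the selector in %ax -> %ebx
 *     jnz  not_usable     # ZF clear: descriptor not accessible
 */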
2305 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2306 {
2307     unsigned int limit;
2308     uint32_t e1, e2, selector;
2309     int rpl, dpl, cpl, type;
2310 
2311     selector = selector1 & 0xffff;
2312     assert(CC_OP == CC_OP_EFLAGS);
2313     if ((selector & 0xfffc) == 0) {
2314         goto fail;
2315     }
2316     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2317         goto fail;
2318     }
2319     rpl = selector & 3;
2320     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2321     cpl = env->hflags & HF_CPL_MASK;
2322     if (e2 & DESC_S_MASK) {
2323         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2324             /* conforming */
2325         } else {
2326             if (dpl < cpl || dpl < rpl) {
2327                 goto fail;
2328             }
2329         }
2330     } else {
2331         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2332         switch (type) {
2333         case 1:
2334         case 2:
2335         case 3:
2336         case 9:
2337         case 11:
2338             break;
2339         default:
2340             goto fail;
2341         }
2342         if (dpl < cpl || dpl < rpl) {
2343         fail:
2344             CC_SRC &= ~CC_Z;
2345             return 0;
2346         }
2347     }
2348     limit = get_seg_limit(e1, e2);
2349     CC_SRC |= CC_Z;
2350     return limit;
2351 }
2352 
2353 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2354 {
2355     uint32_t e1, e2, selector;
2356     int rpl, dpl, cpl, type;
2357 
2358     selector = selector1 & 0xffff;
2359     assert(CC_OP == CC_OP_EFLAGS);
2360     if ((selector & 0xfffc) == 0) {
2361         goto fail;
2362     }
2363     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2364         goto fail;
2365     }
2366     rpl = selector & 3;
2367     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2368     cpl = env->hflags & HF_CPL_MASK;
2369     if (e2 & DESC_S_MASK) {
2370         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2371             /* conforming */
2372         } else {
2373             if (dpl < cpl || dpl < rpl) {
2374                 goto fail;
2375             }
2376         }
2377     } else {
2378         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2379         switch (type) {
2380         case 1:
2381         case 2:
2382         case 3:
2383         case 4:
2384         case 5:
2385         case 9:
2386         case 11:
2387         case 12:
2388             break;
2389         default:
2390             goto fail;
2391         }
2392         if (dpl < cpl || dpl < rpl) {
2393         fail:
2394             CC_SRC &= ~CC_Z;
2395             return 0;
2396         }
2397     }
2398     CC_SRC |= CC_Z;
2399     return e2 & 0x00f0ff00;
2400 }
2401 
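/*
 * VERR and VERW set ZF if the segment named by the selector is readable
 * (VERR) or writable (VERW) at the current CPL and the selector's RPL,
 * and clear ZF otherwise.  No exception is raised for a bad selector,
 * which is the whole point of these instructions.
 */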
2402 void helper_verr(CPUX86State *env, target_ulong selector1)
2403 {
2404     uint32_t e1, e2, eflags, selector;
2405     int rpl, dpl, cpl;
2406 
2407     selector = selector1 & 0xffff;
2408     eflags = cpu_cc_compute_all(env) | CC_Z;
2409     if ((selector & 0xfffc) == 0) {
2410         goto fail;
2411     }
2412     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2413         goto fail;
2414     }
2415     if (!(e2 & DESC_S_MASK)) {
2416         goto fail;
2417     }
2418     rpl = selector & 3;
2419     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2420     cpl = env->hflags & HF_CPL_MASK;
2421     if (e2 & DESC_CS_MASK) {
2422         if (!(e2 & DESC_R_MASK)) {
2423             goto fail;
2424         }
2425         if (!(e2 & DESC_C_MASK)) {
2426             if (dpl < cpl || dpl < rpl) {
2427                 goto fail;
2428             }
2429         }
2430     } else {
2431         if (dpl < cpl || dpl < rpl) {
2432         fail:
2433             eflags &= ~CC_Z;
2434         }
2435     }
2436     CC_SRC = eflags;
2437     CC_OP = CC_OP_EFLAGS;
2438 }
2439 
2440 void helper_verw(CPUX86State *env, target_ulong selector1)
2441 {
2442     uint32_t e1, e2, eflags, selector;
2443     int rpl, dpl, cpl;
2444 
2445     selector = selector1 & 0xffff;
2446     eflags = cpu_cc_compute_all(env) | CC_Z;
2447     if ((selector & 0xfffc) == 0) {
2448         goto fail;
2449     }
2450     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2451         goto fail;
2452     }
2453     if (!(e2 & DESC_S_MASK)) {
2454         goto fail;
2455     }
2456     rpl = selector & 3;
2457     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2458     cpl = env->hflags & HF_CPL_MASK;
2459     if (e2 & DESC_CS_MASK) {
2460         goto fail;
2461     } else {
2462         if (dpl < cpl || dpl < rpl) {
2463             goto fail;
2464         }
2465         if (!(e2 & DESC_W_MASK)) {
2466         fail:
2467             eflags &= ~CC_Z;
2468         }
2469     }
2470     CC_SRC = eflags;
2471     CC_OP = CC_OP_EFLAGS;
2472 }
2473