xref: /qemu/target/i386/tcg/seg_helper.c (revision 059368bcf589f4c6cba860516f57cec0b51c1fa1)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 
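/*
 * SET_ESP stores a new stack pointer according to the stack-size mask:
 * with a 16-bit mask only the low 16 bits of ESP are replaced, with a
 * 32-bit mask the value is zero-extended into the register, and any
 * other mask stores the full value.
 */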
31 #ifdef TARGET_X86_64
32 #define SET_ESP(val, sp_mask)                                   \
33     do {                                                        \
34         if ((sp_mask) == 0xffff) {                              \
35             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
36                 ((val) & 0xffff);                               \
37         } else if ((sp_mask) == 0xffffffffLL) {                 \
38             env->regs[R_ESP] = (uint32_t)(val);                 \
39         } else {                                                \
40             env->regs[R_ESP] = (val);                           \
41         }                                                       \
42     } while (0)
43 #else
44 #define SET_ESP(val, sp_mask)                                   \
45     do {                                                        \
46         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
47             ((val) & (sp_mask));                                \
48     } while (0)
49 #endif
50 
51 /* XXX: use mmu_index to have proper DPL support */
52 typedef struct StackAccess
53 {
54     CPUX86State *env;
55     uintptr_t ra;
56     target_ulong ss_base;
57     target_ulong sp;
58     target_ulong sp_mask;
59 } StackAccess;
60 
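/*
 * Stack access helpers: the linear address of each push/pop is ss_base
 * plus the stack pointer truncated to the stack segment size by sp_mask;
 * sp is updated in the StackAccess, not directly in ESP.
 */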
61 static void pushw(StackAccess *sa, uint16_t val)
62 {
63     sa->sp -= 2;
64     cpu_stw_kernel_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
65                       val, sa->ra);
66 }
67 
68 static void pushl(StackAccess *sa, uint32_t val)
69 {
70     sa->sp -= 4;
71     cpu_stl_kernel_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
72                       val, sa->ra);
73 }
74 
75 static uint16_t popw(StackAccess *sa)
76 {
77     uint16_t ret = cpu_lduw_data_ra(sa->env,
78                                     sa->ss_base + (sa->sp & sa->sp_mask),
79                                     sa->ra);
80     sa->sp += 2;
81     return ret;
82 }
83 
84 static uint32_t popl(StackAccess *sa)
85 {
86     uint32_t ret = cpu_ldl_data_ra(sa->env,
87                                    sa->ss_base + (sa->sp & sa->sp_mask),
88                                    sa->ra);
89     sa->sp += 4;
90     return ret;
91 }
92 
93 int get_pg_mode(CPUX86State *env)
94 {
95     int pg_mode = 0;
96     if (!(env->cr[0] & CR0_PG_MASK)) {
97         return 0;
98     }
99     if (env->cr[0] & CR0_WP_MASK) {
100         pg_mode |= PG_MODE_WP;
101     }
102     if (env->cr[4] & CR4_PAE_MASK) {
103         pg_mode |= PG_MODE_PAE;
104         if (env->efer & MSR_EFER_NXE) {
105             pg_mode |= PG_MODE_NXE;
106         }
107     }
108     if (env->cr[4] & CR4_PSE_MASK) {
109         pg_mode |= PG_MODE_PSE;
110     }
111     if (env->cr[4] & CR4_SMEP_MASK) {
112         pg_mode |= PG_MODE_SMEP;
113     }
114     if (env->hflags & HF_LMA_MASK) {
115         pg_mode |= PG_MODE_LMA;
116         if (env->cr[4] & CR4_PKE_MASK) {
117             pg_mode |= PG_MODE_PKE;
118         }
119         if (env->cr[4] & CR4_PKS_MASK) {
120             pg_mode |= PG_MODE_PKS;
121         }
122         if (env->cr[4] & CR4_LA57_MASK) {
123             pg_mode |= PG_MODE_LA57;
124         }
125     }
126     return pg_mode;
127 }
128 
129 /* return non-zero on error */
130 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
131                                uint32_t *e2_ptr, int selector,
132                                uintptr_t retaddr)
133 {
134     SegmentCache *dt;
135     int index;
136     target_ulong ptr;
137 
138     if (selector & 0x4) {
139         dt = &env->ldt;
140     } else {
141         dt = &env->gdt;
142     }
143     index = selector & ~7;
144     if ((index + 7) > dt->limit) {
145         return -1;
146     }
147     ptr = dt->base + index;
148     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
149     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
150     return 0;
151 }
152 
153 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
154                                uint32_t *e2_ptr, int selector)
155 {
156     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
157 }
158 
159 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
160 {
161     unsigned int limit;
162 
163     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
164     if (e2 & DESC_G_MASK) {
165         limit = (limit << 12) | 0xfff;
166     }
167     return limit;
168 }
169 
170 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
171 {
172     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
173 }
174 
175 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
176                                          uint32_t e2)
177 {
178     sc->base = get_seg_base(e1, e2);
179     sc->limit = get_seg_limit(e1, e2);
180     sc->flags = e2;
181 }
182 
183 /* init the segment cache in vm86 mode. */
184 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
185 {
186     selector &= 0xffff;
187 
188     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
189                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
190                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
191 }
192 
193 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
194                                        uint32_t *esp_ptr, int dpl,
195                                        uintptr_t retaddr)
196 {
197     X86CPU *cpu = env_archcpu(env);
198     int type, index, shift;
199 
200 #if 0
201     {
202         int i;
203         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
204         for (i = 0; i < env->tr.limit; i++) {
205             printf("%02x ", env->tr.base[i]);
206             if ((i & 7) == 7) {
207                 printf("\n");
208             }
209         }
210         printf("\n");
211     }
212 #endif
213 
214     if (!(env->tr.flags & DESC_P_MASK)) {
215         cpu_abort(CPU(cpu), "invalid tss");
216     }
217     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
218     if ((type & 7) != 1) {
219         cpu_abort(CPU(cpu), "invalid tss type");
220     }
221     shift = type >> 3;
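    /*
     * In a 32-bit TSS the SS:ESP pairs for CPL 0-2 start at offset 4,
     * 8 bytes per level; in a 16-bit TSS they start at offset 2,
     * 4 bytes per level.
     */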
222     index = (dpl * 4 + 2) << shift;
223     if (index + (4 << shift) - 1 > env->tr.limit) {
224         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
225     }
226     if (shift == 0) {
227         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
228         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
229     } else {
230         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
231         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
232     }
233 }
234 
235 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
236                          int cpl, uintptr_t retaddr)
237 {
238     uint32_t e1, e2;
239     int rpl, dpl;
240 
241     if ((selector & 0xfffc) != 0) {
242         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
243             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
244         }
245         if (!(e2 & DESC_S_MASK)) {
246             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
247         }
248         rpl = selector & 3;
249         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
250         if (seg_reg == R_CS) {
251             if (!(e2 & DESC_CS_MASK)) {
252                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
253             }
254             if (dpl != rpl) {
255                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
256             }
257         } else if (seg_reg == R_SS) {
258             /* SS must be writable data */
259             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
260                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
261             }
262             if (dpl != cpl || dpl != rpl) {
263                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
264             }
265         } else {
266             /* not readable code */
267             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
268                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
269             }
270             /* for data or non-conforming code segments, check the privilege rights */
271             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
272                 if (dpl < cpl || dpl < rpl) {
273                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
274                 }
275             }
276         }
277         if (!(e2 & DESC_P_MASK)) {
278             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
279         }
280         cpu_x86_load_seg_cache(env, seg_reg, selector,
281                                get_seg_base(e1, e2),
282                                get_seg_limit(e1, e2),
283                                e2);
284     } else {
285         if (seg_reg == R_SS || seg_reg == R_CS) {
286             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
287         }
288     }
289 }
290 
291 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
292                          uintptr_t retaddr)
293 {
294     target_ulong ptr = env->gdt.base + (tss_selector & ~7);
295     uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
296 
297     if (value) {
298         e2 |= DESC_TSS_BUSY_MASK;
299     } else {
300         e2 &= ~DESC_TSS_BUSY_MASK;
301     }
302 
303     cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
304 }
305 
306 #define SWITCH_TSS_JMP  0
307 #define SWITCH_TSS_IRET 1
308 #define SWITCH_TSS_CALL 2
309 
310 /* return 0 if switching to a 16-bit TSS, 1 for a 32-bit TSS */
311 static int switch_tss_ra(CPUX86State *env, int tss_selector,
312                          uint32_t e1, uint32_t e2, int source,
313                          uint32_t next_eip, uintptr_t retaddr)
314 {
315     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
316     target_ulong tss_base;
317     uint32_t new_regs[8], new_segs[6];
318     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
319     uint32_t old_eflags, eflags_mask;
320     SegmentCache *dt;
321     int index;
322     target_ulong ptr;
323 
324     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
325     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
326               source);
327 
328     /* if it is a task gate, read and load the TSS descriptor it points to */
329     if (type == 5) {
330         if (!(e2 & DESC_P_MASK)) {
331             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
332         }
333         tss_selector = e1 >> 16;
334         if (tss_selector & 4) {
335             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
336         }
337         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
338             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
339         }
340         if (e2 & DESC_S_MASK) {
341             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
342         }
343         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
344         if ((type & 7) != 1) {
345             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
346         }
347     }
348 
349     if (!(e2 & DESC_P_MASK)) {
350         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
351     }
352 
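    /* a 32-bit TSS must be at least 104 bytes long, a 16-bit TSS at least 44 */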
353     if (type & 8) {
354         tss_limit_max = 103;
355     } else {
356         tss_limit_max = 43;
357     }
358     tss_limit = get_seg_limit(e1, e2);
359     tss_base = get_seg_base(e1, e2);
360     if ((tss_selector & 4) != 0 ||
361         tss_limit < tss_limit_max) {
362         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
363     }
364     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
365     if (old_type & 8) {
366         old_tss_limit_max = 103;
367     } else {
368         old_tss_limit_max = 43;
369     }
370 
371     /* read all the registers from the new TSS */
372     if (type & 8) {
373         /* 32 bit */
374         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
375         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
376         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
377         for (i = 0; i < 8; i++) {
378             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
379                                             retaddr);
380         }
381         for (i = 0; i < 6; i++) {
382             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
383                                              retaddr);
384         }
385         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
386         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
387     } else {
388         /* 16 bit */
389         new_cr3 = 0;
390         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
391         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
392         for (i = 0; i < 8; i++) {
393             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
394         }
395         for (i = 0; i < 4; i++) {
396             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
397                                              retaddr);
398         }
399         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
400         new_segs[R_FS] = 0;
401         new_segs[R_GS] = 0;
402         new_trap = 0;
403     }
404     /* XXX: avoid a compiler warning, see
405      http://support.amd.com/us/Processor_TechDocs/24593.pdf
406      chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
407     (void)new_trap;
408 
409     /* NOTE: we must avoid memory exceptions during the task switch,
410        so we make dummy accesses beforehand */
411     /* XXX: this can still fail in some cases, so a bigger hack would be
412        needed to validate the TLB after the accesses have been done */
413 
414     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
415     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
416     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
417     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
418 
419     /* clear the old task's busy bit (the task switch is still restartable) */
420     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
421         tss_set_busy(env, env->tr.selector, 0, retaddr);
422     }
423     old_eflags = cpu_compute_eflags(env);
424     if (source == SWITCH_TSS_IRET) {
425         old_eflags &= ~NT_MASK;
426     }
427 
428     /* save the current state in the old TSS */
429     if (old_type & 8) {
430         /* 32 bit */
431         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
432         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
433         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
434         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
435         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
436         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
437         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
438         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
439         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
440         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
441         for (i = 0; i < 6; i++) {
442             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
443                               env->segs[i].selector, retaddr);
444         }
445     } else {
446         /* 16 bit */
447         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
448         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
449         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
450         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
451         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
452         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
453         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
454         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
455         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
456         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
457         for (i = 0; i < 4; i++) {
458             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
459                               env->segs[i].selector, retaddr);
460         }
461     }
462 
463     /* from now on, if an exception occurs, it will occur in the
464        context of the next task */
465 
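    /* for CALL, link the new TSS back to the old one and mark the task nested */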
466     if (source == SWITCH_TSS_CALL) {
467         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
468         new_eflags |= NT_MASK;
469     }
470 
471     /* set busy bit */
472     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
473         tss_set_busy(env, tss_selector, 1, retaddr);
474     }
475 
476     /* set the new CPU state */
477     /* from this point, any exception that occurs can cause problems */
478     env->cr[0] |= CR0_TS_MASK;
479     env->hflags |= HF_TS_MASK;
480     env->tr.selector = tss_selector;
481     env->tr.base = tss_base;
482     env->tr.limit = tss_limit;
483     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
484 
485     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
486         cpu_x86_update_cr3(env, new_cr3);
487     }
488 
489     /* first load all registers that cannot fault, then reload the
490        segment registers, which may raise exceptions */
491     env->eip = new_eip;
492     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
493         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
494     if (type & 8) {
495         cpu_load_eflags(env, new_eflags, eflags_mask);
496         for (i = 0; i < 8; i++) {
497             env->regs[i] = new_regs[i];
498         }
499     } else {
500         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
501         for (i = 0; i < 8; i++) {
502             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
503         }
504     }
505     if (new_eflags & VM_MASK) {
506         for (i = 0; i < 6; i++) {
507             load_seg_vm(env, i, new_segs[i]);
508         }
509     } else {
510         /* first just selectors as the rest may trigger exceptions */
511         for (i = 0; i < 6; i++) {
512             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
513         }
514     }
515 
516     env->ldt.selector = new_ldt & ~4;
517     env->ldt.base = 0;
518     env->ldt.limit = 0;
519     env->ldt.flags = 0;
520 
521     /* load the LDT */
522     if (new_ldt & 4) {
523         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
524     }
525 
526     if ((new_ldt & 0xfffc) != 0) {
527         dt = &env->gdt;
528         index = new_ldt & ~7;
529         if ((index + 7) > dt->limit) {
530             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
531         }
532         ptr = dt->base + index;
533         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
534         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
535         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
536             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
537         }
538         if (!(e2 & DESC_P_MASK)) {
539             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
540         }
541         load_seg_cache_raw_dt(&env->ldt, e1, e2);
542     }
543 
544     /* load the segments */
545     if (!(new_eflags & VM_MASK)) {
546         int cpl = new_segs[R_CS] & 3;
547         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
548         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
549         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
550         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
551         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
552         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
553     }
554 
555     /* check that the new EIP is within the CS segment limit */
556     if (new_eip > env->segs[R_CS].limit) {
557         /* XXX: different exception if CALL? */
558         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
559     }
560 
561 #ifndef CONFIG_USER_ONLY
562     /* reset local breakpoints */
563     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
564         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
565     }
566 #endif
567     return type >> 3;
568 }
569 
570 static int switch_tss(CPUX86State *env, int tss_selector,
571                       uint32_t e1, uint32_t e2, int source,
572                       uint32_t next_eip)
573 {
574     return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
575 }
576 
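/*
 * Stack pointer mask derived from the SS descriptor flags: 0 for a 64-bit
 * stack segment, 0xffffffff when the B (big) flag is set, 0xffff otherwise.
 */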
577 static inline unsigned int get_sp_mask(unsigned int e2)
578 {
579 #ifdef TARGET_X86_64
580     if (e2 & DESC_L_MASK) {
581         return 0;
582     } else
583 #endif
584     if (e2 & DESC_B_MASK) {
585         return 0xffffffff;
586     } else {
587         return 0xffff;
588     }
589 }
590 
591 static int exception_is_fault(int intno)
592 {
593     switch (intno) {
594         /*
595          * #DB can be both fault- and trap-like, but it never sets RF=1
596          * in the RFLAGS value pushed on the stack.
597          */
598     case EXCP01_DB:
599     case EXCP03_INT3:
600     case EXCP04_INTO:
601     case EXCP08_DBLE:
602     case EXCP12_MCHK:
603         return 0;
604     }
605     /* Everything else, including reserved exceptions, is a fault.  */
606     return 1;
607 }
608 
609 int exception_has_error_code(int intno)
610 {
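    /* #DF, #TS, #NP, #SS, #GP, #PF and #AC push an error code */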
611     switch (intno) {
612     case 8:
613     case 10:
614     case 11:
615     case 12:
616     case 13:
617     case 14:
618     case 17:
619         return 1;
620     }
621     return 0;
622 }
623 
624 /* protected mode interrupt */
625 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
626                                    int error_code, unsigned int next_eip,
627                                    int is_hw)
628 {
629     SegmentCache *dt;
630     target_ulong ptr;
631     int type, dpl, selector, ss_dpl, cpl;
632     int has_error_code, new_stack, shift;
633     uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
634     uint32_t old_eip, eflags;
635     int vm86 = env->eflags & VM_MASK;
636     StackAccess sa;
637     bool set_rf;
638 
639     has_error_code = 0;
640     if (!is_int && !is_hw) {
641         has_error_code = exception_has_error_code(intno);
642     }
643     if (is_int) {
644         old_eip = next_eip;
645         set_rf = false;
646     } else {
647         old_eip = env->eip;
648         set_rf = exception_is_fault(intno);
649     }
650 
651     dt = &env->idt;
652     if (intno * 8 + 7 > dt->limit) {
653         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
654     }
655     ptr = dt->base + intno * 8;
656     e1 = cpu_ldl_kernel(env, ptr);
657     e2 = cpu_ldl_kernel(env, ptr + 4);
658     /* check gate type */
659     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
660     switch (type) {
661     case 5: /* task gate */
662     case 6: /* 286 interrupt gate */
663     case 7: /* 286 trap gate */
664     case 14: /* 386 interrupt gate */
665     case 15: /* 386 trap gate */
666         break;
667     default:
668         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
669         break;
670     }
671     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
672     cpl = env->hflags & HF_CPL_MASK;
673     /* check privilege if software int */
674     if (is_int && dpl < cpl) {
675         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
676     }
677 
678     sa.env = env;
679     sa.ra = 0;
680 
681     if (type == 5) {
682         /* task gate */
683         /* must do this check here so that the correct error code is returned */
684         if (!(e2 & DESC_P_MASK)) {
685             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
686         }
687         shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
688         if (has_error_code) {
689             /* push the error code */
690             if (env->segs[R_SS].flags & DESC_B_MASK) {
691                 sa.sp_mask = 0xffffffff;
692             } else {
693                 sa.sp_mask = 0xffff;
694             }
695             sa.sp = env->regs[R_ESP];
696             sa.ss_base = env->segs[R_SS].base;
697             if (shift) {
698                 pushl(&sa, error_code);
699             } else {
700                 pushw(&sa, error_code);
701             }
702             SET_ESP(sa.sp, sa.sp_mask);
703         }
704         return;
705     }
706 
707     /* Otherwise, trap or interrupt gate */
708 
709     /* check valid bit */
710     if (!(e2 & DESC_P_MASK)) {
711         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
712     }
713     selector = e1 >> 16;
714     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
715     if ((selector & 0xfffc) == 0) {
716         raise_exception_err(env, EXCP0D_GPF, 0);
717     }
718     if (load_segment(env, &e1, &e2, selector) != 0) {
719         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
720     }
721     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
722         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
723     }
724     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
725     if (dpl > cpl) {
726         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
727     }
728     if (!(e2 & DESC_P_MASK)) {
729         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
730     }
731     if (e2 & DESC_C_MASK) {
732         dpl = cpl;
733     }
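    /* a handler with DPL < CPL runs on the stack defined for its privilege
       level in the current TSS */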
734     if (dpl < cpl) {
735         /* to inner privilege */
736         uint32_t esp;
737         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
738         if ((ss & 0xfffc) == 0) {
739             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
740         }
741         if ((ss & 3) != dpl) {
742             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
743         }
744         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
745             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
746         }
747         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
748         if (ss_dpl != dpl) {
749             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
750         }
751         if (!(ss_e2 & DESC_S_MASK) ||
752             (ss_e2 & DESC_CS_MASK) ||
753             !(ss_e2 & DESC_W_MASK)) {
754             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
755         }
756         if (!(ss_e2 & DESC_P_MASK)) {
757             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
758         }
759         new_stack = 1;
760         sa.sp = esp;
761         sa.sp_mask = get_sp_mask(ss_e2);
762         sa.ss_base = get_seg_base(ss_e1, ss_e2);
763     } else  {
764         /* to same privilege */
765         if (vm86) {
766             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
767         }
768         new_stack = 0;
769         sa.sp = env->regs[R_ESP];
770         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
771         sa.ss_base = env->segs[R_SS].base;
772     }
773 
774     shift = type >> 3;
775 
776 #if 0
777     /* XXX: check that enough room is available */
778     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
779     if (vm86) {
780         push_size += 8;
781     }
782     push_size <<= shift;
783 #endif
784     eflags = cpu_compute_eflags(env);
785     /*
786      * AMD states that code breakpoint #DBs clear RF to 0, while Intel leaves it
787      * as is.  AMD behavior could be implemented in check_hw_breakpoints().
788      */
789     if (set_rf) {
790         eflags |= RF_MASK;
791     }
792 
793     if (shift == 1) {
794         if (new_stack) {
795             if (vm86) {
796                 pushl(&sa, env->segs[R_GS].selector);
797                 pushl(&sa, env->segs[R_FS].selector);
798                 pushl(&sa, env->segs[R_DS].selector);
799                 pushl(&sa, env->segs[R_ES].selector);
800             }
801             pushl(&sa, env->segs[R_SS].selector);
802             pushl(&sa, env->regs[R_ESP]);
803         }
804         pushl(&sa, eflags);
805         pushl(&sa, env->segs[R_CS].selector);
806         pushl(&sa, old_eip);
807         if (has_error_code) {
808             pushl(&sa, error_code);
809         }
810     } else {
811         if (new_stack) {
812             if (vm86) {
813                 pushw(&sa, env->segs[R_GS].selector);
814                 pushw(&sa, env->segs[R_FS].selector);
815                 pushw(&sa, env->segs[R_DS].selector);
816                 pushw(&sa, env->segs[R_ES].selector);
817             }
818             pushw(&sa, env->segs[R_SS].selector);
819             pushw(&sa, env->regs[R_ESP]);
820         }
821         pushw(&sa, eflags);
822         pushw(&sa, env->segs[R_CS].selector);
823         pushw(&sa, old_eip);
824         if (has_error_code) {
825             pushw(&sa, error_code);
826         }
827     }
828 
829     /* interrupt gates clear the IF flag (trap gates do not) */
830     if ((type & 1) == 0) {
831         env->eflags &= ~IF_MASK;
832     }
833     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
834 
835     if (new_stack) {
836         if (vm86) {
837             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
838             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
839             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
840             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
841         }
842         ss = (ss & ~3) | dpl;
843         cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
844                                get_seg_limit(ss_e1, ss_e2), ss_e2);
845     }
846     SET_ESP(sa.sp, sa.sp_mask);
847 
848     selector = (selector & ~3) | dpl;
849     cpu_x86_load_seg_cache(env, R_CS, selector,
850                    get_seg_base(e1, e2),
851                    get_seg_limit(e1, e2),
852                    e2);
853     env->eip = offset;
854 }
855 
856 #ifdef TARGET_X86_64
857 
858 static void pushq(StackAccess *sa, uint64_t val)
859 {
860     sa->sp -= 8;
861     cpu_stq_kernel_ra(sa->env, sa->sp, val, sa->ra);
862 }
863 
864 static uint64_t popq(StackAccess *sa)
865 {
866     uint64_t ret = cpu_ldq_data_ra(sa->env, sa->sp, sa->ra);
867     sa->sp += 8;
868     return ret;
869 }
870 
871 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
872 {
873     X86CPU *cpu = env_archcpu(env);
874     int index, pg_mode;
875     target_ulong rsp;
876     int32_t sext;
877 
878 #if 0
879     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
880            env->tr.base, env->tr.limit);
881 #endif
882 
883     if (!(env->tr.flags & DESC_P_MASK)) {
884         cpu_abort(CPU(cpu), "invalid tss");
885     }
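    /*
     * In the 64-bit TSS, RSP0-RSP2 are stored at offset 4 and IST1-IST7
     * follow at offset 0x24; each entry is 8 bytes.
     */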
886     index = 8 * level + 4;
887     if ((index + 7) > env->tr.limit) {
888         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
889     }
890 
891     rsp = cpu_ldq_kernel(env, env->tr.base + index);
892 
893     /* check that the new RSP is a canonical (properly sign-extended) address */
894     pg_mode = get_pg_mode(env);
895     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
896     if (sext != 0 && sext != -1) {
897         raise_exception_err(env, EXCP0C_STACK, 0);
898     }
899 
900     return rsp;
901 }
902 
903 /* 64 bit interrupt */
904 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
905                            int error_code, target_ulong next_eip, int is_hw)
906 {
907     SegmentCache *dt;
908     target_ulong ptr;
909     int type, dpl, selector, cpl, ist;
910     int has_error_code, new_stack;
911     uint32_t e1, e2, e3, ss, eflags;
912     target_ulong old_eip, offset;
913     bool set_rf;
914     StackAccess sa;
915 
916     has_error_code = 0;
917     if (!is_int && !is_hw) {
918         has_error_code = exception_has_error_code(intno);
919     }
920     if (is_int) {
921         old_eip = next_eip;
922         set_rf = false;
923     } else {
924         old_eip = env->eip;
925         set_rf = exception_is_fault(intno);
926     }
927 
928     dt = &env->idt;
929     if (intno * 16 + 15 > dt->limit) {
930         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
931     }
932     ptr = dt->base + intno * 16;
933     e1 = cpu_ldl_kernel(env, ptr);
934     e2 = cpu_ldl_kernel(env, ptr + 4);
935     e3 = cpu_ldl_kernel(env, ptr + 8);
936     /* check gate type */
937     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
938     switch (type) {
939     case 14: /* 386 interrupt gate */
940     case 15: /* 386 trap gate */
941         break;
942     default:
943         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
944         break;
945     }
946     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
947     cpl = env->hflags & HF_CPL_MASK;
948     /* check privilege if software int */
949     if (is_int && dpl < cpl) {
950         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
951     }
952     /* check valid bit */
953     if (!(e2 & DESC_P_MASK)) {
954         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
955     }
956     selector = e1 >> 16;
957     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
958     ist = e2 & 7;
959     if ((selector & 0xfffc) == 0) {
960         raise_exception_err(env, EXCP0D_GPF, 0);
961     }
962 
963     if (load_segment(env, &e1, &e2, selector) != 0) {
964         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
965     }
966     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
967         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
968     }
969     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
970     if (dpl > cpl) {
971         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
972     }
973     if (!(e2 & DESC_P_MASK)) {
974         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
975     }
976     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
977         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
978     }
979     if (e2 & DESC_C_MASK) {
980         dpl = cpl;
981     }
982 
983     sa.env = env;
984     sa.ra = 0;
985     sa.sp_mask = -1;
986     sa.ss_base = 0;
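    /* a nonzero IST index in the gate forces a stack switch even when the
       privilege level does not change */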
987     if (dpl < cpl || ist != 0) {
988         /* to inner privilege */
989         new_stack = 1;
990         sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
991         ss = 0;
992     } else {
993         /* to same privilege */
994         if (env->eflags & VM_MASK) {
995             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
996         }
997         new_stack = 0;
998         sa.sp = env->regs[R_ESP];
999     }
1000     sa.sp &= ~0xfLL; /* align the stack on a 16-byte boundary */
1001 
1002     /* See do_interrupt_protected.  */
1003     eflags = cpu_compute_eflags(env);
1004     if (set_rf) {
1005         eflags |= RF_MASK;
1006     }
1007 
1008     pushq(&sa, env->segs[R_SS].selector);
1009     pushq(&sa, env->regs[R_ESP]);
1010     pushq(&sa, eflags);
1011     pushq(&sa, env->segs[R_CS].selector);
1012     pushq(&sa, old_eip);
1013     if (has_error_code) {
1014         pushq(&sa, error_code);
1015     }
1016 
1017     /* interrupt gates clear the IF flag (trap gates do not) */
1018     if ((type & 1) == 0) {
1019         env->eflags &= ~IF_MASK;
1020     }
1021     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1022 
1023     if (new_stack) {
1024         ss = 0 | dpl;
1025         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1026     }
1027     env->regs[R_ESP] = sa.sp;
1028 
1029     selector = (selector & ~3) | dpl;
1030     cpu_x86_load_seg_cache(env, R_CS, selector,
1031                    get_seg_base(e1, e2),
1032                    get_seg_limit(e1, e2),
1033                    e2);
1034     env->eip = offset;
1035 }
1036 #endif /* TARGET_X86_64 */
1037 
1038 void helper_sysret(CPUX86State *env, int dflag)
1039 {
1040     int cpl, selector;
1041 
1042     if (!(env->efer & MSR_EFER_SCE)) {
1043         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1044     }
1045     cpl = env->hflags & HF_CPL_MASK;
1046     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1047         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1048     }
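    /* SYSRET takes the target CS selector from STAR[63:48]; SS uses that
       selector + 8, and 64-bit mode uses selector + 16 for CS */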
1049     selector = (env->star >> 48) & 0xffff;
1050 #ifdef TARGET_X86_64
1051     if (env->hflags & HF_LMA_MASK) {
1052         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1053                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1054                         NT_MASK);
1055         if (dflag == 2) {
1056             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1057                                    0, 0xffffffff,
1058                                    DESC_G_MASK | DESC_P_MASK |
1059                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1060                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1061                                    DESC_L_MASK);
1062             env->eip = env->regs[R_ECX];
1063         } else {
1064             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1065                                    0, 0xffffffff,
1066                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1067                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1068                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1069             env->eip = (uint32_t)env->regs[R_ECX];
1070         }
1071         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1072                                0, 0xffffffff,
1073                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1074                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1075                                DESC_W_MASK | DESC_A_MASK);
1076     } else
1077 #endif
1078     {
1079         env->eflags |= IF_MASK;
1080         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1081                                0, 0xffffffff,
1082                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1083                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1084                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1085         env->eip = (uint32_t)env->regs[R_ECX];
1086         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1087                                0, 0xffffffff,
1088                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1089                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1090                                DESC_W_MASK | DESC_A_MASK);
1091     }
1092 }
1093 
1094 /* real mode interrupt */
1095 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1096                               int error_code, unsigned int next_eip)
1097 {
1098     SegmentCache *dt;
1099     target_ulong ptr;
1100     int selector;
1101     uint32_t offset;
1102     uint32_t old_cs, old_eip;
1103     StackAccess sa;
1104 
1105     /* real mode (simpler!) */
1106     dt = &env->idt;
1107     if (intno * 4 + 3 > dt->limit) {
1108         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1109     }
1110     ptr = dt->base + intno * 4;
1111     offset = cpu_lduw_kernel(env, ptr);
1112     selector = cpu_lduw_kernel(env, ptr + 2);
1113 
1114     sa.env = env;
1115     sa.ra = 0;
1116     sa.sp = env->regs[R_ESP];
1117     sa.sp_mask = 0xffff;
1118     sa.ss_base = env->segs[R_SS].base;
1119 
1120     if (is_int) {
1121         old_eip = next_eip;
1122     } else {
1123         old_eip = env->eip;
1124     }
1125     old_cs = env->segs[R_CS].selector;
1126     /* XXX: use SS segment size? */
1127     pushw(&sa, cpu_compute_eflags(env));
1128     pushw(&sa, old_cs);
1129     pushw(&sa, old_eip);
1130 
1131     /* update processor state */
1132     SET_ESP(sa.sp, sa.sp_mask);
1133     env->eip = offset;
1134     env->segs[R_CS].selector = selector;
1135     env->segs[R_CS].base = (selector << 4);
1136     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1137 }
1138 
1139 /*
1140  * Begin execution of an interrupt. is_int is TRUE if coming from
1141  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1142  * instruction. It is only relevant if is_int is TRUE.
1143  */
1144 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1145                       int error_code, target_ulong next_eip, int is_hw)
1146 {
1147     CPUX86State *env = &cpu->env;
1148 
1149     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1150         if ((env->cr[0] & CR0_PE_MASK)) {
1151             static int count;
1152 
1153             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1154                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1155                      count, intno, error_code, is_int,
1156                      env->hflags & HF_CPL_MASK,
1157                      env->segs[R_CS].selector, env->eip,
1158                      (int)env->segs[R_CS].base + env->eip,
1159                      env->segs[R_SS].selector, env->regs[R_ESP]);
1160             if (intno == 0x0e) {
1161                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1162             } else {
1163                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1164             }
1165             qemu_log("\n");
1166             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1167 #if 0
1168             {
1169                 int i;
1170                 target_ulong ptr;
1171 
1172                 qemu_log("       code=");
1173                 ptr = env->segs[R_CS].base + env->eip;
1174                 for (i = 0; i < 16; i++) {
1175                     qemu_log(" %02x", ldub(ptr + i));
1176                 }
1177                 qemu_log("\n");
1178             }
1179 #endif
1180             count++;
1181         }
1182     }
1183     if (env->cr[0] & CR0_PE_MASK) {
1184 #if !defined(CONFIG_USER_ONLY)
1185         if (env->hflags & HF_GUEST_MASK) {
1186             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1187         }
1188 #endif
1189 #ifdef TARGET_X86_64
1190         if (env->hflags & HF_LMA_MASK) {
1191             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1192         } else
1193 #endif
1194         {
1195             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1196                                    is_hw);
1197         }
1198     } else {
1199 #if !defined(CONFIG_USER_ONLY)
1200         if (env->hflags & HF_GUEST_MASK) {
1201             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1202         }
1203 #endif
1204         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1205     }
1206 
1207 #if !defined(CONFIG_USER_ONLY)
1208     if (env->hflags & HF_GUEST_MASK) {
1209         CPUState *cs = CPU(cpu);
1210         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1211                                       offsetof(struct vmcb,
1212                                                control.event_inj));
1213 
1214         x86_stl_phys(cs,
1215                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1216                  event_inj & ~SVM_EVTINJ_VALID);
1217     }
1218 #endif
1219 }
1220 
1221 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1222 {
1223     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1224 }
1225 
1226 void helper_lldt(CPUX86State *env, int selector)
1227 {
1228     SegmentCache *dt;
1229     uint32_t e1, e2;
1230     int index, entry_limit;
1231     target_ulong ptr;
1232 
1233     selector &= 0xffff;
1234     if ((selector & 0xfffc) == 0) {
1235         /* XXX: NULL selector case: invalid LDT */
1236         env->ldt.base = 0;
1237         env->ldt.limit = 0;
1238     } else {
1239         if (selector & 0x4) {
1240             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1241         }
1242         dt = &env->gdt;
1243         index = selector & ~7;
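        /* in long mode the LDT descriptor is 16 bytes, so two GDT entries
           must fit within the limit */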
1244 #ifdef TARGET_X86_64
1245         if (env->hflags & HF_LMA_MASK) {
1246             entry_limit = 15;
1247         } else
1248 #endif
1249         {
1250             entry_limit = 7;
1251         }
1252         if ((index + entry_limit) > dt->limit) {
1253             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1254         }
1255         ptr = dt->base + index;
1256         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1257         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1258         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1259             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1260         }
1261         if (!(e2 & DESC_P_MASK)) {
1262             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1263         }
1264 #ifdef TARGET_X86_64
1265         if (env->hflags & HF_LMA_MASK) {
1266             uint32_t e3;
1267 
1268             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1269             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1270             env->ldt.base |= (target_ulong)e3 << 32;
1271         } else
1272 #endif
1273         {
1274             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1275         }
1276     }
1277     env->ldt.selector = selector;
1278 }
1279 
1280 void helper_ltr(CPUX86State *env, int selector)
1281 {
1282     SegmentCache *dt;
1283     uint32_t e1, e2;
1284     int index, type, entry_limit;
1285     target_ulong ptr;
1286 
1287     selector &= 0xffff;
1288     if ((selector & 0xfffc) == 0) {
1289         /* NULL selector case: invalid TR */
1290         env->tr.base = 0;
1291         env->tr.limit = 0;
1292         env->tr.flags = 0;
1293     } else {
1294         if (selector & 0x4) {
1295             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1296         }
1297         dt = &env->gdt;
1298         index = selector & ~7;
1299 #ifdef TARGET_X86_64
1300         if (env->hflags & HF_LMA_MASK) {
1301             entry_limit = 15;
1302         } else
1303 #endif
1304         {
1305             entry_limit = 7;
1306         }
1307         if ((index + entry_limit) > dt->limit) {
1308             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1309         }
1310         ptr = dt->base + index;
1311         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1312         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1313         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1314         if ((e2 & DESC_S_MASK) ||
1315             (type != 1 && type != 9)) {
1316             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1317         }
1318         if (!(e2 & DESC_P_MASK)) {
1319             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1320         }
1321 #ifdef TARGET_X86_64
1322         if (env->hflags & HF_LMA_MASK) {
1323             uint32_t e3, e4;
1324 
1325             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1326             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1327             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1328                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1329             }
1330             load_seg_cache_raw_dt(&env->tr, e1, e2);
1331             env->tr.base |= (target_ulong)e3 << 32;
1332         } else
1333 #endif
1334         {
1335             load_seg_cache_raw_dt(&env->tr, e1, e2);
1336         }
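        /* mark the selected TSS busy in the GDT, as LTR does on real hardware */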
1337         e2 |= DESC_TSS_BUSY_MASK;
1338         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1339     }
1340     env->tr.selector = selector;
1341 }
1342 
1343 /* only works in protected mode outside VM86. seg_reg must be != R_CS */
1344 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1345 {
1346     uint32_t e1, e2;
1347     int cpl, dpl, rpl;
1348     SegmentCache *dt;
1349     int index;
1350     target_ulong ptr;
1351 
1352     selector &= 0xffff;
1353     cpl = env->hflags & HF_CPL_MASK;
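    /* a null selector is allowed in data segment registers; for SS it is
       only legal in 64-bit mode at CPL 0-2 */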
1354     if ((selector & 0xfffc) == 0) {
1355         /* null selector case */
1356         if (seg_reg == R_SS
1357 #ifdef TARGET_X86_64
1358             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1359 #endif
1360             ) {
1361             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1362         }
1363         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1364     } else {
1365 
1366         if (selector & 0x4) {
1367             dt = &env->ldt;
1368         } else {
1369             dt = &env->gdt;
1370         }
1371         index = selector & ~7;
1372         if ((index + 7) > dt->limit) {
1373             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1374         }
1375         ptr = dt->base + index;
1376         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1377         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1378 
1379         if (!(e2 & DESC_S_MASK)) {
1380             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1381         }
1382         rpl = selector & 3;
1383         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1384         if (seg_reg == R_SS) {
1385             /* must be writable segment */
1386             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1387                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1388             }
1389             if (rpl != cpl || dpl != cpl) {
1390                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1391             }
1392         } else {
1393             /* must be readable segment */
1394             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1395                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1396             }
1397 
1398             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1399                 /* if not conforming code, test rights */
1400                 if (dpl < cpl || dpl < rpl) {
1401                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1402                 }
1403             }
1404         }
1405 
1406         if (!(e2 & DESC_P_MASK)) {
1407             if (seg_reg == R_SS) {
1408                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1409             } else {
1410                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1411             }
1412         }
1413 
1414         /* set the access bit if not already set */
1415         if (!(e2 & DESC_A_MASK)) {
1416             e2 |= DESC_A_MASK;
1417             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1418         }
1419 
1420         cpu_x86_load_seg_cache(env, seg_reg, selector,
1421                        get_seg_base(e1, e2),
1422                        get_seg_limit(e1, e2),
1423                        e2);
1424 #if 0
1425         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1426                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1427 #endif
1428     }
1429 }
1430 
1431 /* protected mode jump */
1432 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1433                            target_ulong next_eip)
1434 {
1435     int gate_cs, type;
1436     uint32_t e1, e2, cpl, dpl, rpl, limit;
1437 
1438     if ((new_cs & 0xfffc) == 0) {
1439         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1440     }
1441     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1442         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1443     }
1444     cpl = env->hflags & HF_CPL_MASK;
1445     if (e2 & DESC_S_MASK) {
1446         if (!(e2 & DESC_CS_MASK)) {
1447             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1448         }
1449         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1450         if (e2 & DESC_C_MASK) {
1451             /* conforming code segment */
1452             if (dpl > cpl) {
1453                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1454             }
1455         } else {
1456             /* non conforming code segment */
1457             rpl = new_cs & 3;
1458             if (rpl > cpl) {
1459                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1460             }
1461             if (dpl != cpl) {
1462                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1463             }
1464         }
1465         if (!(e2 & DESC_P_MASK)) {
1466             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1467         }
1468         limit = get_seg_limit(e1, e2);
1469         if (new_eip > limit &&
1470             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1471             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1472         }
1473         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1474                        get_seg_base(e1, e2), limit, e2);
1475         env->eip = new_eip;
1476     } else {
1477         /* jump to call or task gate */
1478         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1479         rpl = new_cs & 3;
1480         cpl = env->hflags & HF_CPL_MASK;
1481         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1482 
1483 #ifdef TARGET_X86_64
1484         if (env->efer & MSR_EFER_LMA) {
1485             if (type != 12) {
1486                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1487             }
1488         }
1489 #endif
1490         switch (type) {
1491         case 1: /* 286 TSS */
1492         case 9: /* 386 TSS */
1493         case 5: /* task gate */
1494             if (dpl < cpl || dpl < rpl) {
1495                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1496             }
1497             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1498             break;
1499         case 4: /* 286 call gate */
1500         case 12: /* 386 call gate */
1501             if ((dpl < cpl) || (dpl < rpl)) {
1502                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1503             }
1504             if (!(e2 & DESC_P_MASK)) {
1505                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1506             }
1507             gate_cs = e1 >> 16;
1508             new_eip = (e1 & 0xffff);
1509             if (type == 12) {
1510                 new_eip |= (e2 & 0xffff0000);
1511             }
1512 
1513 #ifdef TARGET_X86_64
1514             if (env->efer & MSR_EFER_LMA) {
1515                 /* load the upper 8 bytes of the 64-bit call gate */
1516                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1517                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1518                                            GETPC());
1519                 }
1520                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1521                 if (type != 0) {
1522                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1523                                            GETPC());
1524                 }
1525                 new_eip |= ((target_ulong)e1) << 32;
1526             }
1527 #endif
1528 
1529             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1530                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1531             }
1532             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1533             /* must be code segment */
1534             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1535                  (DESC_S_MASK | DESC_CS_MASK))) {
1536                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1537             }
1538             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1539                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1540                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1541             }
1542 #ifdef TARGET_X86_64
1543             if (env->efer & MSR_EFER_LMA) {
1544                 if (!(e2 & DESC_L_MASK)) {
1545                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1546                 }
1547                 if (e2 & DESC_B_MASK) {
1548                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1549                 }
1550             }
1551 #endif
1552             if (!(e2 & DESC_P_MASK)) {
1553                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1554             }
1555             limit = get_seg_limit(e1, e2);
1556             if (new_eip > limit &&
1557                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1558                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1559             }
1560             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1561                                    get_seg_base(e1, e2), limit, e2);
1562             env->eip = new_eip;
1563             break;
1564         default:
1565             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1566             break;
1567         }
1568     }
1569 }
1570 
1571 /* real mode call */
1572 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1573                        int shift, uint32_t next_eip)
1574 {
1575     StackAccess sa;
1576 
1577     sa.env = env;
1578     sa.ra = GETPC();
1579     sa.sp = env->regs[R_ESP];
1580     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1581     sa.ss_base = env->segs[R_SS].base;
1582 
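         /* push the return CS:IP; 'shift' selects 32-bit (1) or 16-bit (0) pushes */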
1583     if (shift) {
1584         pushl(&sa, env->segs[R_CS].selector);
1585         pushl(&sa, next_eip);
1586     } else {
1587         pushw(&sa, env->segs[R_CS].selector);
1588         pushw(&sa, next_eip);
1589     }
1590 
1591     SET_ESP(sa.sp, sa.sp_mask);
1592     env->eip = new_eip;
1593     env->segs[R_CS].selector = new_cs;
1594     env->segs[R_CS].base = (new_cs << 4);
1595 }
1596 
1597 /* protected mode call */
1598 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1599                             int shift, target_ulong next_eip)
1600 {
1601     int new_stack, i;
1602     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1603     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1604     uint32_t val, limit, old_sp_mask;
1605     target_ulong old_ssp, offset;
1606     StackAccess sa;
1607 
1608     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1609     LOG_PCALL_STATE(env_cpu(env));
1610     if ((new_cs & 0xfffc) == 0) {
1611         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1612     }
1613     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1614         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1615     }
1616     cpl = env->hflags & HF_CPL_MASK;
1617     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1618 
1619     sa.env = env;
1620     sa.ra = GETPC();
1621 
1622     if (e2 & DESC_S_MASK) {
1623         if (!(e2 & DESC_CS_MASK)) {
1624             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1625         }
1626         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1627         if (e2 & DESC_C_MASK) {
1628             /* conforming code segment */
1629             if (dpl > cpl) {
1630                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1631             }
1632         } else {
1633             /* non-conforming code segment */
1634             rpl = new_cs & 3;
1635             if (rpl > cpl) {
1636                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1637             }
1638             if (dpl != cpl) {
1639                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1640             }
1641         }
1642         if (!(e2 & DESC_P_MASK)) {
1643             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1644         }
1645 
1646 #ifdef TARGET_X86_64
1647         /* XXX: check 16/32 bit cases in long mode */
1648         if (shift == 2) {
1649             /* 64 bit case */
1650             sa.sp = env->regs[R_ESP];
1651             sa.sp_mask = -1;
1652             sa.ss_base = 0;
1653             pushq(&sa, env->segs[R_CS].selector);
1654             pushq(&sa, next_eip);
1655             /* from this point, not restartable */
1656             env->regs[R_ESP] = sa.sp;
1657             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1658                                    get_seg_base(e1, e2),
1659                                    get_seg_limit(e1, e2), e2);
1660             env->eip = new_eip;
1661         } else
1662 #endif
1663         {
1664             sa.sp = env->regs[R_ESP];
1665             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1666             sa.ss_base = env->segs[R_SS].base;
1667             if (shift) {
1668                 pushl(&sa, env->segs[R_CS].selector);
1669                 pushl(&sa, next_eip);
1670             } else {
1671                 pushw(&sa, env->segs[R_CS].selector);
1672                 pushw(&sa, next_eip);
1673             }
1674 
1675             limit = get_seg_limit(e1, e2);
1676             if (new_eip > limit) {
1677                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1678             }
1679             /* from this point, not restartable */
1680             SET_ESP(sa.sp, sa.sp_mask);
1681             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1682                                    get_seg_base(e1, e2), limit, e2);
1683             env->eip = new_eip;
1684         }
1685     } else {
1686         /* check gate type */
1687         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1688         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1689         rpl = new_cs & 3;
1690 
1691 #ifdef TARGET_X86_64
1692         if (env->efer & MSR_EFER_LMA) {
1693             if (type != 12) {
1694                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1695             }
1696         }
1697 #endif
1698 
1699         switch (type) {
1700         case 1: /* available 286 TSS */
1701         case 9: /* available 386 TSS */
1702         case 5: /* task gate */
1703             if (dpl < cpl || dpl < rpl) {
1704                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1705             }
1706             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1707             return;
1708         case 4: /* 286 call gate */
1709         case 12: /* 386 call gate */
1710             break;
1711         default:
1712             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1713             break;
1714         }
1715         shift = type >> 3;
1716 
1717         if (dpl < cpl || dpl < rpl) {
1718             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1719         }
1720         /* check valid bit */
1721         if (!(e2 & DESC_P_MASK)) {
1722             raise_exception_err_ra(env, EXCP0B_NOSEG,  new_cs & 0xfffc, GETPC());
1723         }
1724         selector = e1 >> 16;
1725         param_count = e2 & 0x1f;
1726         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1727 #ifdef TARGET_X86_64
1728         if (env->efer & MSR_EFER_LMA) {
1729             /* load the upper 8 bytes of the 64-bit call gate */
1730             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1731                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1732                                        GETPC());
1733             }
1734             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1735             if (type != 0) {
1736                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1737                                        GETPC());
1738             }
1739             offset |= ((target_ulong)e1) << 32;
1740         }
1741 #endif
1742         if ((selector & 0xfffc) == 0) {
1743             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1744         }
1745 
1746         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1747             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1748         }
1749         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1750             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1751         }
1752         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1753         if (dpl > cpl) {
1754             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1755         }
1756 #ifdef TARGET_X86_64
1757         if (env->efer & MSR_EFER_LMA) {
1758             if (!(e2 & DESC_L_MASK)) {
1759                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1760             }
1761             if (e2 & DESC_B_MASK) {
1762                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1763             }
1764             shift++;
1765         }
1766 #endif
1767         if (!(e2 & DESC_P_MASK)) {
1768             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1769         }
1770 
1771         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1772             /* to inner privilege */
1773 #ifdef TARGET_X86_64
1774             if (shift == 2) {
1775                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1776                 new_stack = 1;
1777                 sa.sp = get_rsp_from_tss(env, dpl);
1778                 sa.sp_mask = -1;
1779                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1780                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1781                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1782             } else
1783 #endif
1784             {
1785                 uint32_t sp32;
1786                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1787                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1788                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1789                           env->regs[R_ESP]);
1790                 if ((ss & 0xfffc) == 0) {
1791                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1792                 }
1793                 if ((ss & 3) != dpl) {
1794                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1795                 }
1796                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1797                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1798                 }
1799                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1800                 if (ss_dpl != dpl) {
1801                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1802                 }
1803                 if (!(ss_e2 & DESC_S_MASK) ||
1804                     (ss_e2 & DESC_CS_MASK) ||
1805                     !(ss_e2 & DESC_W_MASK)) {
1806                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1807                 }
1808                 if (!(ss_e2 & DESC_P_MASK)) {
1809                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1810                 }
1811 
1812                 sa.sp = sp32;
1813                 sa.sp_mask = get_sp_mask(ss_e2);
1814                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1815             }
1816 
1817             /* push_size = ((param_count * 2) + 8) << shift; */
1818             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1819             old_ssp = env->segs[R_SS].base;
1820 
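                 /* push the caller's SS:ESP on the new stack and, for 16/32-bit gates, copy param_count parameters across */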
1821 #ifdef TARGET_X86_64
1822             if (shift == 2) {
1823                 /* XXX: verify if new stack address is canonical */
1824                 pushq(&sa, env->segs[R_SS].selector);
1825                 pushq(&sa, env->regs[R_ESP]);
1826                 /* parameters aren't supported for 64-bit call gates */
1827             } else
1828 #endif
1829             if (shift == 1) {
1830                 pushl(&sa, env->segs[R_SS].selector);
1831                 pushl(&sa, env->regs[R_ESP]);
1832                 for (i = param_count - 1; i >= 0; i--) {
1833                     val = cpu_ldl_data_ra(env,
1834                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1835                                           GETPC());
1836                     pushl(&sa, val);
1837                 }
1838             } else {
1839                 pushw(&sa, env->segs[R_SS].selector);
1840                 pushw(&sa, env->regs[R_ESP]);
1841                 for (i = param_count - 1; i >= 0; i--) {
1842                     val = cpu_lduw_data_ra(env,
1843                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1844                                            GETPC());
1845                     pushw(&sa, val);
1846                 }
1847             }
1848             new_stack = 1;
1849         } else {
1850             /* to same privilege */
1851             sa.sp = env->regs[R_ESP];
1852             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1853             sa.ss_base = env->segs[R_SS].base;
1854             /* push_size = (4 << shift); */
1855             new_stack = 0;
1856         }
1857 
1858 #ifdef TARGET_X86_64
1859         if (shift == 2) {
1860             pushq(&sa, env->segs[R_CS].selector);
1861             pushq(&sa, next_eip);
1862         } else
1863 #endif
1864         if (shift == 1) {
1865             pushl(&sa, env->segs[R_CS].selector);
1866             pushl(&sa, next_eip);
1867         } else {
1868             pushw(&sa, env->segs[R_CS].selector);
1869             pushw(&sa, next_eip);
1870         }
1871 
1872         /* from this point, not restartable */
1873 
1874         if (new_stack) {
1875 #ifdef TARGET_X86_64
1876             if (shift == 2) {
1877                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1878             } else
1879 #endif
1880             {
1881                 ss = (ss & ~3) | dpl;
1882                 cpu_x86_load_seg_cache(env, R_SS, ss,
1883                                        sa.ss_base,
1884                                        get_seg_limit(ss_e1, ss_e2),
1885                                        ss_e2);
1886             }
1887         }
1888 
1889         selector = (selector & ~3) | dpl;
1890         cpu_x86_load_seg_cache(env, R_CS, selector,
1891                        get_seg_base(e1, e2),
1892                        get_seg_limit(e1, e2),
1893                        e2);
1894         SET_ESP(sa.sp, sa.sp_mask);
1895         env->eip = offset;
1896     }
1897 }
1898 
1899 /* real and vm86 mode iret */
1900 void helper_iret_real(CPUX86State *env, int shift)
1901 {
1902     uint32_t new_cs, new_eip, new_eflags;
1903     int eflags_mask;
1904     StackAccess sa;
1905 
1906     sa.env = env;
1907     sa.ra = GETPC();
1908     sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */
1909     sa.sp = env->regs[R_ESP];
1910     sa.ss_base = env->segs[R_SS].base;
1911 
1912     if (shift == 1) {
1913         /* 32 bits */
1914         new_eip = popl(&sa);
1915         new_cs = popl(&sa) & 0xffff;
1916         new_eflags = popl(&sa);
1917     } else {
1918         /* 16 bits */
1919         new_eip = popw(&sa);
1920         new_cs = popw(&sa);
1921         new_eflags = popw(&sa);
1922     }
1923     SET_ESP(sa.sp, sa.sp_mask);
1924     env->segs[R_CS].selector = new_cs;
1925     env->segs[R_CS].base = (new_cs << 4);
1926     env->eip = new_eip;
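         /* select which EFLAGS bits may be modified: a vm86 iret must not change IOPL */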
1927     if (env->eflags & VM_MASK) {
1928         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1929             NT_MASK;
1930     } else {
1931         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1932             RF_MASK | NT_MASK;
1933     }
1934     if (shift == 0) {
1935         eflags_mask &= 0xffff;
1936     }
1937     cpu_load_eflags(env, new_eflags, eflags_mask);
1938     env->hflags2 &= ~HF2_NMI_MASK;
1939 }
1940 
1941 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1942 {
1943     int dpl;
1944     uint32_t e2;
1945 
1946     /* XXX: on x86_64, we do not want to nullify FS and GS because
1947        they may still contain a valid base. I would be interested to
1948        know how a real x86_64 CPU behaves */
1949     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1950         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1951         return;
1952     }
1953 
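         /* a data or non-conforming code segment more privileged than the new CPL must not stay loaded */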
1954     e2 = env->segs[seg_reg].flags;
1955     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1956     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1957         /* data or non-conforming code segment */
1958         if (dpl < cpl) {
1959             cpu_x86_load_seg_cache(env, seg_reg, 0,
1960                                    env->segs[seg_reg].base,
1961                                    env->segs[seg_reg].limit,
1962                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
1963         }
1964     }
1965 }
1966 
1967 /* protected mode iret and lret */
1968 static inline void helper_ret_protected(CPUX86State *env, int shift,
1969                                         int is_iret, int addend,
1970                                         uintptr_t retaddr)
1971 {
1972     uint32_t new_cs, new_eflags, new_ss;
1973     uint32_t new_es, new_ds, new_fs, new_gs;
1974     uint32_t e1, e2, ss_e1, ss_e2;
1975     int cpl, dpl, rpl, eflags_mask, iopl;
1976     target_ulong new_eip, new_esp;
1977     StackAccess sa;
1978 
1979     sa.env = env;
1980     sa.ra = retaddr;
1981 
1982 #ifdef TARGET_X86_64
1983     if (shift == 2) {
1984         sa.sp_mask = -1;
1985     } else
1986 #endif
1987     {
1988         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1989     }
1990     sa.sp = env->regs[R_ESP];
1991     sa.ss_base = env->segs[R_SS].base;
1992     new_eflags = 0; /* avoid warning */
1993 #ifdef TARGET_X86_64
1994     if (shift == 2) {
1995         new_eip = popq(&sa);
1996         new_cs = popq(&sa) & 0xffff;
1997         if (is_iret) {
1998             new_eflags = popq(&sa);
1999         }
2000     } else
2001 #endif
2002     {
2003         if (shift == 1) {
2004             /* 32 bits */
2005             new_eip = popl(&sa);
2006             new_cs = popl(&sa) & 0xffff;
2007             if (is_iret) {
2008                 new_eflags = popl(&sa);
2009                 if (new_eflags & VM_MASK) {
2010                     goto return_to_vm86;
2011                 }
2012             }
2013         } else {
2014             /* 16 bits */
2015             new_eip = popw(&sa);
2016             new_cs = popw(&sa);
2017             if (is_iret) {
2018                 new_eflags = popw(&sa);
2019             }
2020         }
2021     }
2022     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2023               new_cs, new_eip, shift, addend);
2024     LOG_PCALL_STATE(env_cpu(env));
2025     if ((new_cs & 0xfffc) == 0) {
2026         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2027     }
2028     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2029         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2030     }
2031     if (!(e2 & DESC_S_MASK) ||
2032         !(e2 & DESC_CS_MASK)) {
2033         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2034     }
2035     cpl = env->hflags & HF_CPL_MASK;
2036     rpl = new_cs & 3;
2037     if (rpl < cpl) {
2038         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2039     }
2040     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2041     if (e2 & DESC_C_MASK) {
2042         if (dpl > rpl) {
2043             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2044         }
2045     } else {
2046         if (dpl != rpl) {
2047             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2048         }
2049     }
2050     if (!(e2 & DESC_P_MASK)) {
2051         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2052     }
2053 
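         /* 'addend' skips the immediate operand bytes of a far 'ret n' */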
2054     sa.sp += addend;
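         /* a 64-bit iret always pops SS:RSP, even when returning to the same privilege level */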
2055     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2056                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2057         /* return to same privilege level */
2058         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2059                        get_seg_base(e1, e2),
2060                        get_seg_limit(e1, e2),
2061                        e2);
2062     } else {
2063         /* return to different privilege level */
2064 #ifdef TARGET_X86_64
2065         if (shift == 2) {
2066             new_esp = popq(&sa);
2067             new_ss = popq(&sa) & 0xffff;
2068         } else
2069 #endif
2070         {
2071             if (shift == 1) {
2072                 /* 32 bits */
2073                 new_esp = popl(&sa);
2074                 new_ss = popl(&sa) & 0xffff;
2075             } else {
2076                 /* 16 bits */
2077                 new_esp = popw(&sa);
2078                 new_ss = popw(&sa);
2079             }
2080         }
2081         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2082                   new_ss, new_esp);
2083         if ((new_ss & 0xfffc) == 0) {
2084 #ifdef TARGET_X86_64
2085             /* NULL ss is allowed in long mode if cpl != 3 */
2086             /* XXX: test CS64? */
2087             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2088                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2089                                        0, 0xffffffff,
2090                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2091                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2092                                        DESC_W_MASK | DESC_A_MASK);
2093                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2094             } else
2095 #endif
2096             {
2097                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2098             }
2099         } else {
2100             if ((new_ss & 3) != rpl) {
2101                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2102             }
2103             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2104                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2105             }
2106             if (!(ss_e2 & DESC_S_MASK) ||
2107                 (ss_e2 & DESC_CS_MASK) ||
2108                 !(ss_e2 & DESC_W_MASK)) {
2109                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2110             }
2111             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2112             if (dpl != rpl) {
2113                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2114             }
2115             if (!(ss_e2 & DESC_P_MASK)) {
2116                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2117             }
2118             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2119                                    get_seg_base(ss_e1, ss_e2),
2120                                    get_seg_limit(ss_e1, ss_e2),
2121                                    ss_e2);
2122         }
2123 
2124         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2125                        get_seg_base(e1, e2),
2126                        get_seg_limit(e1, e2),
2127                        e2);
2128         sa.sp = new_esp;
2129 #ifdef TARGET_X86_64
2130         if (env->hflags & HF_CS64_MASK) {
2131             sa.sp_mask = -1;
2132         } else
2133 #endif
2134         {
2135             sa.sp_mask = get_sp_mask(ss_e2);
2136         }
2137 
2138         /* validate data segments */
2139         validate_seg(env, R_ES, rpl);
2140         validate_seg(env, R_DS, rpl);
2141         validate_seg(env, R_FS, rpl);
2142         validate_seg(env, R_GS, rpl);
2143 
2144         sa.sp += addend;
2145     }
2146     SET_ESP(sa.sp, sa.sp_mask);
2147     env->eip = new_eip;
2148     if (is_iret) {
2149         /* NOTE: 'cpl' is the _old_ CPL */
2150         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2151         if (cpl == 0) {
2152             eflags_mask |= IOPL_MASK;
2153         }
2154         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2155         if (cpl <= iopl) {
2156             eflags_mask |= IF_MASK;
2157         }
2158         if (shift == 0) {
2159             eflags_mask &= 0xffff;
2160         }
2161         cpu_load_eflags(env, new_eflags, eflags_mask);
2162     }
2163     return;
2164 
2165  return_to_vm86:
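         /* the rest of the vm86 context (ESP, SS, ES, DS, FS, GS) follows EFLAGS on the stack */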
2166     new_esp = popl(&sa);
2167     new_ss = popl(&sa);
2168     new_es = popl(&sa);
2169     new_ds = popl(&sa);
2170     new_fs = popl(&sa);
2171     new_gs = popl(&sa);
2172 
2173     /* modify processor state */
2174     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2175                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2176                     VIP_MASK);
2177     load_seg_vm(env, R_CS, new_cs & 0xffff);
2178     load_seg_vm(env, R_SS, new_ss & 0xffff);
2179     load_seg_vm(env, R_ES, new_es & 0xffff);
2180     load_seg_vm(env, R_DS, new_ds & 0xffff);
2181     load_seg_vm(env, R_FS, new_fs & 0xffff);
2182     load_seg_vm(env, R_GS, new_gs & 0xffff);
2183 
2184     env->eip = new_eip & 0xffff;
2185     env->regs[R_ESP] = new_esp;
2186 }
2187 
2188 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2189 {
2190     int tss_selector, type;
2191     uint32_t e1, e2;
2192 
2193     /* specific case for TSS */
2194     if (env->eflags & NT_MASK) {
2195 #ifdef TARGET_X86_64
2196         if (env->hflags & HF_LMA_MASK) {
2197             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2198         }
2199 #endif
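             /* a nested task return follows the back link stored at offset 0 of the current TSS */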
2200         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2201         if (tss_selector & 4) {
2202             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2203         }
2204         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2205             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2206         }
2207         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2208         /* NOTE: masking with 0x17 checks both the S bit and the busy TSS type */
2209         if (type != 3) {
2210             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2211         }
2212         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2213     } else {
2214         helper_ret_protected(env, shift, 1, 0, GETPC());
2215     }
2216     env->hflags2 &= ~HF2_NMI_MASK;
2217 }
2218 
2219 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2220 {
2221     helper_ret_protected(env, shift, 0, addend, GETPC());
2222 }
2223 
2224 void helper_sysenter(CPUX86State *env)
2225 {
2226     if (env->sysenter_cs == 0) {
2227         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2228     }
2229     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2230 
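         /* SYSENTER enters CPL 0 with flat CS/SS selectors derived from SYSENTER_CS (SS = CS + 8) */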
2231 #ifdef TARGET_X86_64
2232     if (env->hflags & HF_LMA_MASK) {
2233         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2234                                0, 0xffffffff,
2235                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2236                                DESC_S_MASK |
2237                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2238                                DESC_L_MASK);
2239     } else
2240 #endif
2241     {
2242         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2243                                0, 0xffffffff,
2244                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2245                                DESC_S_MASK |
2246                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2247     }
2248     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2249                            0, 0xffffffff,
2250                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2251                            DESC_S_MASK |
2252                            DESC_W_MASK | DESC_A_MASK);
2253     env->regs[R_ESP] = env->sysenter_esp;
2254     env->eip = env->sysenter_eip;
2255 }
2256 
2257 void helper_sysexit(CPUX86State *env, int dflag)
2258 {
2259     int cpl;
2260 
2261     cpl = env->hflags & HF_CPL_MASK;
2262     if (env->sysenter_cs == 0 || cpl != 0) {
2263         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2264     }
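         /* SYSEXIT returns to CPL 3; CS and SS selectors are taken at fixed offsets from SYSENTER_CS */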
2265 #ifdef TARGET_X86_64
2266     if (dflag == 2) {
2267         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2268                                3, 0, 0xffffffff,
2269                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2270                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2271                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2272                                DESC_L_MASK);
2273         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2274                                3, 0, 0xffffffff,
2275                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2276                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2277                                DESC_W_MASK | DESC_A_MASK);
2278     } else
2279 #endif
2280     {
2281         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2282                                3, 0, 0xffffffff,
2283                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2284                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2285                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2286         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2287                                3, 0, 0xffffffff,
2288                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2289                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2290                                DESC_W_MASK | DESC_A_MASK);
2291     }
2292     env->regs[R_ESP] = env->regs[R_ECX];
2293     env->eip = env->regs[R_EDX];
2294 }
2295 
2296 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2297 {
2298     unsigned int limit;
2299     uint32_t e1, e2, selector;
2300     int rpl, dpl, cpl, type;
2301 
2302     selector = selector1 & 0xffff;
2303     assert(CC_OP == CC_OP_EFLAGS);
2304     if ((selector & 0xfffc) == 0) {
2305         goto fail;
2306     }
2307     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2308         goto fail;
2309     }
2310     rpl = selector & 3;
2311     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2312     cpl = env->hflags & HF_CPL_MASK;
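         /* LSL is only defined for descriptor types that have a limit: segments, TSS and LDT */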
2313     if (e2 & DESC_S_MASK) {
2314         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2315             /* conforming */
2316         } else {
2317             if (dpl < cpl || dpl < rpl) {
2318                 goto fail;
2319             }
2320         }
2321     } else {
2322         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2323         switch (type) {
2324         case 1:
2325         case 2:
2326         case 3:
2327         case 9:
2328         case 11:
2329             break;
2330         default:
2331             goto fail;
2332         }
2333         if (dpl < cpl || dpl < rpl) {
2334         fail:
2335             CC_SRC &= ~CC_Z;
2336             return 0;
2337         }
2338     }
2339     limit = get_seg_limit(e1, e2);
2340     CC_SRC |= CC_Z;
2341     return limit;
2342 }
2343 
2344 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2345 {
2346     uint32_t e1, e2, selector;
2347     int rpl, dpl, cpl, type;
2348 
2349     selector = selector1 & 0xffff;
2350     assert(CC_OP == CC_OP_EFLAGS);
2351     if ((selector & 0xfffc) == 0) {
2352         goto fail;
2353     }
2354     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2355         goto fail;
2356     }
2357     rpl = selector & 3;
2358     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2359     cpl = env->hflags & HF_CPL_MASK;
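         /* unlike LSL, LAR also accepts call and task gates since only access rights are returned */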
2360     if (e2 & DESC_S_MASK) {
2361         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2362             /* conforming */
2363         } else {
2364             if (dpl < cpl || dpl < rpl) {
2365                 goto fail;
2366             }
2367         }
2368     } else {
2369         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2370         switch (type) {
2371         case 1:
2372         case 2:
2373         case 3:
2374         case 4:
2375         case 5:
2376         case 9:
2377         case 11:
2378         case 12:
2379             break;
2380         default:
2381             goto fail;
2382         }
2383         if (dpl < cpl || dpl < rpl) {
2384         fail:
2385             CC_SRC &= ~CC_Z;
2386             return 0;
2387         }
2388     }
2389     CC_SRC |= CC_Z;
2390     return e2 & 0x00f0ff00;
2391 }
2392 
2393 void helper_verr(CPUX86State *env, target_ulong selector1)
2394 {
2395     uint32_t e1, e2, eflags, selector;
2396     int rpl, dpl, cpl;
2397 
2398     selector = selector1 & 0xffff;
2399     eflags = cpu_cc_compute_all(env) | CC_Z;
2400     if ((selector & 0xfffc) == 0) {
2401         goto fail;
2402     }
2403     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2404         goto fail;
2405     }
2406     if (!(e2 & DESC_S_MASK)) {
2407         goto fail;
2408     }
2409     rpl = selector & 3;
2410     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2411     cpl = env->hflags & HF_CPL_MASK;
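         /* VERR: the segment must be readable and, unless conforming, reachable at the current CPL and RPL */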
2412     if (e2 & DESC_CS_MASK) {
2413         if (!(e2 & DESC_R_MASK)) {
2414             goto fail;
2415         }
2416         if (!(e2 & DESC_C_MASK)) {
2417             if (dpl < cpl || dpl < rpl) {
2418                 goto fail;
2419             }
2420         }
2421     } else {
2422         if (dpl < cpl || dpl < rpl) {
2423         fail:
2424             eflags &= ~CC_Z;
2425         }
2426     }
2427     CC_SRC = eflags;
2428     CC_OP = CC_OP_EFLAGS;
2429 }
2430 
2431 void helper_verw(CPUX86State *env, target_ulong selector1)
2432 {
2433     uint32_t e1, e2, eflags, selector;
2434     int rpl, dpl, cpl;
2435 
2436     selector = selector1 & 0xffff;
2437     eflags = cpu_cc_compute_all(env) | CC_Z;
2438     if ((selector & 0xfffc) == 0) {
2439         goto fail;
2440     }
2441     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2442         goto fail;
2443     }
2444     if (!(e2 & DESC_S_MASK)) {
2445         goto fail;
2446     }
2447     rpl = selector & 3;
2448     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2449     cpl = env->hflags & HF_CPL_MASK;
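         /* VERW: only writable data segments reachable at the current CPL and RPL qualify */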
2450     if (e2 & DESC_CS_MASK) {
2451         goto fail;
2452     } else {
2453         if (dpl < cpl || dpl < rpl) {
2454             goto fail;
2455         }
2456         if (!(e2 & DESC_W_MASK)) {
2457         fail:
2458             eflags &= ~CC_Z;
2459         }
2460     }
2461     CC_SRC = eflags;
2462     CC_OP = CC_OP_EFLAGS;
2463 }
2464