xref: /qemu/target/i386/tcg/seg_helper.c (revision 05d41bbcb34ee30465517229a888da93666b4f3f)
/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

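/*
 * StackAccess bundles everything the push/pop helpers below need to touch
 * the stack: the SS base, the current stack pointer and its mask, the MMU
 * index used for the accesses, and the return address for fault reporting.
 */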
/* XXX: use mmu_index to have proper DPL support */
typedef struct StackAccess
{
    CPUX86State *env;
    uintptr_t ra;
    target_ulong ss_base;
    target_ulong sp;
    target_ulong sp_mask;
    int mmu_index;
} StackAccess;

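/*
 * The helpers below mask the stack pointer with sp_mask and add ss_base on
 * every access; the caller writes the final sp back to ESP (via SET_ESP).
 */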
static void pushw(StackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static void pushl(StackAccess *sa, uint32_t val)
{
    sa->sp -= 4;
    cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static uint16_t popw(StackAccess *sa)
{
    uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
                                      sa->ss_base + (sa->sp & sa->sp_mask),
                                      sa->mmu_index, sa->ra);
    sa->sp += 2;
    return ret;
}

static uint32_t popl(StackAccess *sa)
{
    uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
                                     sa->ss_base + (sa->sp & sa->sp_mask),
                                     sa->mmu_index, sa->ra);
    sa->sp += 4;
    return ret;
}

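/* Compute the PG_MODE_* flags that describe the current paging mode. */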
int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non-zero on error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector,
                               uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

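/* Extract the 20-bit limit from a descriptor, scaled to bytes if G is set. */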
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

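/* Reassemble the 32-bit segment base scattered across the descriptor words. */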
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

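/* Fetch the privilege level 'dpl' stack pointer (SS:ESP) from the current TSS. */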
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

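/*
 * Load a segment register as part of a task switch, raising #TS with the
 * offending selector on any descriptor inconsistency.
 */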
static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* a code segment must be readable here */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* for data or non-conforming code segments, check the privilege levels */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

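/* Set or clear the busy bit of a TSS descriptor in the GDT. */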
static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

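/*
 * How the task switch was initiated; this determines how the busy bits of
 * the old and new TSS and the NT flag are updated.
 */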
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* return 0 if switching to a 16-bit TSS, 1 for a 32-bit TSS */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if it is a task gate, read the TSS descriptor it points to and load that */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* the new TSS must be busy iff the source is an IRET instruction */
    if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement the TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* from now on, if an exception occurs, it will occur in the new task's
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */
    /* from this point on, any exception that occurs can cause problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* first load everything that cannot fault, then reload the segment
       registers, which may raise exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* load just the selectors first, as loading the full descriptors
           may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is within the CS segment limit */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

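/*
 * Stack-pointer mask implied by the SS descriptor: no masking in 64-bit
 * mode, 32 bits if the B flag is set, 16 bits otherwise.
 */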
static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

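/* Return 1 for fault-like exceptions, which push an RFLAGS image with RF=1. */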
static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else, including reserved exceptions, is a fault.  */
    return 1;
}

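/* Exceptions 8 (#DF), 10-14 and 17 (#AC) push an error code on the stack. */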
int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, eflags;
    int vm86 = env->eflags & VM_MASK;
    StackAccess sa;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (type == 5) {
        /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                sa.sp_mask = 0xffffffff;
            } else {
                sa.sp_mask = 0xffff;
            }
            sa.sp = env->regs[R_ESP];
            sa.ss_base = env->segs[R_SS].base;
            if (shift) {
                pushl(&sa, error_code);
            } else {
                pushw(&sa, error_code);
            }
            SET_ESP(sa.sp, sa.sp_mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        uint32_t esp;
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sa.sp = esp;
        sa.sp_mask = get_sp_mask(ss_e2);
        sa.ss_base = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
        sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
        sa.ss_base = env->segs[R_SS].base;
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF to 0, Intel leaves it
     * as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                pushl(&sa, env->segs[R_GS].selector);
                pushl(&sa, env->segs[R_FS].selector);
                pushl(&sa, env->segs[R_DS].selector);
                pushl(&sa, env->segs[R_ES].selector);
            }
            pushl(&sa, env->segs[R_SS].selector);
            pushl(&sa, env->regs[R_ESP]);
        }
        pushl(&sa, eflags);
        pushl(&sa, env->segs[R_CS].selector);
        pushl(&sa, old_eip);
        if (has_error_code) {
            pushl(&sa, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                pushw(&sa, env->segs[R_GS].selector);
                pushw(&sa, env->segs[R_FS].selector);
                pushw(&sa, env->segs[R_DS].selector);
                pushw(&sa, env->segs[R_ES].selector);
            }
            pushw(&sa, env->segs[R_SS].selector);
            pushw(&sa, env->regs[R_ESP]);
        }
        pushw(&sa, eflags);
        pushw(&sa, env->segs[R_CS].selector);
        pushw(&sa, old_eip);
        if (has_error_code) {
            pushw(&sa, error_code);
        }
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
                               get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(sa.sp, sa.sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

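/* 64-bit push/pop: there is no SS base or mask, the stack pointer is used directly. */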
static void pushq(StackAccess *sa, uint64_t val)
{
    sa->sp -= 8;
    cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
}

static uint64_t popq(StackAccess *sa)
{
    uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
    sa->sp += 8;
    return ret;
}

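/*
 * Read a stack pointer from the 64-bit TSS: levels 0-2 select RSP0-RSP2,
 * and callers pass ist + 3 to select IST1-IST7.
 */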
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* check that the new stack pointer is a canonical (sign-extended) address */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss, eflags;
    target_ulong old_eip, offset;
    bool set_rf;
    StackAccess sa;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = cpu_mmu_index_kernel(env);
    sa.sp_mask = -1;
    sa.ss_base = 0;
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
    }
    sa.sp &= ~0xfLL; /* align stack */

    /* See do_interrupt_protected.  */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    pushq(&sa, env->segs[R_SS].selector);
    pushq(&sa, env->regs[R_ESP]);
    pushq(&sa, eflags);
    pushq(&sa, env->segs[R_CS].selector);
    pushq(&sa, old_eip);
    if (has_error_code) {
        pushq(&sa, error_code);
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = sa.sp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

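/*
 * SYSRET: return from a SYSCALL fast system call.  The CS and SS selectors
 * are derived from MSR_STAR; in long mode RFLAGS is restored from R11 and
 * RIP from RCX.
 */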
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int selector;
    uint32_t offset;
    uint32_t old_cs, old_eip;
    StackAccess sa;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);

    sa.env = env;
    sa.ra = 0;
    sa.sp = env->regs[R_ESP];
    sa.sp_mask = 0xffff;
    sa.ss_base = env->segs[R_SS].base;
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    pushw(&sa, cpu_compute_eflags(env));
    pushw(&sa, old_cs);
    pushw(&sa, old_eip);

    /* update processor state */
    SET_ESP(sa.sp, sa.sp_mask);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from an
 * int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

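/* LLDT: load the local descriptor table register from a GDT selector. */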
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

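/* LTR: load the task register and mark the referenced TSS descriptor busy. */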
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

/* only works in protected mode, outside VM86 mode. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be a writable data segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be a readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not a conforming code segment, check the privilege levels */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

1440 /* protected mode jump */
1441 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1442                            target_ulong next_eip)
1443 {
1444     int gate_cs, type;
1445     uint32_t e1, e2, cpl, dpl, rpl, limit;
1446 
1447     if ((new_cs & 0xfffc) == 0) {
1448         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1449     }
1450     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1451         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1452     }
1453     cpl = env->hflags & HF_CPL_MASK;
1454     if (e2 & DESC_S_MASK) {
1455         if (!(e2 & DESC_CS_MASK)) {
1456             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1457         }
1458         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1459         if (e2 & DESC_C_MASK) {
1460             /* conforming code segment */
1461             if (dpl > cpl) {
1462                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1463             }
1464         } else {
1465             /* non conforming code segment */
1466             rpl = new_cs & 3;
1467             if (rpl > cpl) {
1468                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1469             }
1470             if (dpl != cpl) {
1471                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1472             }
1473         }
1474         if (!(e2 & DESC_P_MASK)) {
1475             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1476         }
1477         limit = get_seg_limit(e1, e2);
1478         if (new_eip > limit &&
1479             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1480             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1481         }
1482         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1483                        get_seg_base(e1, e2), limit, e2);
1484         env->eip = new_eip;
1485     } else {
1486         /* jump to call or task gate */
1487         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1488         rpl = new_cs & 3;
1489         cpl = env->hflags & HF_CPL_MASK;
1490         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1491 
1492 #ifdef TARGET_X86_64
1493         if (env->efer & MSR_EFER_LMA) {
1494             if (type != 12) {
1495                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1496             }
1497         }
1498 #endif
1499         switch (type) {
1500         case 1: /* 286 TSS */
1501         case 9: /* 386 TSS */
1502         case 5: /* task gate */
1503             if (dpl < cpl || dpl < rpl) {
1504                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1505             }
1506             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1507             break;
1508         case 4: /* 286 call gate */
1509         case 12: /* 386 call gate */
1510             if ((dpl < cpl) || (dpl < rpl)) {
1511                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1512             }
1513             if (!(e2 & DESC_P_MASK)) {
1514                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1515             }
1516             gate_cs = e1 >> 16;
1517             new_eip = (e1 & 0xffff);
1518             if (type == 12) {
1519                 new_eip |= (e2 & 0xffff0000);
1520             }
1521 
1522 #ifdef TARGET_X86_64
1523             if (env->efer & MSR_EFER_LMA) {
1524                 /* load the upper 8 bytes of the 64-bit call gate */
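                     /* it holds bits 63:32 of the target offset, and its
                        type field must be zero */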
1525                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1526                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1527                                            GETPC());
1528                 }
1529                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1530                 if (type != 0) {
1531                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1532                                            GETPC());
1533                 }
1534                 new_eip |= ((target_ulong)e1) << 32;
1535             }
1536 #endif
1537 
1538             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1539                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1540             }
1541             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1542             /* must be code segment */
1543             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1544                  (DESC_S_MASK | DESC_CS_MASK))) {
1545                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1546             }
1547             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1548                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1549                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1550             }
1551 #ifdef TARGET_X86_64
1552             if (env->efer & MSR_EFER_LMA) {
1553                 if (!(e2 & DESC_L_MASK)) {
1554                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1555                 }
1556                 if (e2 & DESC_B_MASK) {
1557                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1558                 }
1559             }
1560 #endif
1561             if (!(e2 & DESC_P_MASK)) {
1562                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1563             }
1564             limit = get_seg_limit(e1, e2);
1565             if (new_eip > limit &&
1566                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1567                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1568             }
1569             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1570                                    get_seg_base(e1, e2), limit, e2);
1571             env->eip = new_eip;
1572             break;
1573         default:
1574             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1575             break;
1576         }
1577     }
1578 }
1579 
1580 /* real mode call */
1581 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1582                        int shift, uint32_t next_eip)
1583 {
1584     StackAccess sa;
1585 
1586     sa.env = env;
1587     sa.ra = GETPC();
1588     sa.sp = env->regs[R_ESP];
1589     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1590     sa.ss_base = env->segs[R_SS].base;
1591     sa.mmu_index = cpu_mmu_index_kernel(env);
1592 
1593     if (shift) {
1594         pushl(&sa, env->segs[R_CS].selector);
1595         pushl(&sa, next_eip);
1596     } else {
1597         pushw(&sa, env->segs[R_CS].selector);
1598         pushw(&sa, next_eip);
1599     }
1600 
1601     SET_ESP(sa.sp, sa.sp_mask);
1602     env->eip = new_eip;
1603     env->segs[R_CS].selector = new_cs;
1604     env->segs[R_CS].base = (new_cs << 4);
1605 }
1606 
1607 /* protected mode call */
1608 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1609                             int shift, target_ulong next_eip)
1610 {
1611     int new_stack, i;
1612     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1613     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1614     uint32_t val, limit, old_sp_mask;
1615     target_ulong old_ssp, offset;
1616     StackAccess sa;
1617 
1618     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1619     LOG_PCALL_STATE(env_cpu(env));
1620     if ((new_cs & 0xfffc) == 0) {
1621         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1622     }
1623     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1624         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1625     }
1626     cpl = env->hflags & HF_CPL_MASK;
1627     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1628 
1629     sa.env = env;
1630     sa.ra = GETPC();
1631     sa.mmu_index = cpu_mmu_index_kernel(env);
1632 
1633     if (e2 & DESC_S_MASK) {
1634         if (!(e2 & DESC_CS_MASK)) {
1635             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1636         }
1637         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1638         if (e2 & DESC_C_MASK) {
1639             /* conforming code segment */
1640             if (dpl > cpl) {
1641                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1642             }
1643         } else {
1644             /* non-conforming code segment */
1645             rpl = new_cs & 3;
1646             if (rpl > cpl) {
1647                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1648             }
1649             if (dpl != cpl) {
1650                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1651             }
1652         }
1653         if (!(e2 & DESC_P_MASK)) {
1654             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1655         }
1656 
1657 #ifdef TARGET_X86_64
1658         /* XXX: check 16/32 bit cases in long mode */
1659         if (shift == 2) {
1660             /* 64 bit case */
1661             sa.sp = env->regs[R_ESP];
1662             sa.sp_mask = -1;
1663             sa.ss_base = 0;
1664             pushq(&sa, env->segs[R_CS].selector);
1665             pushq(&sa, next_eip);
1666             /* from this point, not restartable */
1667             env->regs[R_ESP] = sa.sp;
1668             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1669                                    get_seg_base(e1, e2),
1670                                    get_seg_limit(e1, e2), e2);
1671             env->eip = new_eip;
1672         } else
1673 #endif
1674         {
1675             sa.sp = env->regs[R_ESP];
1676             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1677             sa.ss_base = env->segs[R_SS].base;
1678             if (shift) {
1679                 pushl(&sa, env->segs[R_CS].selector);
1680                 pushl(&sa, next_eip);
1681             } else {
1682                 pushw(&sa, env->segs[R_CS].selector);
1683                 pushw(&sa, next_eip);
1684             }
1685 
1686             limit = get_seg_limit(e1, e2);
1687             if (new_eip > limit) {
1688                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1689             }
1690             /* from this point, not restartable */
1691             SET_ESP(sa.sp, sa.sp_mask);
1692             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1693                                    get_seg_base(e1, e2), limit, e2);
1694             env->eip = new_eip;
1695         }
1696     } else {
1697         /* check gate type */
1698         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1699         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1700         rpl = new_cs & 3;
1701 
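             /* in IA-32e mode far calls may only go through 64-bit call
                gates; task switches are not available in long mode */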
1702 #ifdef TARGET_X86_64
1703         if (env->efer & MSR_EFER_LMA) {
1704             if (type != 12) {
1705                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1706             }
1707         }
1708 #endif
1709 
1710         switch (type) {
1711         case 1: /* available 286 TSS */
1712         case 9: /* available 386 TSS */
1713         case 5: /* task gate */
1714             if (dpl < cpl || dpl < rpl) {
1715                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1716             }
1717             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1718             return;
1719         case 4: /* 286 call gate */
1720         case 12: /* 386 call gate */
1721             break;
1722         default:
1723             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1724             break;
1725         }
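             /* gate size: type 4 gives shift 0 (16 bit), type 12 gives
                shift 1 (32 bit); bumped to 2 below for 64-bit gates */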
1726         shift = type >> 3;
1727 
1728         if (dpl < cpl || dpl < rpl) {
1729             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1730         }
1731         /* check valid bit */
1732         if (!(e2 & DESC_P_MASK)) {
1733             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1734         }
1735         selector = e1 >> 16;
1736         param_count = e2 & 0x1f;
1737         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1738 #ifdef TARGET_X86_64
1739         if (env->efer & MSR_EFER_LMA) {
1740             /* load the upper 8 bytes of the 64-bit call gate */
1741             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1742                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1743                                        GETPC());
1744             }
1745             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1746             if (type != 0) {
1747                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1748                                        GETPC());
1749             }
1750             offset |= ((target_ulong)e1) << 32;
1751         }
1752 #endif
1753         if ((selector & 0xfffc) == 0) {
1754             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1755         }
1756 
1757         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1758             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1759         }
1760         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1761             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1762         }
1763         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1764         if (dpl > cpl) {
1765             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1766         }
1767 #ifdef TARGET_X86_64
1768         if (env->efer & MSR_EFER_LMA) {
1769             if (!(e2 & DESC_L_MASK)) {
1770                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1771             }
1772             if (e2 & DESC_B_MASK) {
1773                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1774             }
1775             shift++;
1776         }
1777 #endif
1778         if (!(e2 & DESC_P_MASK)) {
1779             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1780         }
1781 
1782         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1783             /* to inner privilege */
1784 #ifdef TARGET_X86_64
1785             if (shift == 2) {
1786                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1787                 new_stack = 1;
1788                 sa.sp = get_rsp_from_tss(env, dpl);
1789                 sa.sp_mask = -1;
1790                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1791                 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1792                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1793             } else
1794 #endif
1795             {
1796                 uint32_t sp32;
1797                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1798                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1799                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1800                           env->regs[R_ESP]);
1801                 if ((ss & 0xfffc) == 0) {
1802                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1803                 }
1804                 if ((ss & 3) != dpl) {
1805                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1806                 }
1807                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1808                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1809                 }
1810                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1811                 if (ss_dpl != dpl) {
1812                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1813                 }
1814                 if (!(ss_e2 & DESC_S_MASK) ||
1815                     (ss_e2 & DESC_CS_MASK) ||
1816                     !(ss_e2 & DESC_W_MASK)) {
1817                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1818                 }
1819                 if (!(ss_e2 & DESC_P_MASK)) {
1820                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1821                 }
1822 
1823                 sa.sp = sp32;
1824                 sa.sp_mask = get_sp_mask(ss_e2);
1825                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1826             }
1827 
1828             /* push_size = ((param_count * 2) + 8) << shift; */
1829             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1830             old_ssp = env->segs[R_SS].base;
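                 /* push the caller's SS:ESP on the new stack, then copy
                    param_count entries from the old stack (16/32-bit
                    gates only) */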
1831 
1832 #ifdef TARGET_X86_64
1833             if (shift == 2) {
1834                 /* XXX: verify if new stack address is canonical */
1835                 pushq(&sa, env->segs[R_SS].selector);
1836                 pushq(&sa, env->regs[R_ESP]);
1837                 /* parameters aren't supported for 64-bit call gates */
1838             } else
1839 #endif
1840             if (shift == 1) {
1841                 pushl(&sa, env->segs[R_SS].selector);
1842                 pushl(&sa, env->regs[R_ESP]);
1843                 for (i = param_count - 1; i >= 0; i--) {
1844                     val = cpu_ldl_data_ra(env,
1845                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1846                                           GETPC());
1847                     pushl(&sa, val);
1848                 }
1849             } else {
1850                 pushw(&sa, env->segs[R_SS].selector);
1851                 pushw(&sa, env->regs[R_ESP]);
1852                 for (i = param_count - 1; i >= 0; i--) {
1853                     val = cpu_lduw_data_ra(env,
1854                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1855                                            GETPC());
1856                     pushw(&sa, val);
1857                 }
1858             }
1859             new_stack = 1;
1860         } else {
1861             /* to same privilege */
1862             sa.sp = env->regs[R_ESP];
1863             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1864             sa.ss_base = env->segs[R_SS].base;
1865             /* push_size = (4 << shift); */
1866             new_stack = 0;
1867         }
1868 
1869 #ifdef TARGET_X86_64
1870         if (shift == 2) {
1871             pushq(&sa, env->segs[R_CS].selector);
1872             pushq(&sa, next_eip);
1873         } else
1874 #endif
1875         if (shift == 1) {
1876             pushl(&sa, env->segs[R_CS].selector);
1877             pushl(&sa, next_eip);
1878         } else {
1879             pushw(&sa, env->segs[R_CS].selector);
1880             pushw(&sa, next_eip);
1881         }
1882 
1883         /* from this point, not restartable */
1884 
1885         if (new_stack) {
1886 #ifdef TARGET_X86_64
1887             if (shift == 2) {
1888                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1889             } else
1890 #endif
1891             {
1892                 ss = (ss & ~3) | dpl;
1893                 cpu_x86_load_seg_cache(env, R_SS, ss,
1894                                        sa.ss_base,
1895                                        get_seg_limit(ss_e1, ss_e2),
1896                                        ss_e2);
1897             }
1898         }
1899 
1900         selector = (selector & ~3) | dpl;
1901         cpu_x86_load_seg_cache(env, R_CS, selector,
1902                        get_seg_base(e1, e2),
1903                        get_seg_limit(e1, e2),
1904                        e2);
1905         SET_ESP(sa.sp, sa.sp_mask);
1906         env->eip = offset;
1907     }
1908 }
1909 
1910 /* real and vm86 mode iret */
1911 void helper_iret_real(CPUX86State *env, int shift)
1912 {
1913     uint32_t new_cs, new_eip, new_eflags;
1914     int eflags_mask;
1915     StackAccess sa;
1916 
1917     sa.env = env;
1918     sa.ra = GETPC();
1919     sa.mmu_index = x86_mmu_index_pl(env, 0);
1920     sa.sp_mask = 0xffff; /* XXX: use SS segment size? */
1921     sa.sp = env->regs[R_ESP];
1922     sa.ss_base = env->segs[R_SS].base;
1923 
1924     if (shift == 1) {
1925         /* 32 bits */
1926         new_eip = popl(&sa);
1927         new_cs = popl(&sa) & 0xffff;
1928         new_eflags = popl(&sa);
1929     } else {
1930         /* 16 bits */
1931         new_eip = popw(&sa);
1932         new_cs = popw(&sa);
1933         new_eflags = popw(&sa);
1934     }
1935     SET_ESP(sa.sp, sa.sp_mask);
1936     env->segs[R_CS].selector = new_cs;
1937     env->segs[R_CS].base = (new_cs << 4);
1938     env->eip = new_eip;
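         /* IOPL cannot be modified by an IRET executed in vm86 mode */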
1939     if (env->eflags & VM_MASK) {
1940         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1941             NT_MASK;
1942     } else {
1943         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1944             RF_MASK | NT_MASK;
1945     }
1946     if (shift == 0) {
1947         eflags_mask &= 0xffff;
1948     }
1949     cpu_load_eflags(env, new_eflags, eflags_mask);
1950     env->hflags2 &= ~HF2_NMI_MASK;
1951 }
1952 
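     /* When returning to an outer privilege level, a data segment register
        that is not accessible at the new CPL is invalidated: the selector
        is cleared and the cached descriptor loses its present bit. */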
1953 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1954 {
1955     int dpl;
1956     uint32_t e2;
1957 
1958     /* XXX: on x86_64, we do not want to nullify FS and GS because
1959        they may still contain a valid base. I would be interested to
1960        know how a real x86_64 CPU behaves */
1961     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1962         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1963         return;
1964     }
1965 
1966     e2 = env->segs[seg_reg].flags;
1967     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1968     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1969         /* data or non-conforming code segment */
1970         if (dpl < cpl) {
1971             cpu_x86_load_seg_cache(env, seg_reg, 0,
1972                                    env->segs[seg_reg].base,
1973                                    env->segs[seg_reg].limit,
1974                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
1975         }
1976     }
1977 }
1978 
1979 /* protected mode iret */
1980 static inline void helper_ret_protected(CPUX86State *env, int shift,
1981                                         int is_iret, int addend,
1982                                         uintptr_t retaddr)
1983 {
1984     uint32_t new_cs, new_eflags, new_ss;
1985     uint32_t new_es, new_ds, new_fs, new_gs;
1986     uint32_t e1, e2, ss_e1, ss_e2;
1987     int cpl, dpl, rpl, eflags_mask, iopl;
1988     target_ulong new_eip, new_esp;
1989     StackAccess sa;
1990 
1991     cpl = env->hflags & HF_CPL_MASK;
1992 
1993     sa.env = env;
1994     sa.ra = retaddr;
1995     sa.mmu_index = x86_mmu_index_pl(env, cpl);
1996 
1997 #ifdef TARGET_X86_64
1998     if (shift == 2) {
1999         sa.sp_mask = -1;
2000     } else
2001 #endif
2002     {
2003         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
2004     }
2005     sa.sp = env->regs[R_ESP];
2006     sa.ss_base = env->segs[R_SS].base;
2007     new_eflags = 0; /* avoid warning */
2008 #ifdef TARGET_X86_64
2009     if (shift == 2) {
2010         new_eip = popq(&sa);
2011         new_cs = popq(&sa) & 0xffff;
2012         if (is_iret) {
2013             new_eflags = popq(&sa);
2014         }
2015     } else
2016 #endif
2017     {
2018         if (shift == 1) {
2019             /* 32 bits */
2020             new_eip = popl(&sa);
2021             new_cs = popl(&sa) & 0xffff;
2022             if (is_iret) {
2023                 new_eflags = popl(&sa);
2024                 if (new_eflags & VM_MASK) {
2025                     goto return_to_vm86;
2026                 }
2027             }
2028         } else {
2029             /* 16 bits */
2030             new_eip = popw(&sa);
2031             new_cs = popw(&sa);
2032             if (is_iret) {
2033                 new_eflags = popw(&sa);
2034             }
2035         }
2036     }
2037     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2038               new_cs, new_eip, shift, addend);
2039     LOG_PCALL_STATE(env_cpu(env));
2040     if ((new_cs & 0xfffc) == 0) {
2041         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2042     }
2043     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2044         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2045     }
2046     if (!(e2 & DESC_S_MASK) ||
2047         !(e2 & DESC_CS_MASK)) {
2048         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2049     }
2050     rpl = new_cs & 3;
2051     if (rpl < cpl) {
2052         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2053     }
2054     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2055     if (e2 & DESC_C_MASK) {
2056         if (dpl > rpl) {
2057             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2058         }
2059     } else {
2060         if (dpl != rpl) {
2061             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2062         }
2063     }
2064     if (!(e2 & DESC_P_MASK)) {
2065         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2066     }
2067 
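         /* the lret imm16 operand (addend, 0 for iret) releases parameters
            on the called procedure's stack */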
2068     sa.sp += addend;
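         /* in 64-bit mode iret always reloads SS:RSP, so even a
            same-privilege return takes the else path below */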
2069     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2070                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2071         /* return to same privilege level */
2072         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2073                        get_seg_base(e1, e2),
2074                        get_seg_limit(e1, e2),
2075                        e2);
2076     } else {
2077         /* return to different privilege level */
2078 #ifdef TARGET_X86_64
2079         if (shift == 2) {
2080             new_esp = popq(&sa);
2081             new_ss = popq(&sa) & 0xffff;
2082         } else
2083 #endif
2084         {
2085             if (shift == 1) {
2086                 /* 32 bits */
2087                 new_esp = popl(&sa);
2088                 new_ss = popl(&sa) & 0xffff;
2089             } else {
2090                 /* 16 bits */
2091                 new_esp = popw(&sa);
2092                 new_ss = popw(&sa);
2093             }
2094         }
2095         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2096                   new_ss, new_esp);
2097         if ((new_ss & 0xfffc) == 0) {
2098 #ifdef TARGET_X86_64
2099             /* NULL ss is allowed in long mode if cpl != 3 */
2100             /* XXX: test CS64? */
2101             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2102                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2103                                        0, 0xffffffff,
2104                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2105                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2106                                        DESC_W_MASK | DESC_A_MASK);
2107                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2108             } else
2109 #endif
2110             {
2111                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2112             }
2113         } else {
2114             if ((new_ss & 3) != rpl) {
2115                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2116             }
2117             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2118                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2119             }
2120             if (!(ss_e2 & DESC_S_MASK) ||
2121                 (ss_e2 & DESC_CS_MASK) ||
2122                 !(ss_e2 & DESC_W_MASK)) {
2123                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2124             }
2125             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2126             if (dpl != rpl) {
2127                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2128             }
2129             if (!(ss_e2 & DESC_P_MASK)) {
2130                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2131             }
2132             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2133                                    get_seg_base(ss_e1, ss_e2),
2134                                    get_seg_limit(ss_e1, ss_e2),
2135                                    ss_e2);
2136         }
2137 
2138         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2139                        get_seg_base(e1, e2),
2140                        get_seg_limit(e1, e2),
2141                        e2);
2142         sa.sp = new_esp;
2143 #ifdef TARGET_X86_64
2144         if (env->hflags & HF_CS64_MASK) {
2145             sa.sp_mask = -1;
2146         } else
2147 #endif
2148         {
2149             sa.sp_mask = get_sp_mask(ss_e2);
2150         }
2151 
2152         /* validate data segments */
2153         validate_seg(env, R_ES, rpl);
2154         validate_seg(env, R_DS, rpl);
2155         validate_seg(env, R_FS, rpl);
2156         validate_seg(env, R_GS, rpl);
2157 
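             /* the lret imm16 operand also releases parameters on the
                caller's (outer) stack after the stack switch */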
2158         sa.sp += addend;
2159     }
2160     SET_ESP(sa.sp, sa.sp_mask);
2161     env->eip = new_eip;
2162     if (is_iret) {
2163         /* NOTE: 'cpl' is the _old_ CPL */
2164         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2165         if (cpl == 0) {
2166             eflags_mask |= IOPL_MASK;
2167         }
2168         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2169         if (cpl <= iopl) {
2170             eflags_mask |= IF_MASK;
2171         }
2172         if (shift == 0) {
2173             eflags_mask &= 0xffff;
2174         }
2175         cpu_load_eflags(env, new_eflags, eflags_mask);
2176     }
2177     return;
2178 
2179  return_to_vm86:
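         /* remainder of the 32-bit frame for a return to vm86: EIP, CS and
            EFLAGS were popped above, ESP, SS and the data segment
            selectors follow */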
2180     new_esp = popl(&sa);
2181     new_ss = popl(&sa);
2182     new_es = popl(&sa);
2183     new_ds = popl(&sa);
2184     new_fs = popl(&sa);
2185     new_gs = popl(&sa);
2186 
2187     /* modify processor state */
2188     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2189                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2190                     VIP_MASK);
2191     load_seg_vm(env, R_CS, new_cs & 0xffff);
2192     load_seg_vm(env, R_SS, new_ss & 0xffff);
2193     load_seg_vm(env, R_ES, new_es & 0xffff);
2194     load_seg_vm(env, R_DS, new_ds & 0xffff);
2195     load_seg_vm(env, R_FS, new_fs & 0xffff);
2196     load_seg_vm(env, R_GS, new_gs & 0xffff);
2197 
2198     env->eip = new_eip & 0xffff;
2199     env->regs[R_ESP] = new_esp;
2200 }
2201 
2202 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2203 {
2204     int tss_selector, type;
2205     uint32_t e1, e2;
2206 
2207     /* if NT is set, IRET returns to the previous task via the TSS back link */
2208     if (env->eflags & NT_MASK) {
2209 #ifdef TARGET_X86_64
2210         if (env->hflags & HF_LMA_MASK) {
2211             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2212         }
2213 #endif
2214         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2215         if (tss_selector & 4) {
2216             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2217         }
2218         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2219             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2220         }
2221         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2222         /* NOTE: the 0x17 mask accepts both the busy 286 and 386 TSS types */
2223         if (type != 3) {
2224             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2225         }
2226         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2227     } else {
2228         helper_ret_protected(env, shift, 1, 0, GETPC());
2229     }
2230     env->hflags2 &= ~HF2_NMI_MASK;
2231 }
2232 
2233 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2234 {
2235     helper_ret_protected(env, shift, 0, addend, GETPC());
2236 }
2237 
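     /* SYSENTER: enter CPL 0 with flat CS/SS derived from IA32_SYSENTER_CS
        (SS = CS + 8) and EIP/ESP taken from IA32_SYSENTER_EIP/ESP; VM, IF
        and RF are cleared */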
2238 void helper_sysenter(CPUX86State *env)
2239 {
2240     if (env->sysenter_cs == 0) {
2241         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2242     }
2243     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2244 
2245 #ifdef TARGET_X86_64
2246     if (env->hflags & HF_LMA_MASK) {
2247         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2248                                0, 0xffffffff,
2249                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2250                                DESC_S_MASK |
2251                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2252                                DESC_L_MASK);
2253     } else
2254 #endif
2255     {
2256         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2257                                0, 0xffffffff,
2258                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2259                                DESC_S_MASK |
2260                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2261     }
2262     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2263                            0, 0xffffffff,
2264                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2265                            DESC_S_MASK |
2266                            DESC_W_MASK | DESC_A_MASK);
2267     env->regs[R_ESP] = env->sysenter_esp;
2268     env->eip = env->sysenter_eip;
2269 }
2270 
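     /* SYSEXIT: return to CPL 3 with CS = IA32_SYSENTER_CS + 16 (or + 32
        for a 64-bit return), SS = CS + 8, EIP from EDX/RDX and ESP from
        ECX/RCX */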
2271 void helper_sysexit(CPUX86State *env, int dflag)
2272 {
2273     int cpl;
2274 
2275     cpl = env->hflags & HF_CPL_MASK;
2276     if (env->sysenter_cs == 0 || cpl != 0) {
2277         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2278     }
2279 #ifdef TARGET_X86_64
2280     if (dflag == 2) {
2281         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2282                                3, 0, 0xffffffff,
2283                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2284                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2285                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2286                                DESC_L_MASK);
2287         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2288                                3, 0, 0xffffffff,
2289                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2290                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2291                                DESC_W_MASK | DESC_A_MASK);
2292     } else
2293 #endif
2294     {
2295         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2296                                3, 0, 0xffffffff,
2297                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2298                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2299                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2300         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2301                                3, 0, 0xffffffff,
2302                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2303                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2304                                DESC_W_MASK | DESC_A_MASK);
2305     }
2306     env->regs[R_ESP] = env->regs[R_ECX];
2307     env->eip = env->regs[R_EDX];
2308 }
2309 
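     /* LSL: load the limit of the selected descriptor and report success
        in ZF; only segments and LDT/TSS descriptors are accepted, subject
        to the usual DPL/RPL visibility rules */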
2310 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2311 {
2312     unsigned int limit;
2313     uint32_t e1, e2, selector;
2314     int rpl, dpl, cpl, type;
2315 
2316     selector = selector1 & 0xffff;
2317     assert(CC_OP == CC_OP_EFLAGS);
2318     if ((selector & 0xfffc) == 0) {
2319         goto fail;
2320     }
2321     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2322         goto fail;
2323     }
2324     rpl = selector & 3;
2325     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2326     cpl = env->hflags & HF_CPL_MASK;
2327     if (e2 & DESC_S_MASK) {
2328         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2329             /* conforming */
2330         } else {
2331             if (dpl < cpl || dpl < rpl) {
2332                 goto fail;
2333             }
2334         }
2335     } else {
2336         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2337         switch (type) {
2338         case 1:
2339         case 2:
2340         case 3:
2341         case 9:
2342         case 11:
2343             break;
2344         default:
2345             goto fail;
2346         }
2347         if (dpl < cpl || dpl < rpl) {
2348         fail:
2349             CC_SRC &= ~CC_Z;
2350             return 0;
2351         }
2352     }
2353     limit = get_seg_limit(e1, e2);
2354     CC_SRC |= CC_Z;
2355     return limit;
2356 }
2357 
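     /* LAR: load the access rights bytes (masked to 0x00f0ff00) of the
        selected descriptor and report success in ZF; call and task gates
        are accepted in addition to segments, LDTs and TSSs */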
2358 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2359 {
2360     uint32_t e1, e2, selector;
2361     int rpl, dpl, cpl, type;
2362 
2363     selector = selector1 & 0xffff;
2364     assert(CC_OP == CC_OP_EFLAGS);
2365     if ((selector & 0xfffc) == 0) {
2366         goto fail;
2367     }
2368     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2369         goto fail;
2370     }
2371     rpl = selector & 3;
2372     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2373     cpl = env->hflags & HF_CPL_MASK;
2374     if (e2 & DESC_S_MASK) {
2375         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2376             /* conforming */
2377         } else {
2378             if (dpl < cpl || dpl < rpl) {
2379                 goto fail;
2380             }
2381         }
2382     } else {
2383         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2384         switch (type) {
2385         case 1:
2386         case 2:
2387         case 3:
2388         case 4:
2389         case 5:
2390         case 9:
2391         case 11:
2392         case 12:
2393             break;
2394         default:
2395             goto fail;
2396         }
2397         if (dpl < cpl || dpl < rpl) {
2398         fail:
2399             CC_SRC &= ~CC_Z;
2400             return 0;
2401         }
2402     }
2403     CC_SRC |= CC_Z;
2404     return e2 & 0x00f0ff00;
2405 }
2406 
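     /* VERR: set ZF if the segment is readable at the current CPL/RPL */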
2407 void helper_verr(CPUX86State *env, target_ulong selector1)
2408 {
2409     uint32_t e1, e2, eflags, selector;
2410     int rpl, dpl, cpl;
2411 
2412     selector = selector1 & 0xffff;
2413     eflags = cpu_cc_compute_all(env) | CC_Z;
2414     if ((selector & 0xfffc) == 0) {
2415         goto fail;
2416     }
2417     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2418         goto fail;
2419     }
2420     if (!(e2 & DESC_S_MASK)) {
2421         goto fail;
2422     }
2423     rpl = selector & 3;
2424     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2425     cpl = env->hflags & HF_CPL_MASK;
2426     if (e2 & DESC_CS_MASK) {
2427         if (!(e2 & DESC_R_MASK)) {
2428             goto fail;
2429         }
2430         if (!(e2 & DESC_C_MASK)) {
2431             if (dpl < cpl || dpl < rpl) {
2432                 goto fail;
2433             }
2434         }
2435     } else {
2436         if (dpl < cpl || dpl < rpl) {
2437         fail:
2438             eflags &= ~CC_Z;
2439         }
2440     }
2441     CC_SRC = eflags;
2442     CC_OP = CC_OP_EFLAGS;
2443 }
2444 
2445 void helper_verw(CPUX86State *env, target_ulong selector1)
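     /* VERW: set ZF if the segment is a data segment writable at the
        current CPL/RPL */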
2446 {
2447     uint32_t e1, e2, eflags, selector;
2448     int rpl, dpl, cpl;
2449 
2450     selector = selector1 & 0xffff;
2451     eflags = cpu_cc_compute_all(env) | CC_Z;
2452     if ((selector & 0xfffc) == 0) {
2453         goto fail;
2454     }
2455     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2456         goto fail;
2457     }
2458     if (!(e2 & DESC_S_MASK)) {
2459         goto fail;
2460     }
2461     rpl = selector & 3;
2462     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2463     cpl = env->hflags & HF_CPL_MASK;
2464     if (e2 & DESC_CS_MASK) {
2465         goto fail;
2466     } else {
2467         if (dpl < cpl || dpl < rpl) {
2468             goto fail;
2469         }
2470         if (!(e2 & DESC_W_MASK)) {
2471         fail:
2472             eflags &= ~CC_Z;
2473         }
2474     }
2475     CC_SRC = eflags;
2476     CC_OP = CC_OP_EFLAGS;
2477 }
2478