xref: /qemu/target/i386/tcg/seg_helper.c (revision 30ca39244bccc93d90a9a763e7e3d115ba089c13)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 
31 /* return non-zero on error */
32 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
33                                uint32_t *e2_ptr, int selector,
34                                uintptr_t retaddr)
35 {
36     SegmentCache *dt;
37     int index;
38     target_ulong ptr;
39 
40     if (selector & 0x4) {
41         dt = &env->ldt;
42     } else {
43         dt = &env->gdt;
44     }
45     index = selector & ~7;
46     if ((index + 7) > dt->limit) {
47         return -1;
48     }
49     ptr = dt->base + index;
50     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
51     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
52     return 0;
53 }
54 
55 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
56                                uint32_t *e2_ptr, int selector)
57 {
58     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
59 }
60 
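/*
 * A segment descriptor is read as two 32-bit words: e1 holds limit[15:0]
 * and base[15:0]; e2 holds base[23:16], the type/DPL/present flags,
 * limit[19:16], the AVL/L/D-B/G bits and base[31:24].  When the
 * granularity bit (G) is set the 20-bit limit is counted in 4 KiB units,
 * hence the "(limit << 12) | 0xfff" below.
 */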
61 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
62 {
63     unsigned int limit;
64 
65     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
66     if (e2 & DESC_G_MASK) {
67         limit = (limit << 12) | 0xfff;
68     }
69     return limit;
70 }
71 
72 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
73 {
74     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
75 }
76 
77 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
78                                          uint32_t e2)
79 {
80     sc->base = get_seg_base(e1, e2);
81     sc->limit = get_seg_limit(e1, e2);
82     sc->flags = e2;
83 }
84 
85 /* init the segment cache in vm86 mode. */
86 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
87 {
88     selector &= 0xffff;
89 
90     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
91                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
92                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
93 }
94 
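/*
 * Fetch the inner-level SS:ESP pair from the current TSS.  In a 32-bit
 * TSS, ESP for privilege level 'dpl' is at offset 4 + 8 * dpl and SS at
 * 8 + 8 * dpl; a 16-bit TSS packs the same pair at 2 + 4 * dpl.  The
 * shift derived from the TSS type below selects between the two layouts.
 */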
95 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
96                                        uint32_t *esp_ptr, int dpl,
97                                        uintptr_t retaddr)
98 {
99     X86CPU *cpu = env_archcpu(env);
100     int type, index, shift;
101 
102 #if 0
103     {
104         int i;
105         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
106         for (i = 0; i < env->tr.limit; i++) {
107             printf("%02x ", env->tr.base[i]);
108             if ((i & 7) == 7) {
109                 printf("\n");
110             }
111         }
112         printf("\n");
113     }
114 #endif
115 
116     if (!(env->tr.flags & DESC_P_MASK)) {
117         cpu_abort(CPU(cpu), "invalid tss");
118     }
119     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
120     if ((type & 7) != 1) {
121         cpu_abort(CPU(cpu), "invalid tss type");
122     }
123     shift = type >> 3;
124     index = (dpl * 4 + 2) << shift;
125     if (index + (4 << shift) - 1 > env->tr.limit) {
126         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
127     }
128     if (shift == 0) {
129         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
130         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
131     } else {
132         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
133         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
134     }
135 }
136 
137 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
138                          int cpl, uintptr_t retaddr)
139 {
140     uint32_t e1, e2;
141     int rpl, dpl;
142 
143     if ((selector & 0xfffc) != 0) {
144         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
145             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
146         }
147         if (!(e2 & DESC_S_MASK)) {
148             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
149         }
150         rpl = selector & 3;
151         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
152         if (seg_reg == R_CS) {
153             if (!(e2 & DESC_CS_MASK)) {
154                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
155             }
156             if (dpl != rpl) {
157                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
158             }
159         } else if (seg_reg == R_SS) {
160             /* SS must be writable data */
161             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
162                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
163             }
164             if (dpl != cpl || dpl != rpl) {
165                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
166             }
167         } else {
168             /* a code segment must be readable */
169             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
170                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
171             }
172             /* if data or non-conforming code, check the rights */
173             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
174                 if (dpl < cpl || dpl < rpl) {
175                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
176                 }
177             }
178         }
179         if (!(e2 & DESC_P_MASK)) {
180             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
181         }
182         cpu_x86_load_seg_cache(env, seg_reg, selector,
183                                get_seg_base(e1, e2),
184                                get_seg_limit(e1, e2),
185                                e2);
186     } else {
187         if (seg_reg == R_SS || seg_reg == R_CS) {
188             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
189         }
190     }
191 }
192 
193 #define SWITCH_TSS_JMP  0
194 #define SWITCH_TSS_IRET 1
195 #define SWITCH_TSS_CALL 2
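
/*
 * The task switch source decides how the busy bit and NT are handled:
 * JMP and IRET clear the old task's busy bit, JMP and CALL set the new
 * one, and only CALL links back to the old task (NT set and the old TR
 * selector stored in the new TSS).
 */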
196 
197 /* XXX: restore CPU state in registers (PowerPC case) */
198 static void switch_tss_ra(CPUX86State *env, int tss_selector,
199                           uint32_t e1, uint32_t e2, int source,
200                           uint32_t next_eip, uintptr_t retaddr)
201 {
202     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
203     target_ulong tss_base;
204     uint32_t new_regs[8], new_segs[6];
205     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
206     uint32_t old_eflags, eflags_mask;
207     SegmentCache *dt;
208     int index;
209     target_ulong ptr;
210 
211     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
212     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
213               source);
214 
215     /* if it is a task gate, read and load the TSS segment it points to */
216     if (type == 5) {
217         if (!(e2 & DESC_P_MASK)) {
218             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
219         }
220         tss_selector = e1 >> 16;
221         if (tss_selector & 4) {
222             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
223         }
224         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
225             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
226         }
227         if (e2 & DESC_S_MASK) {
228             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
229         }
230         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
231         if ((type & 7) != 1) {
232             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
233         }
234     }
235 
236     if (!(e2 & DESC_P_MASK)) {
237         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
238     }
239 
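    /* a 32-bit TSS (descriptor type with bit 3 set) must be at least
       104 bytes long (limit 103), a 16-bit TSS at least 44 bytes
       (limit 43) */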
240     if (type & 8) {
241         tss_limit_max = 103;
242     } else {
243         tss_limit_max = 43;
244     }
245     tss_limit = get_seg_limit(e1, e2);
246     tss_base = get_seg_base(e1, e2);
247     if ((tss_selector & 4) != 0 ||
248         tss_limit < tss_limit_max) {
249         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
250     }
251     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
252     if (old_type & 8) {
253         old_tss_limit_max = 103;
254     } else {
255         old_tss_limit_max = 43;
256     }
257 
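    /* 32-bit TSS layout: CR3 at offset 0x1c, EIP at 0x20, EFLAGS at 0x24,
       the eight general registers at 0x28, the six segment selectors at
       0x48, the LDT selector at 0x60 and the T bit / I/O map base at
       0x64.  The 16-bit TSS packs the equivalent fields in 2-byte slots
       starting at 0x0e. */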
258     /* read all the registers from the new TSS */
259     if (type & 8) {
260         /* 32 bit */
261         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
262         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
263         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
264         for (i = 0; i < 8; i++) {
265             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
266                                             retaddr);
267         }
268         for (i = 0; i < 6; i++) {
269             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
270                                              retaddr);
271         }
272         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
273         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
274     } else {
275         /* 16 bit */
276         new_cr3 = 0;
277         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
278         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
279         for (i = 0; i < 8; i++) {
280             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
281         }
282         for (i = 0; i < 4; i++) {
283             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
284                                              retaddr);
285         }
286         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
287         new_segs[R_FS] = 0;
288         new_segs[R_GS] = 0;
289         new_trap = 0;
290     }
291     /* XXX: avoid a compiler warning, see
292      http://support.amd.com/us/Processor_TechDocs/24593.pdf
293      chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
294     (void)new_trap;
295 
296     /* NOTE: we must avoid memory exceptions during the task switch,
297        so we make dummy accesses beforehand */
298     /* XXX: this can still fail in some cases, so a bigger hack is
299        necessary to validate the TLB after the accesses have been done */
300 
301     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
302     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
303     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
304     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
305 
306     /* clear the busy bit (the operation is restartable) */
307     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
308         target_ulong ptr;
309         uint32_t e2;
310 
311         ptr = env->gdt.base + (env->tr.selector & ~7);
312         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
313         e2 &= ~DESC_TSS_BUSY_MASK;
314         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
315     }
316     old_eflags = cpu_compute_eflags(env);
317     if (source == SWITCH_TSS_IRET) {
318         old_eflags &= ~NT_MASK;
319     }
320 
321     /* save the current state in the old TSS */
322     if (old_type & 8) {
323         /* 32 bit */
324         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
325         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
326         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
327         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
328         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
329         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
330         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
331         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
332         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
333         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
334         for (i = 0; i < 6; i++) {
335             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
336                               env->segs[i].selector, retaddr);
337         }
338     } else {
339         /* 16 bit */
340         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
341         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
342         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
343         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
344         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
345         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
346         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
347         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
348         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
349         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
350         for (i = 0; i < 4; i++) {
351             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
352                               env->segs[i].selector, retaddr);
353         }
354     }
355 
356     /* from now on, if an exception occurs, it will occur in the next
357        task's context */
358 
359     if (source == SWITCH_TSS_CALL) {
360         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
361         new_eflags |= NT_MASK;
362     }
363 
364     /* set busy bit */
365     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
366         target_ulong ptr;
367         uint32_t e2;
368 
369         ptr = env->gdt.base + (tss_selector & ~7);
370         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
371         e2 |= DESC_TSS_BUSY_MASK;
372         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
373     }
374 
375     /* set the new CPU state */
376     /* from this point on, any exception that occurs can cause problems */
377     env->cr[0] |= CR0_TS_MASK;
378     env->hflags |= HF_TS_MASK;
379     env->tr.selector = tss_selector;
380     env->tr.base = tss_base;
381     env->tr.limit = tss_limit;
382     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
383 
384     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
385         cpu_x86_update_cr3(env, new_cr3);
386     }
387 
388     /* first load everything that cannot fault, then reload the
389        parts that may raise an exception */
390     env->eip = new_eip;
391     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
392         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
393     if (type & 8) {
394         cpu_load_eflags(env, new_eflags, eflags_mask);
395         for (i = 0; i < 8; i++) {
396             env->regs[i] = new_regs[i];
397         }
398     } else {
399         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
400         for (i = 0; i < 8; i++) {
401             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
402         }
403     }
404     if (new_eflags & VM_MASK) {
405         for (i = 0; i < 6; i++) {
406             load_seg_vm(env, i, new_segs[i]);
407         }
408     } else {
409         /* first load just the selectors, as loading the full descriptors
410            may trigger exceptions */
410         for (i = 0; i < 6; i++) {
411             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
412         }
413     }
414 
415     env->ldt.selector = new_ldt & ~4;
416     env->ldt.base = 0;
417     env->ldt.limit = 0;
418     env->ldt.flags = 0;
419 
420     /* load the LDT */
421     if (new_ldt & 4) {
422         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
423     }
424 
425     if ((new_ldt & 0xfffc) != 0) {
426         dt = &env->gdt;
427         index = new_ldt & ~7;
428         if ((index + 7) > dt->limit) {
429             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
430         }
431         ptr = dt->base + index;
432         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
433         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
434         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
435             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
436         }
437         if (!(e2 & DESC_P_MASK)) {
438             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
439         }
440         load_seg_cache_raw_dt(&env->ldt, e1, e2);
441     }
442 
443     /* load the segments */
444     if (!(new_eflags & VM_MASK)) {
445         int cpl = new_segs[R_CS] & 3;
446         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
447         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
448         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
449         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
450         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
451         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
452     }
453 
454     /* check that the new eip is within the CS segment limit */
455     if (new_eip > env->segs[R_CS].limit) {
456         /* XXX: different exception if CALL? */
457         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
458     }
459 
460 #ifndef CONFIG_USER_ONLY
461     /* reset local breakpoints */
462     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
463         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
464     }
465 #endif
466 }
467 
468 static void switch_tss(CPUX86State *env, int tss_selector,
469                        uint32_t e1, uint32_t e2, int source,
470                         uint32_t next_eip)
471 {
472     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
473 }
474 
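/* Width of the stack addressed through SS: 0 for a 64-bit stack (no
   masking, see SET_ESP), otherwise the mask of the valid ESP bits. */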
475 static inline unsigned int get_sp_mask(unsigned int e2)
476 {
477 #ifdef TARGET_X86_64
478     if (e2 & DESC_L_MASK) {
479         return 0;
480     } else
481 #endif
482     if (e2 & DESC_B_MASK) {
483         return 0xffffffff;
484     } else {
485         return 0xffff;
486     }
487 }
488 
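/* #DF, #TS, #NP, #SS, #GP, #PF and #AC push an error code on the stack */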
489 int exception_has_error_code(int intno)
490 {
491     switch (intno) {
492     case 8:
493     case 10:
494     case 11:
495     case 12:
496     case 13:
497     case 14:
498     case 17:
499         return 1;
500     }
501     return 0;
502 }
503 
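/*
 * Write back a new stack pointer while preserving whatever the current
 * stack width does not cover: a 16-bit stack only updates SP, a 32-bit
 * stack zero-extends into RSP, and a 64-bit stack takes the full value.
 */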
504 #ifdef TARGET_X86_64
505 #define SET_ESP(val, sp_mask)                                   \
506     do {                                                        \
507         if ((sp_mask) == 0xffff) {                              \
508             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
509                 ((val) & 0xffff);                               \
510         } else if ((sp_mask) == 0xffffffffLL) {                 \
511             env->regs[R_ESP] = (uint32_t)(val);                 \
512         } else {                                                \
513             env->regs[R_ESP] = (val);                           \
514         }                                                       \
515     } while (0)
516 #else
517 #define SET_ESP(val, sp_mask)                                   \
518     do {                                                        \
519         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
520             ((val) & (sp_mask));                                \
521     } while (0)
522 #endif
523 
524 /* When target_ulong is 64 bits wide this addition can overflow, so this
525  * segment-addition macro trims the result to 32 bits whenever needed. */
526 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
527 
528 /* XXX: add an is_user flag to get proper security support */
529 #define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
530     {                                                            \
531         sp -= 2;                                                 \
532         cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
533     }
534 
535 #define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
536     {                                                                   \
537         sp -= 4;                                                        \
538         cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
539     }
540 
541 #define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
542     {                                                            \
543         val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
544         sp += 2;                                                 \
545     }
546 
547 #define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
548     {                                                                   \
549         val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
550         sp += 4;                                                        \
551     }
552 
553 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
554 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
555 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
556 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
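
/*
 * The PUSH/POP helpers above work on a local copy of the stack pointer;
 * callers commit the architectural ESP with SET_ESP only after the whole
 * frame has been built, so a fault part-way through leaves it unchanged.
 */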
557 
558 /* protected mode interrupt */
559 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
560                                    int error_code, unsigned int next_eip,
561                                    int is_hw)
562 {
563     SegmentCache *dt;
564     target_ulong ptr, ssp;
565     int type, dpl, selector, ss_dpl, cpl;
566     int has_error_code, new_stack, shift;
567     uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
568     uint32_t old_eip, sp_mask;
569     int vm86 = env->eflags & VM_MASK;
570 
571     has_error_code = 0;
572     if (!is_int && !is_hw) {
573         has_error_code = exception_has_error_code(intno);
574     }
575     if (is_int) {
576         old_eip = next_eip;
577     } else {
578         old_eip = env->eip;
579     }
580 
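    /* error codes that refer to an IDT entry have bit 1 ("IDT") set and
       carry the vector number in the selector index field, hence the
       "intno * 8 + 2" below */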
581     dt = &env->idt;
582     if (intno * 8 + 7 > dt->limit) {
583         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
584     }
585     ptr = dt->base + intno * 8;
586     e1 = cpu_ldl_kernel(env, ptr);
587     e2 = cpu_ldl_kernel(env, ptr + 4);
588     /* check gate type */
589     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
590     switch (type) {
591     case 5: /* task gate */
592     case 6: /* 286 interrupt gate */
593     case 7: /* 286 trap gate */
594     case 14: /* 386 interrupt gate */
595     case 15: /* 386 trap gate */
596         break;
597     default:
598         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
599         break;
600     }
601     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
602     cpl = env->hflags & HF_CPL_MASK;
603     /* check privilege if software int */
604     if (is_int && dpl < cpl) {
605         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
606     }
607 
608     if (type == 5) {
609         /* task gate */
610         /* this check must be done here so that the correct error code is returned */
611         if (!(e2 & DESC_P_MASK)) {
612             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
613         }
614         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
615         if (has_error_code) {
616             int type;
617             uint32_t mask;
618 
619             /* push the error code */
620             type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
621             shift = type >> 3;
622             if (env->segs[R_SS].flags & DESC_B_MASK) {
623                 mask = 0xffffffff;
624             } else {
625                 mask = 0xffff;
626             }
627             esp = (env->regs[R_ESP] - (2 << shift)) & mask;
628             ssp = env->segs[R_SS].base + esp;
629             if (shift) {
630                 cpu_stl_kernel(env, ssp, error_code);
631             } else {
632                 cpu_stw_kernel(env, ssp, error_code);
633             }
634             SET_ESP(esp, mask);
635         }
636         return;
637     }
638 
639     /* Otherwise, trap or interrupt gate */
640 
641     /* check valid bit */
642     if (!(e2 & DESC_P_MASK)) {
643         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
644     }
645     selector = e1 >> 16;
646     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
647     if ((selector & 0xfffc) == 0) {
648         raise_exception_err(env, EXCP0D_GPF, 0);
649     }
650     if (load_segment(env, &e1, &e2, selector) != 0) {
651         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
652     }
653     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
654         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
655     }
656     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
657     if (dpl > cpl) {
658         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
659     }
660     if (!(e2 & DESC_P_MASK)) {
661         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
662     }
663     if (e2 & DESC_C_MASK) {
664         dpl = cpl;
665     }
666     if (dpl < cpl) {
667         /* to inner privilege */
668         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
669         if ((ss & 0xfffc) == 0) {
670             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
671         }
672         if ((ss & 3) != dpl) {
673             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
674         }
675         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
676             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
677         }
678         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
679         if (ss_dpl != dpl) {
680             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
681         }
682         if (!(ss_e2 & DESC_S_MASK) ||
683             (ss_e2 & DESC_CS_MASK) ||
684             !(ss_e2 & DESC_W_MASK)) {
685             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
686         }
687         if (!(ss_e2 & DESC_P_MASK)) {
688             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
689         }
690         new_stack = 1;
691         sp_mask = get_sp_mask(ss_e2);
692         ssp = get_seg_base(ss_e1, ss_e2);
693     } else  {
694         /* to same privilege */
695         if (vm86) {
696             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
697         }
698         new_stack = 0;
699         sp_mask = get_sp_mask(env->segs[R_SS].flags);
700         ssp = env->segs[R_SS].base;
701         esp = env->regs[R_ESP];
702     }
703 
704     shift = type >> 3;
705 
706 #if 0
707     /* XXX: check that enough room is available */
708     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
709     if (vm86) {
710         push_size += 8;
711     }
712     push_size <<= shift;
713 #endif
714     if (shift == 1) {
715         if (new_stack) {
716             if (vm86) {
717                 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
718                 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
719                 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
720                 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
721             }
722             PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
723             PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
724         }
725         PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
726         PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
727         PUSHL(ssp, esp, sp_mask, old_eip);
728         if (has_error_code) {
729             PUSHL(ssp, esp, sp_mask, error_code);
730         }
731     } else {
732         if (new_stack) {
733             if (vm86) {
734                 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
735                 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
736                 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
737                 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
738             }
739             PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
740             PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
741         }
742         PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
743         PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
744         PUSHW(ssp, esp, sp_mask, old_eip);
745         if (has_error_code) {
746             PUSHW(ssp, esp, sp_mask, error_code);
747         }
748     }
749 
750     /* interrupt gates clear the IF flag */
751     if ((type & 1) == 0) {
752         env->eflags &= ~IF_MASK;
753     }
754     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
755 
756     if (new_stack) {
757         if (vm86) {
758             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
759             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
760             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
761             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
762         }
763         ss = (ss & ~3) | dpl;
764         cpu_x86_load_seg_cache(env, R_SS, ss,
765                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
766     }
767     SET_ESP(esp, sp_mask);
768 
769     selector = (selector & ~3) | dpl;
770     cpu_x86_load_seg_cache(env, R_CS, selector,
771                    get_seg_base(e1, e2),
772                    get_seg_limit(e1, e2),
773                    e2);
774     env->eip = offset;
775 }
776 
777 #ifdef TARGET_X86_64
778 
779 #define PUSHQ_RA(sp, val, ra)                   \
780     {                                           \
781         sp -= 8;                                \
782         cpu_stq_kernel_ra(env, sp, (val), ra);  \
783     }
784 
785 #define POPQ_RA(sp, val, ra)                    \
786     {                                           \
787         val = cpu_ldq_kernel_ra(env, sp, ra);   \
788         sp += 8;                                \
789     }
790 
791 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
792 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
793 
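/*
 * In the 64-bit TSS, RSP0-RSP2 are stored at offsets 0x04/0x0c/0x14 and
 * IST1-IST7 at 0x24..0x54, so both can be reached with the single
 * formula "8 * level + 4" (IST slot n is passed in as level n + 3).
 */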
794 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
795 {
796     X86CPU *cpu = env_archcpu(env);
797     int index;
798 
799 #if 0
800     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
801            env->tr.base, env->tr.limit);
802 #endif
803 
804     if (!(env->tr.flags & DESC_P_MASK)) {
805         cpu_abort(CPU(cpu), "invalid tss");
806     }
807     index = 8 * level + 4;
808     if ((index + 7) > env->tr.limit) {
809         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
810     }
811     return cpu_ldq_kernel(env, env->tr.base + index);
812 }
813 
814 /* 64 bit interrupt */
815 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
816                            int error_code, target_ulong next_eip, int is_hw)
817 {
818     SegmentCache *dt;
819     target_ulong ptr;
820     int type, dpl, selector, cpl, ist;
821     int has_error_code, new_stack;
822     uint32_t e1, e2, e3, ss;
823     target_ulong old_eip, esp, offset;
824 
825     has_error_code = 0;
826     if (!is_int && !is_hw) {
827         has_error_code = exception_has_error_code(intno);
828     }
829     if (is_int) {
830         old_eip = next_eip;
831     } else {
832         old_eip = env->eip;
833     }
834 
835     dt = &env->idt;
836     if (intno * 16 + 15 > dt->limit) {
837         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
838     }
839     ptr = dt->base + intno * 16;
840     e1 = cpu_ldl_kernel(env, ptr);
841     e2 = cpu_ldl_kernel(env, ptr + 4);
842     e3 = cpu_ldl_kernel(env, ptr + 8);
843     /* check gate type */
844     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
845     switch (type) {
846     case 14: /* 386 interrupt gate */
847     case 15: /* 386 trap gate */
848         break;
849     default:
850         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
851         break;
852     }
853     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
854     cpl = env->hflags & HF_CPL_MASK;
855     /* check privilege if software int */
856     if (is_int && dpl < cpl) {
857         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
858     }
859     /* check valid bit */
860     if (!(e2 & DESC_P_MASK)) {
861         raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
862     }
863     selector = e1 >> 16;
864     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
865     ist = e2 & 7;
866     if ((selector & 0xfffc) == 0) {
867         raise_exception_err(env, EXCP0D_GPF, 0);
868     }
869 
870     if (load_segment(env, &e1, &e2, selector) != 0) {
871         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
872     }
873     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
874         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
875     }
876     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
877     if (dpl > cpl) {
878         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
879     }
880     if (!(e2 & DESC_P_MASK)) {
881         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
882     }
883     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
884         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
885     }
886     if (e2 & DESC_C_MASK) {
887         dpl = cpl;
888     }
889     if (dpl < cpl || ist != 0) {
890         /* to inner privilege */
891         new_stack = 1;
892         esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
893         ss = 0;
894     } else {
895         /* to same privilege */
896         if (env->eflags & VM_MASK) {
897             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
898         }
899         new_stack = 0;
900         esp = env->regs[R_ESP];
901     }
902     esp &= ~0xfLL; /* align stack */
903 
904     PUSHQ(esp, env->segs[R_SS].selector);
905     PUSHQ(esp, env->regs[R_ESP]);
906     PUSHQ(esp, cpu_compute_eflags(env));
907     PUSHQ(esp, env->segs[R_CS].selector);
908     PUSHQ(esp, old_eip);
909     if (has_error_code) {
910         PUSHQ(esp, error_code);
911     }
912 
913     /* interrupt gates clear the IF flag */
914     if ((type & 1) == 0) {
915         env->eflags &= ~IF_MASK;
916     }
917     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
918 
919     if (new_stack) {
920         ss = 0 | dpl;
921         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
922     }
923     env->regs[R_ESP] = esp;
924 
925     selector = (selector & ~3) | dpl;
926     cpu_x86_load_seg_cache(env, R_CS, selector,
927                    get_seg_base(e1, e2),
928                    get_seg_limit(e1, e2),
929                    e2);
930     env->eip = offset;
931 }
932 
933 void helper_sysret(CPUX86State *env, int dflag)
934 {
935     int cpl, selector;
936 
937     if (!(env->efer & MSR_EFER_SCE)) {
938         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
939     }
940     cpl = env->hflags & HF_CPL_MASK;
941     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
942         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
943     }
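    /* SYSRET takes its selectors from STAR[63:48]: CS is that value
       (+16 when returning to 64-bit mode), SS is that value + 8, and
       both are forced to RPL 3 */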
944     selector = (env->star >> 48) & 0xffff;
945     if (env->hflags & HF_LMA_MASK) {
946         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
947                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
948                         NT_MASK);
949         if (dflag == 2) {
950             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
951                                    0, 0xffffffff,
952                                    DESC_G_MASK | DESC_P_MASK |
953                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
954                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
955                                    DESC_L_MASK);
956             env->eip = env->regs[R_ECX];
957         } else {
958             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
959                                    0, 0xffffffff,
960                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
961                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
962                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
963             env->eip = (uint32_t)env->regs[R_ECX];
964         }
965         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
966                                0, 0xffffffff,
967                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
968                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
969                                DESC_W_MASK | DESC_A_MASK);
970     } else {
971         env->eflags |= IF_MASK;
972         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
973                                0, 0xffffffff,
974                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
975                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
976                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
977         env->eip = (uint32_t)env->regs[R_ECX];
978         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
979                                0, 0xffffffff,
980                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
981                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
982                                DESC_W_MASK | DESC_A_MASK);
983     }
984 }
985 #endif /* TARGET_X86_64 */
986 
987 /* real mode interrupt */
988 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
989                               int error_code, unsigned int next_eip)
990 {
991     SegmentCache *dt;
992     target_ulong ptr, ssp;
993     int selector;
994     uint32_t offset, esp;
995     uint32_t old_cs, old_eip;
996 
997     /* real mode (simpler!) */
998     dt = &env->idt;
999     if (intno * 4 + 3 > dt->limit) {
1000         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1001     }
1002     ptr = dt->base + intno * 4;
1003     offset = cpu_lduw_kernel(env, ptr);
1004     selector = cpu_lduw_kernel(env, ptr + 2);
1005     esp = env->regs[R_ESP];
1006     ssp = env->segs[R_SS].base;
1007     if (is_int) {
1008         old_eip = next_eip;
1009     } else {
1010         old_eip = env->eip;
1011     }
1012     old_cs = env->segs[R_CS].selector;
1013     /* XXX: use SS segment size? */
1014     PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1015     PUSHW(ssp, esp, 0xffff, old_cs);
1016     PUSHW(ssp, esp, 0xffff, old_eip);
1017 
1018     /* update processor state */
1019     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1020     env->eip = offset;
1021     env->segs[R_CS].selector = selector;
1022     env->segs[R_CS].base = (selector << 4);
1023     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1024 }
1025 
1026 /*
1027  * Begin execution of an interrupt. is_int is TRUE if coming from
1028  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1029  * instruction. It is only relevant if is_int is TRUE.
1030  */
1031 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1032                       int error_code, target_ulong next_eip, int is_hw)
1033 {
1034     CPUX86State *env = &cpu->env;
1035 
1036     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1037         if ((env->cr[0] & CR0_PE_MASK)) {
1038             static int count;
1039 
1040             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1041                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1042                      count, intno, error_code, is_int,
1043                      env->hflags & HF_CPL_MASK,
1044                      env->segs[R_CS].selector, env->eip,
1045                      (int)env->segs[R_CS].base + env->eip,
1046                      env->segs[R_SS].selector, env->regs[R_ESP]);
1047             if (intno == 0x0e) {
1048                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1049             } else {
1050                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1051             }
1052             qemu_log("\n");
1053             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1054 #if 0
1055             {
1056                 int i;
1057                 target_ulong ptr;
1058 
1059                 qemu_log("       code=");
1060                 ptr = env->segs[R_CS].base + env->eip;
1061                 for (i = 0; i < 16; i++) {
1062                     qemu_log(" %02x", ldub(ptr + i));
1063                 }
1064                 qemu_log("\n");
1065             }
1066 #endif
1067             count++;
1068         }
1069     }
1070     if (env->cr[0] & CR0_PE_MASK) {
1071 #if !defined(CONFIG_USER_ONLY)
1072         if (env->hflags & HF_GUEST_MASK) {
1073             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1074         }
1075 #endif
1076 #ifdef TARGET_X86_64
1077         if (env->hflags & HF_LMA_MASK) {
1078             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1079         } else
1080 #endif
1081         {
1082             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1083                                    is_hw);
1084         }
1085     } else {
1086 #if !defined(CONFIG_USER_ONLY)
1087         if (env->hflags & HF_GUEST_MASK) {
1088             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1089         }
1090 #endif
1091         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1092     }
1093 
1094 #if !defined(CONFIG_USER_ONLY)
1095     if (env->hflags & HF_GUEST_MASK) {
1096         CPUState *cs = CPU(cpu);
1097         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1098                                       offsetof(struct vmcb,
1099                                                control.event_inj));
1100 
1101         x86_stl_phys(cs,
1102                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1103                  event_inj & ~SVM_EVTINJ_VALID);
1104     }
1105 #endif
1106 }
1107 
1108 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1109 {
1110     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1111 }
1112 
1113 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1114 {
1115     X86CPU *cpu = X86_CPU(cs);
1116     CPUX86State *env = &cpu->env;
1117     int intno;
1118 
1119     interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
1120     if (!interrupt_request) {
1121         return false;
1122     }
1123 
1124     /* Don't process multiple interrupt requests in a single call.
1125      * This is required to make icount-driven execution deterministic.
1126      */
1127     switch (interrupt_request) {
1128 #if !defined(CONFIG_USER_ONLY)
1129     case CPU_INTERRUPT_POLL:
1130         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1131         apic_poll_irq(cpu->apic_state);
1132         break;
1133 #endif
1134     case CPU_INTERRUPT_SIPI:
1135         do_cpu_sipi(cpu);
1136         break;
1137     case CPU_INTERRUPT_SMI:
1138         cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
1139         cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1140 #ifdef CONFIG_USER_ONLY
1141         cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
1142 #else
1143         do_smm_enter(cpu);
1144 #endif /* CONFIG_USER_ONLY */
1145         break;
1146     case CPU_INTERRUPT_NMI:
1147         cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
1148         cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1149         env->hflags2 |= HF2_NMI_MASK;
1150         do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1151         break;
1152     case CPU_INTERRUPT_MCE:
1153         cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1154         do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1155         break;
1156     case CPU_INTERRUPT_HARD:
1157         cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
1158         cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1159                                    CPU_INTERRUPT_VIRQ);
1160         intno = cpu_get_pic_interrupt(env);
1161         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1162                       "Servicing hardware INT=0x%02x\n", intno);
1163         do_interrupt_x86_hardirq(env, intno, 1);
1164         break;
1165 #if !defined(CONFIG_USER_ONLY)
1166     case CPU_INTERRUPT_VIRQ:
1167         cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
1168         intno = x86_ldl_phys(cs, env->vm_vmcb
1169                              + offsetof(struct vmcb, control.int_vector));
1170         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1171                       "Servicing virtual hardware INT=0x%02x\n", intno);
1172         do_interrupt_x86_hardirq(env, intno, 1);
1173         cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1174         env->int_ctl &= ~V_IRQ_MASK;
1175         break;
1176 #endif
1177     }
1178 
1179     /* Ensure that no TB jump will be modified as the program flow was changed.  */
1180     return true;
1181 }
1182 
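/*
 * In long mode, LDT and TSS descriptors grow to 16 bytes: the third word
 * holds base[63:32] and the fourth must have a zero type field, which is
 * why entry_limit becomes 15 in the helpers below.
 */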
1183 void helper_lldt(CPUX86State *env, int selector)
1184 {
1185     SegmentCache *dt;
1186     uint32_t e1, e2;
1187     int index, entry_limit;
1188     target_ulong ptr;
1189 
1190     selector &= 0xffff;
1191     if ((selector & 0xfffc) == 0) {
1192         /* XXX: NULL selector case: invalid LDT */
1193         env->ldt.base = 0;
1194         env->ldt.limit = 0;
1195     } else {
1196         if (selector & 0x4) {
1197             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1198         }
1199         dt = &env->gdt;
1200         index = selector & ~7;
1201 #ifdef TARGET_X86_64
1202         if (env->hflags & HF_LMA_MASK) {
1203             entry_limit = 15;
1204         } else
1205 #endif
1206         {
1207             entry_limit = 7;
1208         }
1209         if ((index + entry_limit) > dt->limit) {
1210             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1211         }
1212         ptr = dt->base + index;
1213         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1214         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1215         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1216             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1217         }
1218         if (!(e2 & DESC_P_MASK)) {
1219             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1220         }
1221 #ifdef TARGET_X86_64
1222         if (env->hflags & HF_LMA_MASK) {
1223             uint32_t e3;
1224 
1225             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1226             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1227             env->ldt.base |= (target_ulong)e3 << 32;
1228         } else
1229 #endif
1230         {
1231             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1232         }
1233     }
1234     env->ldt.selector = selector;
1235 }
1236 
1237 void helper_ltr(CPUX86State *env, int selector)
1238 {
1239     SegmentCache *dt;
1240     uint32_t e1, e2;
1241     int index, type, entry_limit;
1242     target_ulong ptr;
1243 
1244     selector &= 0xffff;
1245     if ((selector & 0xfffc) == 0) {
1246         /* NULL selector case: invalid TR */
1247         env->tr.base = 0;
1248         env->tr.limit = 0;
1249         env->tr.flags = 0;
1250     } else {
1251         if (selector & 0x4) {
1252             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1253         }
1254         dt = &env->gdt;
1255         index = selector & ~7;
1256 #ifdef TARGET_X86_64
1257         if (env->hflags & HF_LMA_MASK) {
1258             entry_limit = 15;
1259         } else
1260 #endif
1261         {
1262             entry_limit = 7;
1263         }
1264         if ((index + entry_limit) > dt->limit) {
1265             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1266         }
1267         ptr = dt->base + index;
1268         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1269         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1270         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1271         if ((e2 & DESC_S_MASK) ||
1272             (type != 1 && type != 9)) {
1273             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1274         }
1275         if (!(e2 & DESC_P_MASK)) {
1276             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1277         }
1278 #ifdef TARGET_X86_64
1279         if (env->hflags & HF_LMA_MASK) {
1280             uint32_t e3, e4;
1281 
1282             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1283             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1284             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1285                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1286             }
1287             load_seg_cache_raw_dt(&env->tr, e1, e2);
1288             env->tr.base |= (target_ulong)e3 << 32;
1289         } else
1290 #endif
1291         {
1292             load_seg_cache_raw_dt(&env->tr, e1, e2);
1293         }
1294         e2 |= DESC_TSS_BUSY_MASK;
1295         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1296     }
1297     env->tr.selector = selector;
1298 }
1299 
1300 /* only works in protected mode, outside VM86. seg_reg must be != R_CS */
1301 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1302 {
1303     uint32_t e1, e2;
1304     int cpl, dpl, rpl;
1305     SegmentCache *dt;
1306     int index;
1307     target_ulong ptr;
1308 
1309     selector &= 0xffff;
1310     cpl = env->hflags & HF_CPL_MASK;
1311     if ((selector & 0xfffc) == 0) {
1312         /* null selector case */
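        /* a null SS is only allowed in 64-bit mode when CPL < 3 */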
1313         if (seg_reg == R_SS
1314 #ifdef TARGET_X86_64
1315             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1316 #endif
1317             ) {
1318             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1319         }
1320         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1321     } else {
1322 
1323         if (selector & 0x4) {
1324             dt = &env->ldt;
1325         } else {
1326             dt = &env->gdt;
1327         }
1328         index = selector & ~7;
1329         if ((index + 7) > dt->limit) {
1330             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1331         }
1332         ptr = dt->base + index;
1333         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1334         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1335 
1336         if (!(e2 & DESC_S_MASK)) {
1337             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1338         }
1339         rpl = selector & 3;
1340         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1341         if (seg_reg == R_SS) {
1342             /* must be writable segment */
1343             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1344                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1345             }
1346             if (rpl != cpl || dpl != cpl) {
1347                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1348             }
1349         } else {
1350             /* must be readable segment */
1351             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1352                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1353             }
1354 
1355             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1356             /* if data or non-conforming code, check the rights */
1357                 if (dpl < cpl || dpl < rpl) {
1358                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1359                 }
1360             }
1361         }
1362 
1363         if (!(e2 & DESC_P_MASK)) {
1364             if (seg_reg == R_SS) {
1365                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1366             } else {
1367                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1368             }
1369         }
1370 
1371         /* set the access bit if not already set */
1372         if (!(e2 & DESC_A_MASK)) {
1373             e2 |= DESC_A_MASK;
1374             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1375         }
1376 
1377         cpu_x86_load_seg_cache(env, seg_reg, selector,
1378                        get_seg_base(e1, e2),
1379                        get_seg_limit(e1, e2),
1380                        e2);
1381 #if 0
1382         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1383                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1384 #endif
1385     }
1386 }
1387 
1388 /* protected mode jump */
1389 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1390                            target_ulong next_eip)
1391 {
1392     int gate_cs, type;
1393     uint32_t e1, e2, cpl, dpl, rpl, limit;
1394 
1395     if ((new_cs & 0xfffc) == 0) {
1396         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1397     }
1398     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1399         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1400     }
1401     cpl = env->hflags & HF_CPL_MASK;
1402     if (e2 & DESC_S_MASK) {
1403         if (!(e2 & DESC_CS_MASK)) {
1404             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1405         }
1406         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1407         if (e2 & DESC_C_MASK) {
1408             /* conforming code segment */
1409             if (dpl > cpl) {
1410                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1411             }
1412         } else {
1413             /* non-conforming code segment */
1414             rpl = new_cs & 3;
1415             if (rpl > cpl) {
1416                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1417             }
1418             if (dpl != cpl) {
1419                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1420             }
1421         }
1422         if (!(e2 & DESC_P_MASK)) {
1423             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1424         }
1425         limit = get_seg_limit(e1, e2);
1426         if (new_eip > limit &&
1427             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1428             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1429         }
1430         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1431                        get_seg_base(e1, e2), limit, e2);
1432         env->eip = new_eip;
1433     } else {
1434         /* jump to call or task gate */
1435         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1436         rpl = new_cs & 3;
1437         cpl = env->hflags & HF_CPL_MASK;
1438         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1439 
1440 #ifdef TARGET_X86_64
1441         if (env->efer & MSR_EFER_LMA) {
1442             if (type != 12) {
1443                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1444             }
1445         }
1446 #endif
1447         switch (type) {
1448         case 1: /* 286 TSS */
1449         case 9: /* 386 TSS */
1450         case 5: /* task gate */
1451             if (dpl < cpl || dpl < rpl) {
1452                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1453             }
1454             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1455             break;
1456         case 4: /* 286 call gate */
1457         case 12: /* 386 call gate */
1458             if ((dpl < cpl) || (dpl < rpl)) {
1459                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1460             }
1461             if (!(e2 & DESC_P_MASK)) {
1462                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1463             }
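            /* the gate holds the target selector in e1[31:16] and the
               offset low half in e1[15:0]; a 386 gate adds the offset
               high half in e2[31:16] */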
1464             gate_cs = e1 >> 16;
1465             new_eip = (e1 & 0xffff);
1466             if (type == 12) {
1467                 new_eip |= (e2 & 0xffff0000);
1468             }
1469 
1470 #ifdef TARGET_X86_64
1471             if (env->efer & MSR_EFER_LMA) {
1472                 /* load the upper 8 bytes of the 64-bit call gate */
1473                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1474                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1475                                            GETPC());
1476                 }
1477                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1478                 if (type != 0) {
1479                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1480                                            GETPC());
1481                 }
1482                 new_eip |= ((target_ulong)e1) << 32;
1483             }
1484 #endif
1485 
1486             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1487                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1488             }
1489             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1490             /* must be code segment */
1491             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1492                  (DESC_S_MASK | DESC_CS_MASK))) {
1493                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1494             }
1495             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1496                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1497                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1498             }
1499 #ifdef TARGET_X86_64
1500             if (env->efer & MSR_EFER_LMA) {
1501                 if (!(e2 & DESC_L_MASK)) {
1502                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1503                 }
1504                 if (e2 & DESC_B_MASK) {
1505                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1506                 }
1507             }
1508 #endif
1509             if (!(e2 & DESC_P_MASK)) {
1510                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1511             }
1512             limit = get_seg_limit(e1, e2);
1513             if (new_eip > limit &&
1514                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1515                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1516             }
1517             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1518                                    get_seg_base(e1, e2), limit, e2);
1519             env->eip = new_eip;
1520             break;
1521         default:
1522             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1523             break;
1524         }
1525     }
1526 }
1527 
1528 /* real mode call */
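/*
 * Pushes the return CS:IP on the current stack (16- or 32-bit pushes
 * depending on 'shift') and loads CS with real-mode semantics:
 * base = selector << 4, limit and flags unchanged.
 */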
1529 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1530                        int shift, int next_eip)
1531 {
1532     int new_eip;
1533     uint32_t esp, esp_mask;
1534     target_ulong ssp;
1535 
1536     new_eip = new_eip1;
1537     esp = env->regs[R_ESP];
1538     esp_mask = get_sp_mask(env->segs[R_SS].flags);
1539     ssp = env->segs[R_SS].base;
1540     if (shift) {
1541         PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1542         PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1543     } else {
1544         PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1545         PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1546     }
1547 
1548     SET_ESP(esp, esp_mask);
1549     env->eip = new_eip;
1550     env->segs[R_CS].selector = new_cs;
1551     env->segs[R_CS].base = (new_cs << 4);
1552 }
1553 
1554 /* protected mode call */
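/*
 * A plain code segment target keeps the current privilege level.  A
 * TSS or task gate target switches tasks.  A call gate target calls
 * the code segment named by the gate; if that raises the privilege
 * level, the stack is switched to the one read from the TSS and
 * 'param_count' arguments are copied from the old stack to the new one.
 */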
1555 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1556                             int shift, target_ulong next_eip)
1557 {
1558     int new_stack, i;
1559     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1560     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1561     uint32_t val, limit, old_sp_mask;
1562     target_ulong ssp, old_ssp, offset, sp;
1563 
1564     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1565     LOG_PCALL_STATE(env_cpu(env));
1566     if ((new_cs & 0xfffc) == 0) {
1567         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1568     }
1569     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1570         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1571     }
1572     cpl = env->hflags & HF_CPL_MASK;
1573     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1574     if (e2 & DESC_S_MASK) {
1575         if (!(e2 & DESC_CS_MASK)) {
1576             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1577         }
1578         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1579         if (e2 & DESC_C_MASK) {
1580             /* conforming code segment */
1581             if (dpl > cpl) {
1582                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1583             }
1584         } else {
1585             /* non-conforming code segment */
1586             rpl = new_cs & 3;
1587             if (rpl > cpl) {
1588                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1589             }
1590             if (dpl != cpl) {
1591                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1592             }
1593         }
1594         if (!(e2 & DESC_P_MASK)) {
1595             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1596         }
1597 
1598 #ifdef TARGET_X86_64
1599         /* XXX: check 16/32 bit cases in long mode */
1600         if (shift == 2) {
1601             target_ulong rsp;
1602 
1603             /* 64 bit case */
1604             rsp = env->regs[R_ESP];
1605             PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1606             PUSHQ_RA(rsp, next_eip, GETPC());
1607             /* from this point, not restartable */
1608             env->regs[R_ESP] = rsp;
1609             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1610                                    get_seg_base(e1, e2),
1611                                    get_seg_limit(e1, e2), e2);
1612             env->eip = new_eip;
1613         } else
1614 #endif
1615         {
1616             sp = env->regs[R_ESP];
1617             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1618             ssp = env->segs[R_SS].base;
1619             if (shift) {
1620                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1621                 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1622             } else {
1623                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1624                 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1625             }
1626 
1627             limit = get_seg_limit(e1, e2);
1628             if (new_eip > limit) {
1629                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1630             }
1631             /* from this point, not restartable */
1632             SET_ESP(sp, sp_mask);
1633             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1634                                    get_seg_base(e1, e2), limit, e2);
1635             env->eip = new_eip;
1636         }
1637     } else {
1638         /* check gate type */
1639         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1640         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1641         rpl = new_cs & 3;
1642 
1643 #ifdef TARGET_X86_64
1644         if (env->efer & MSR_EFER_LMA) {
1645             if (type != 12) {
1646                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1647             }
1648         }
1649 #endif
1650 
1651         switch (type) {
1652         case 1: /* available 286 TSS */
1653         case 9: /* available 386 TSS */
1654         case 5: /* task gate */
1655             if (dpl < cpl || dpl < rpl) {
1656                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1657             }
1658             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1659             return;
1660         case 4: /* 286 call gate */
1661         case 12: /* 386 call gate */
1662             break;
1663         default:
1664             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1665             break;
1666         }
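        /* 286 gates (type 4) give shift 0 (16-bit pushes), 386 gates
           (type 12) give shift 1 (32-bit pushes) */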
1667         shift = type >> 3;
1668 
1669         if (dpl < cpl || dpl < rpl) {
1670             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1671         }
1672         /* check valid bit */
1673         if (!(e2 & DESC_P_MASK)) {
1674             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1675         }
1676         selector = e1 >> 16;
1677         param_count = e2 & 0x1f;
1678         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1679 #ifdef TARGET_X86_64
1680         if (env->efer & MSR_EFER_LMA) {
1681             /* load the upper 8 bytes of the 64-bit call gate */
1682             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1683                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1684                                        GETPC());
1685             }
1686             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1687             if (type != 0) {
1688                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1689                                        GETPC());
1690             }
1691             offset |= ((target_ulong)e1) << 32;
1692         }
1693 #endif
1694         if ((selector & 0xfffc) == 0) {
1695             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1696         }
1697 
1698         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1699             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1700         }
1701         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1702             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1703         }
1704         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1705         if (dpl > cpl) {
1706             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1707         }
1708 #ifdef TARGET_X86_64
1709         if (env->efer & MSR_EFER_LMA) {
1710             if (!(e2 & DESC_L_MASK)) {
1711                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1712             }
1713             if (e2 & DESC_B_MASK) {
1714                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1715             }
1716             shift++;
1717         }
1718 #endif
1719         if (!(e2 & DESC_P_MASK)) {
1720             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1721         }
1722 
1723         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1724             /* to inner privilege */
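            /* the target is more privileged: fetch the new SS:ESP for
               its privilege level from the TSS, then copy param_count
               arguments from the old stack (64-bit gates use a NULL SS
               and carry no parameters) */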
1725 #ifdef TARGET_X86_64
1726             if (shift == 2) {
1727                 sp = get_rsp_from_tss(env, dpl);
1728                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1729                 new_stack = 1;
1730                 sp_mask = 0;
1731                 ssp = 0;  /* SS base is always zero in IA-32e mode */
1732                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1733                           TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1734             } else
1735 #endif
1736             {
1737                 uint32_t sp32;
1738                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1739                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1740                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1741                           env->regs[R_ESP]);
1742                 sp = sp32;
1743                 if ((ss & 0xfffc) == 0) {
1744                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1745                 }
1746                 if ((ss & 3) != dpl) {
1747                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1748                 }
1749                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1750                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1751                 }
1752                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1753                 if (ss_dpl != dpl) {
1754                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1755                 }
1756                 if (!(ss_e2 & DESC_S_MASK) ||
1757                     (ss_e2 & DESC_CS_MASK) ||
1758                     !(ss_e2 & DESC_W_MASK)) {
1759                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1760                 }
1761                 if (!(ss_e2 & DESC_P_MASK)) {
1762                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1763                 }
1764 
1765                 sp_mask = get_sp_mask(ss_e2);
1766                 ssp = get_seg_base(ss_e1, ss_e2);
1767             }
1768 
1769             /* push_size = ((param_count * 2) + 8) << shift; */
1770 
1771             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1772             old_ssp = env->segs[R_SS].base;
1773 #ifdef TARGET_X86_64
1774             if (shift == 2) {
1775                 /* XXX: verify if new stack address is canonical */
1776                 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
1777                 PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
1778                 /* parameters aren't supported for 64-bit call gates */
1779             } else
1780 #endif
1781             if (shift == 1) {
1782                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1783                 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1784                 for (i = param_count - 1; i >= 0; i--) {
1785                     val = cpu_ldl_kernel_ra(env, old_ssp +
1786                                             ((env->regs[R_ESP] + i * 4) &
1787                                              old_sp_mask), GETPC());
1788                     PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1789                 }
1790             } else {
1791                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1792                 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1793                 for (i = param_count - 1; i >= 0; i--) {
1794                     val = cpu_lduw_kernel_ra(env, old_ssp +
1795                                              ((env->regs[R_ESP] + i * 2) &
1796                                               old_sp_mask), GETPC());
1797                     PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1798                 }
1799             }
1800             new_stack = 1;
1801         } else {
1802             /* to same privilege */
1803             sp = env->regs[R_ESP];
1804             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1805             ssp = env->segs[R_SS].base;
1806             /* push_size = (4 << shift); */
1807             new_stack = 0;
1808         }
1809 
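        /* push the return address (CS:next_eip) on the selected stack */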
1810 #ifdef TARGET_X86_64
1811         if (shift == 2) {
1812             PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
1813             PUSHQ_RA(sp, next_eip, GETPC());
1814         } else
1815 #endif
1816         if (shift == 1) {
1817             PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1818             PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1819         } else {
1820             PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1821             PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1822         }
1823 
1824         /* from this point, not restartable */
1825 
1826         if (new_stack) {
1827 #ifdef TARGET_X86_64
1828             if (shift == 2) {
1829                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1830             } else
1831 #endif
1832             {
1833                 ss = (ss & ~3) | dpl;
1834                 cpu_x86_load_seg_cache(env, R_SS, ss,
1835                                        ssp,
1836                                        get_seg_limit(ss_e1, ss_e2),
1837                                        ss_e2);
1838             }
1839         }
1840 
1841         selector = (selector & ~3) | dpl;
1842         cpu_x86_load_seg_cache(env, R_CS, selector,
1843                        get_seg_base(e1, e2),
1844                        get_seg_limit(e1, e2),
1845                        e2);
1846         SET_ESP(sp, sp_mask);
1847         env->eip = offset;
1848     }
1849 }
1850 
1851 /* real and vm86 mode iret */
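/*
 * Pops EIP, CS and EFLAGS from the stack with 16-bit stack-pointer
 * wrap-around and reloads them.  In vm86 mode IOPL is not part of the
 * restored flags.
 */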
1852 void helper_iret_real(CPUX86State *env, int shift)
1853 {
1854     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1855     target_ulong ssp;
1856     int eflags_mask;
1857 
1858     sp_mask = 0xffff; /* XXX: use SS segment size? */
1859     sp = env->regs[R_ESP];
1860     ssp = env->segs[R_SS].base;
1861     if (shift == 1) {
1862         /* 32 bits */
1863         POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1864         POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
1865         new_cs &= 0xffff;
1866         POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1867     } else {
1868         /* 16 bits */
1869         POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1870         POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1871         POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1872     }
1873     env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1874     env->segs[R_CS].selector = new_cs;
1875     env->segs[R_CS].base = (new_cs << 4);
1876     env->eip = new_eip;
1877     if (env->eflags & VM_MASK) {
1878         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1879             NT_MASK;
1880     } else {
1881         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1882             RF_MASK | NT_MASK;
1883     }
1884     if (shift == 0) {
1885         eflags_mask &= 0xffff;
1886     }
1887     cpu_load_eflags(env, new_eflags, eflags_mask);
1888     env->hflags2 &= ~HF2_NMI_MASK;
1889 }
1890 
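/*
 * When returning to an outer privilege level, a data or non-conforming
 * code segment register whose DPL is lower than the new CPL must not
 * stay usable: clear its selector and mark the cached descriptor not
 * present so that any later use faults.
 */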
1891 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1892 {
1893     int dpl;
1894     uint32_t e2;
1895 
1896     /* XXX: on x86_64, we do not want to nullify FS and GS because
1897        they may still contain a valid base. I would be interested to
1898        know how a real x86_64 CPU behaves */
1899     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1900         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1901         return;
1902     }
1903 
1904     e2 = env->segs[seg_reg].flags;
1905     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1906     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1907         /* data or non-conforming code segment */
1908         if (dpl < cpl) {
1909             cpu_x86_load_seg_cache(env, seg_reg, 0,
1910                                    env->segs[seg_reg].base,
1911                                    env->segs[seg_reg].limit,
1912                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
1913         }
1914     }
1915 }
1916 
1917 /* protected mode iret and lret; 'is_iret' selects whether EFLAGS is popped */
1918 static inline void helper_ret_protected(CPUX86State *env, int shift,
1919                                         int is_iret, int addend,
1920                                         uintptr_t retaddr)
1921 {
1922     uint32_t new_cs, new_eflags, new_ss;
1923     uint32_t new_es, new_ds, new_fs, new_gs;
1924     uint32_t e1, e2, ss_e1, ss_e2;
1925     int cpl, dpl, rpl, eflags_mask, iopl;
1926     target_ulong ssp, sp, new_eip, new_esp, sp_mask;
1927 
1928 #ifdef TARGET_X86_64
1929     if (shift == 2) {
1930         sp_mask = -1;
1931     } else
1932 #endif
1933     {
1934         sp_mask = get_sp_mask(env->segs[R_SS].flags);
1935     }
1936     sp = env->regs[R_ESP];
1937     ssp = env->segs[R_SS].base;
1938     new_eflags = 0; /* avoid warning */
1939 #ifdef TARGET_X86_64
1940     if (shift == 2) {
1941         POPQ_RA(sp, new_eip, retaddr);
1942         POPQ_RA(sp, new_cs, retaddr);
1943         new_cs &= 0xffff;
1944         if (is_iret) {
1945             POPQ_RA(sp, new_eflags, retaddr);
1946         }
1947     } else
1948 #endif
1949     {
1950         if (shift == 1) {
1951             /* 32 bits */
1952             POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
1953             POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
1954             new_cs &= 0xffff;
1955             if (is_iret) {
1956                 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
1957                 if (new_eflags & VM_MASK) {
1958                     goto return_to_vm86;
1959                 }
1960             }
1961         } else {
1962             /* 16 bits */
1963             POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
1964             POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
1965             if (is_iret) {
1966                 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
1967             }
1968         }
1969     }
1970     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
1971               new_cs, new_eip, shift, addend);
1972     LOG_PCALL_STATE(env_cpu(env));
1973     if ((new_cs & 0xfffc) == 0) {
1974         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1975     }
1976     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
1977         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1978     }
1979     if (!(e2 & DESC_S_MASK) ||
1980         !(e2 & DESC_CS_MASK)) {
1981         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1982     }
1983     cpl = env->hflags & HF_CPL_MASK;
1984     rpl = new_cs & 3;
1985     if (rpl < cpl) {
1986         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1987     }
1988     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1989     if (e2 & DESC_C_MASK) {
1990         if (dpl > rpl) {
1991             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1992         }
1993     } else {
1994         if (dpl != rpl) {
1995             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1996         }
1997     }
1998     if (!(e2 & DESC_P_MASK)) {
1999         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2000     }
2001 
2002     sp += addend;
2003     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2004                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2005         /* return to same privilege level */
2006         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2007                        get_seg_base(e1, e2),
2008                        get_seg_limit(e1, e2),
2009                        e2);
2010     } else {
2011         /* return to different privilege level */
2012 #ifdef TARGET_X86_64
2013         if (shift == 2) {
2014             POPQ_RA(sp, new_esp, retaddr);
2015             POPQ_RA(sp, new_ss, retaddr);
2016             new_ss &= 0xffff;
2017         } else
2018 #endif
2019         {
2020             if (shift == 1) {
2021                 /* 32 bits */
2022                 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2023                 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2024                 new_ss &= 0xffff;
2025             } else {
2026                 /* 16 bits */
2027                 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2028                 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2029             }
2030         }
2031         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2032                   new_ss, new_esp);
2033         if ((new_ss & 0xfffc) == 0) {
2034 #ifdef TARGET_X86_64
2035             /* NULL ss is allowed in long mode if the target CPL (rpl) != 3 */
2036             /* XXX: test CS64? */
2037             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2038                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2039                                        0, 0xffffffff,
2040                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2041                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2042                                        DESC_W_MASK | DESC_A_MASK);
2043                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2044             } else
2045 #endif
2046             {
2047                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2048             }
2049         } else {
2050             if ((new_ss & 3) != rpl) {
2051                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2052             }
2053             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2054                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2055             }
2056             if (!(ss_e2 & DESC_S_MASK) ||
2057                 (ss_e2 & DESC_CS_MASK) ||
2058                 !(ss_e2 & DESC_W_MASK)) {
2059                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2060             }
2061             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2062             if (dpl != rpl) {
2063                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2064             }
2065             if (!(ss_e2 & DESC_P_MASK)) {
2066                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2067             }
2068             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2069                                    get_seg_base(ss_e1, ss_e2),
2070                                    get_seg_limit(ss_e1, ss_e2),
2071                                    ss_e2);
2072         }
2073 
2074         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2075                        get_seg_base(e1, e2),
2076                        get_seg_limit(e1, e2),
2077                        e2);
2078         sp = new_esp;
2079 #ifdef TARGET_X86_64
2080         if (env->hflags & HF_CS64_MASK) {
2081             sp_mask = -1;
2082         } else
2083 #endif
2084         {
2085             sp_mask = get_sp_mask(ss_e2);
2086         }
2087 
2088         /* validate data segments */
2089         validate_seg(env, R_ES, rpl);
2090         validate_seg(env, R_DS, rpl);
2091         validate_seg(env, R_FS, rpl);
2092         validate_seg(env, R_GS, rpl);
2093 
2094         sp += addend;
2095     }
2096     SET_ESP(sp, sp_mask);
2097     env->eip = new_eip;
2098     if (is_iret) {
2099         /* NOTE: 'cpl' is the _old_ CPL */
2100         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2101         if (cpl == 0) {
2102             eflags_mask |= IOPL_MASK;
2103         }
2104         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2105         if (cpl <= iopl) {
2106             eflags_mask |= IF_MASK;
2107         }
2108         if (shift == 0) {
2109             eflags_mask &= 0xffff;
2110         }
2111         cpu_load_eflags(env, new_eflags, eflags_mask);
2112     }
2113     return;
2114 
2115  return_to_vm86:
2116     POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2117     POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2118     POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2119     POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2120     POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2121     POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2122 
2123     /* modify processor state */
2124     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2125                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2126                     VIP_MASK);
2127     load_seg_vm(env, R_CS, new_cs & 0xffff);
2128     load_seg_vm(env, R_SS, new_ss & 0xffff);
2129     load_seg_vm(env, R_ES, new_es & 0xffff);
2130     load_seg_vm(env, R_DS, new_ds & 0xffff);
2131     load_seg_vm(env, R_FS, new_fs & 0xffff);
2132     load_seg_vm(env, R_GS, new_gs & 0xffff);
2133 
2134     env->eip = new_eip & 0xffff;
2135     env->regs[R_ESP] = new_esp;
2136 }
2137 
2138 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2139 {
2140     int tss_selector, type;
2141     uint32_t e1, e2;
2142 
2143     /* NT set: nested-task return via the back-link selector at offset 0 of the current TSS (not allowed in long mode) */
2144     if (env->eflags & NT_MASK) {
2145 #ifdef TARGET_X86_64
2146         if (env->hflags & HF_LMA_MASK) {
2147             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2148         }
2149 #endif
2150         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2151         if (tss_selector & 4) {
2152             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2153         }
2154         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2155             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2156         }
2157         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2158         /* NOTE: the 0x17 mask keeps the S bit, so this accepts only a system descriptor that is a busy TSS (type 3 or 11) */
2159         if (type != 3) {
2160             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2161         }
2162         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2163     } else {
2164         helper_ret_protected(env, shift, 1, 0, GETPC());
2165     }
2166     env->hflags2 &= ~HF2_NMI_MASK;
2167 }
2168 
2169 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2170 {
2171     helper_ret_protected(env, shift, 0, addend, GETPC());
2172 }
2173 
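/*
 * SYSENTER: fast entry to CPL 0.  CS and SS are loaded as flat
 * segments derived from the IA32_SYSENTER_CS MSR, ESP and EIP come
 * from IA32_SYSENTER_ESP/EIP, and VM, IF and RF are cleared.  No
 * return address is saved.
 */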
2174 void helper_sysenter(CPUX86State *env)
2175 {
2176     if (env->sysenter_cs == 0) {
2177         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2178     }
2179     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2180 
2181 #ifdef TARGET_X86_64
2182     if (env->hflags & HF_LMA_MASK) {
2183         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2184                                0, 0xffffffff,
2185                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2186                                DESC_S_MASK |
2187                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2188                                DESC_L_MASK);
2189     } else
2190 #endif
2191     {
2192         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2193                                0, 0xffffffff,
2194                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2195                                DESC_S_MASK |
2196                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2197     }
2198     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2199                            0, 0xffffffff,
2200                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2201                            DESC_S_MASK |
2202                            DESC_W_MASK | DESC_A_MASK);
2203     env->regs[R_ESP] = env->sysenter_esp;
2204     env->eip = env->sysenter_eip;
2205 }
2206 
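/*
 * SYSEXIT: fast return to CPL 3.  CS and SS are flat segments derived
 * from IA32_SYSENTER_CS (+16/+24, or +32/+40 for a 64-bit return),
 * the stack pointer is taken from ECX/RCX and the return address from
 * EDX/RDX.
 */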
2207 void helper_sysexit(CPUX86State *env, int dflag)
2208 {
2209     int cpl;
2210 
2211     cpl = env->hflags & HF_CPL_MASK;
2212     if (env->sysenter_cs == 0 || cpl != 0) {
2213         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2214     }
2215 #ifdef TARGET_X86_64
2216     if (dflag == 2) {
2217         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2218                                3, 0, 0xffffffff,
2219                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2220                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2221                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2222                                DESC_L_MASK);
2223         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2224                                3, 0, 0xffffffff,
2225                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2226                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2227                                DESC_W_MASK | DESC_A_MASK);
2228     } else
2229 #endif
2230     {
2231         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2232                                3, 0, 0xffffffff,
2233                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2234                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2235                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2236         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2237                                3, 0, 0xffffffff,
2238                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2239                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2240                                DESC_W_MASK | DESC_A_MASK);
2241     }
2242     env->regs[R_ESP] = env->regs[R_ECX];
2243     env->eip = env->regs[R_EDX];
2244 }
2245 
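/*
 * LSL: return the segment limit of the selector and set ZF on success;
 * if the descriptor cannot be examined from the current CPL/RPL, clear
 * ZF and return 0.
 */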
2246 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2247 {
2248     unsigned int limit;
2249     uint32_t e1, e2, eflags, selector;
2250     int rpl, dpl, cpl, type;
2251 
2252     selector = selector1 & 0xffff;
2253     eflags = cpu_cc_compute_all(env, CC_OP);
2254     if ((selector & 0xfffc) == 0) {
2255         goto fail;
2256     }
2257     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2258         goto fail;
2259     }
2260     rpl = selector & 3;
2261     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2262     cpl = env->hflags & HF_CPL_MASK;
2263     if (e2 & DESC_S_MASK) {
2264         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2265             /* conforming */
2266         } else {
2267             if (dpl < cpl || dpl < rpl) {
2268                 goto fail;
2269             }
2270         }
2271     } else {
2272         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2273         switch (type) {
2274         case 1:
2275         case 2:
2276         case 3:
2277         case 9:
2278         case 11:
2279             break;
2280         default:
2281             goto fail;
2282         }
2283         if (dpl < cpl || dpl < rpl) {
2284         fail:
2285             CC_SRC = eflags & ~CC_Z;
2286             return 0;
2287         }
2288     }
2289     limit = get_seg_limit(e1, e2);
2290     CC_SRC = eflags | CC_Z;
2291     return limit;
2292 }
2293 
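/*
 * LAR: return the access-rights bytes of the descriptor (masked with
 * 0x00f0ff00) and set ZF on success; clear ZF and return 0 otherwise.
 */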
2294 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2295 {
2296     uint32_t e1, e2, eflags, selector;
2297     int rpl, dpl, cpl, type;
2298 
2299     selector = selector1 & 0xffff;
2300     eflags = cpu_cc_compute_all(env, CC_OP);
2301     if ((selector & 0xfffc) == 0) {
2302         goto fail;
2303     }
2304     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2305         goto fail;
2306     }
2307     rpl = selector & 3;
2308     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2309     cpl = env->hflags & HF_CPL_MASK;
2310     if (e2 & DESC_S_MASK) {
2311         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2312             /* conforming */
2313         } else {
2314             if (dpl < cpl || dpl < rpl) {
2315                 goto fail;
2316             }
2317         }
2318     } else {
2319         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2320         switch (type) {
2321         case 1:
2322         case 2:
2323         case 3:
2324         case 4:
2325         case 5:
2326         case 9:
2327         case 11:
2328         case 12:
2329             break;
2330         default:
2331             goto fail;
2332         }
2333         if (dpl < cpl || dpl < rpl) {
2334         fail:
2335             CC_SRC = eflags & ~CC_Z;
2336             return 0;
2337         }
2338     }
2339     CC_SRC = eflags | CC_Z;
2340     return e2 & 0x00f0ff00;
2341 }
2342 
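/* VERR: set ZF if the segment is readable at the current CPL/RPL. */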
2343 void helper_verr(CPUX86State *env, target_ulong selector1)
2344 {
2345     uint32_t e1, e2, eflags, selector;
2346     int rpl, dpl, cpl;
2347 
2348     selector = selector1 & 0xffff;
2349     eflags = cpu_cc_compute_all(env, CC_OP);
2350     if ((selector & 0xfffc) == 0) {
2351         goto fail;
2352     }
2353     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2354         goto fail;
2355     }
2356     if (!(e2 & DESC_S_MASK)) {
2357         goto fail;
2358     }
2359     rpl = selector & 3;
2360     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2361     cpl = env->hflags & HF_CPL_MASK;
2362     if (e2 & DESC_CS_MASK) {
2363         if (!(e2 & DESC_R_MASK)) {
2364             goto fail;
2365         }
2366         if (!(e2 & DESC_C_MASK)) {
2367             if (dpl < cpl || dpl < rpl) {
2368                 goto fail;
2369             }
2370         }
2371     } else {
2372         if (dpl < cpl || dpl < rpl) {
2373         fail:
2374             CC_SRC = eflags & ~CC_Z;
2375             return;
2376         }
2377     }
2378     CC_SRC = eflags | CC_Z;
2379 }
2380 
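/* VERW: set ZF if the segment is writable data at the current CPL/RPL. */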
2381 void helper_verw(CPUX86State *env, target_ulong selector1)
2382 {
2383     uint32_t e1, e2, eflags, selector;
2384     int rpl, dpl, cpl;
2385 
2386     selector = selector1 & 0xffff;
2387     eflags = cpu_cc_compute_all(env, CC_OP);
2388     if ((selector & 0xfffc) == 0) {
2389         goto fail;
2390     }
2391     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2392         goto fail;
2393     }
2394     if (!(e2 & DESC_S_MASK)) {
2395         goto fail;
2396     }
2397     rpl = selector & 3;
2398     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2399     cpl = env->hflags & HF_CPL_MASK;
2400     if (e2 & DESC_CS_MASK) {
2401         goto fail;
2402     } else {
2403         if (dpl < cpl || dpl < rpl) {
2404             goto fail;
2405         }
2406         if (!(e2 & DESC_W_MASK)) {
2407         fail:
2408             CC_SRC = eflags & ~CC_Z;
2409             return;
2410         }
2411     }
2412     CC_SRC = eflags | CC_Z;
2413 }
2414