xref: /qemu/target/i386/tcg/seg_helper.c (revision af3f37319cb1e1ca0c42842ecdbd1bcfc64a4b6f)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 
29 //#define DEBUG_PCALL
30 
31 #ifdef DEBUG_PCALL
32 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
33 # define LOG_PCALL_STATE(cpu)                                  \
34     log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
35 #else
36 # define LOG_PCALL(...) do { } while (0)
37 # define LOG_PCALL_STATE(cpu) do { } while (0)
38 #endif
39 
40 /*
41  * TODO: Convert callers to compute cpu_mmu_index_kernel once
42  * and use *_mmuidx_ra directly.
43  */
44 #define cpu_ldub_kernel_ra(e, p, r) \
45     cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
46 #define cpu_lduw_kernel_ra(e, p, r) \
47     cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
48 #define cpu_ldl_kernel_ra(e, p, r) \
49     cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
50 #define cpu_ldq_kernel_ra(e, p, r) \
51     cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
52 
53 #define cpu_stb_kernel_ra(e, p, v, r) \
54     cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
55 #define cpu_stw_kernel_ra(e, p, v, r) \
56     cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
57 #define cpu_stl_kernel_ra(e, p, v, r) \
58     cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
59 #define cpu_stq_kernel_ra(e, p, v, r) \
60     cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
61 
62 #define cpu_ldub_kernel(e, p)    cpu_ldub_kernel_ra(e, p, 0)
63 #define cpu_lduw_kernel(e, p)    cpu_lduw_kernel_ra(e, p, 0)
64 #define cpu_ldl_kernel(e, p)     cpu_ldl_kernel_ra(e, p, 0)
65 #define cpu_ldq_kernel(e, p)     cpu_ldq_kernel_ra(e, p, 0)
66 
67 #define cpu_stb_kernel(e, p, v)  cpu_stb_kernel_ra(e, p, v, 0)
68 #define cpu_stw_kernel(e, p, v)  cpu_stw_kernel_ra(e, p, v, 0)
69 #define cpu_stl_kernel(e, p, v)  cpu_stl_kernel_ra(e, p, v, 0)
70 #define cpu_stq_kernel(e, p, v)  cpu_stq_kernel_ra(e, p, v, 0)
71 
72 /* return non zero if error */
73 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
74                                uint32_t *e2_ptr, int selector,
75                                uintptr_t retaddr)
76 {
77     SegmentCache *dt;
78     int index;
79     target_ulong ptr;
80 
81     if (selector & 0x4) {
82         dt = &env->ldt;
83     } else {
84         dt = &env->gdt;
85     }
86     index = selector & ~7;
87     if ((index + 7) > dt->limit) {
88         return -1;
89     }
90     ptr = dt->base + index;
91     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
92     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
93     return 0;
94 }
95 
96 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
97                                uint32_t *e2_ptr, int selector)
98 {
99     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
100 }
101 
102 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
103 {
104     unsigned int limit;
105 
106     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
107     if (e2 & DESC_G_MASK) {
108         limit = (limit << 12) | 0xfff;
109     }
110     return limit;
111 }
112 
113 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
114 {
115     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
116 }
117 
118 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
119                                          uint32_t e2)
120 {
121     sc->base = get_seg_base(e1, e2);
122     sc->limit = get_seg_limit(e1, e2);
123     sc->flags = e2;
124 }
125 
126 /* init the segment cache in vm86 mode. */
127 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
128 {
129     selector &= 0xffff;
130 
131     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
132                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
133                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
134 }
135 
136 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
137                                        uint32_t *esp_ptr, int dpl,
138                                        uintptr_t retaddr)
139 {
140     X86CPU *cpu = env_archcpu(env);
141     int type, index, shift;
142 
143 #if 0
144     {
145         int i;
146         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
147         for (i = 0; i < env->tr.limit; i++) {
148             printf("%02x ", env->tr.base[i]);
149             if ((i & 7) == 7) {
150                 printf("\n");
151             }
152         }
153         printf("\n");
154     }
155 #endif
156 
157     if (!(env->tr.flags & DESC_P_MASK)) {
158         cpu_abort(CPU(cpu), "invalid tss");
159     }
160     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
161     if ((type & 7) != 1) {
162         cpu_abort(CPU(cpu), "invalid tss type");
163     }
164     shift = type >> 3;
165     index = (dpl * 4 + 2) << shift;
166     if (index + (4 << shift) - 1 > env->tr.limit) {
167         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
168     }
169     if (shift == 0) {
170         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
171         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
172     } else {
173         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
174         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
175     }
176 }
177 
178 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
179                          uintptr_t retaddr)
180 {
181     uint32_t e1, e2;
182     int rpl, dpl;
183 
184     if ((selector & 0xfffc) != 0) {
185         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
186             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
187         }
188         if (!(e2 & DESC_S_MASK)) {
189             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
190         }
191         rpl = selector & 3;
192         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
193         if (seg_reg == R_CS) {
194             if (!(e2 & DESC_CS_MASK)) {
195                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
196             }
197             if (dpl != rpl) {
198                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
199             }
200         } else if (seg_reg == R_SS) {
201             /* SS must be writable data */
202             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
203                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
204             }
205             if (dpl != cpl || dpl != rpl) {
206                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
207             }
208         } else {
209             /* not readable code */
210             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
211                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
212             }
213             /* if data or non conforming code, checks the rights */
214             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
215                 if (dpl < cpl || dpl < rpl) {
216                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
217                 }
218             }
219         }
220         if (!(e2 & DESC_P_MASK)) {
221             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
222         }
223         cpu_x86_load_seg_cache(env, seg_reg, selector,
224                                get_seg_base(e1, e2),
225                                get_seg_limit(e1, e2),
226                                e2);
227     } else {
228         if (seg_reg == R_SS || seg_reg == R_CS) {
229             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
230         }
231     }
232 }
233 
234 #define SWITCH_TSS_JMP  0
235 #define SWITCH_TSS_IRET 1
236 #define SWITCH_TSS_CALL 2
237 
238 /* XXX: restore CPU state in registers (PowerPC case) */
239 static void switch_tss_ra(CPUX86State *env, int tss_selector,
240                           uint32_t e1, uint32_t e2, int source,
241                           uint32_t next_eip, uintptr_t retaddr)
242 {
243     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
244     target_ulong tss_base;
245     uint32_t new_regs[8], new_segs[6];
246     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
247     uint32_t old_eflags, eflags_mask;
248     SegmentCache *dt;
249     int index;
250     target_ulong ptr;
251 
252     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
253     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
254               source);
255 
256     /* if task gate, we read the TSS segment and we load it */
257     if (type == 5) {
258         if (!(e2 & DESC_P_MASK)) {
259             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
260         }
261         tss_selector = e1 >> 16;
262         if (tss_selector & 4) {
263             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
264         }
265         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
266             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
267         }
268         if (e2 & DESC_S_MASK) {
269             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
270         }
271         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
272         if ((type & 7) != 1) {
273             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
274         }
275     }
276 
277     if (!(e2 & DESC_P_MASK)) {
278         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
279     }
280 
281     if (type & 8) {
282         tss_limit_max = 103;
283     } else {
284         tss_limit_max = 43;
285     }
286     tss_limit = get_seg_limit(e1, e2);
287     tss_base = get_seg_base(e1, e2);
288     if ((tss_selector & 4) != 0 ||
289         tss_limit < tss_limit_max) {
290         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
291     }
292     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
293     if (old_type & 8) {
294         old_tss_limit_max = 103;
295     } else {
296         old_tss_limit_max = 43;
297     }
298 
299     /* read all the registers from the new TSS */
300     if (type & 8) {
301         /* 32 bit */
302         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
303         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
304         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
305         for (i = 0; i < 8; i++) {
306             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
307                                             retaddr);
308         }
309         for (i = 0; i < 6; i++) {
310             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
311                                              retaddr);
312         }
313         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
314         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
315     } else {
316         /* 16 bit */
317         new_cr3 = 0;
318         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
319         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
320         for (i = 0; i < 8; i++) {
321             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
322                                              retaddr) | 0xffff0000;
323         }
324         for (i = 0; i < 4; i++) {
325             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
326                                              retaddr);
327         }
328         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
329         new_segs[R_FS] = 0;
330         new_segs[R_GS] = 0;
331         new_trap = 0;
332     }
333     /* XXX: avoid a compiler warning, see
334      http://support.amd.com/us/Processor_TechDocs/24593.pdf
335      chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
336     (void)new_trap;
337 
338     /* NOTE: we must avoid memory exceptions during the task switch,
339        so we make dummy accesses before */
340     /* XXX: it can still fail in some cases, so a bigger hack is
341        necessary to valid the TLB after having done the accesses */
342 
343     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
344     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
345     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
346     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
347 
348     /* clear busy bit (it is restartable) */
349     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
350         target_ulong ptr;
351         uint32_t e2;
352 
353         ptr = env->gdt.base + (env->tr.selector & ~7);
354         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
355         e2 &= ~DESC_TSS_BUSY_MASK;
356         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
357     }
358     old_eflags = cpu_compute_eflags(env);
359     if (source == SWITCH_TSS_IRET) {
360         old_eflags &= ~NT_MASK;
361     }
362 
363     /* save the current state in the old TSS */
364     if (type & 8) {
365         /* 32 bit */
366         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
367         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
368         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
369         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
370         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
371         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
372         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
373         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
374         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
375         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
376         for (i = 0; i < 6; i++) {
377             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
378                               env->segs[i].selector, retaddr);
379         }
380     } else {
381         /* 16 bit */
382         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
383         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
384         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
385         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
386         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
387         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
388         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
389         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
390         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
391         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
392         for (i = 0; i < 4; i++) {
393             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
394                               env->segs[i].selector, retaddr);
395         }
396     }
397 
398     /* now if an exception occurs, it will occurs in the next task
399        context */
400 
401     if (source == SWITCH_TSS_CALL) {
402         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
403         new_eflags |= NT_MASK;
404     }
405 
406     /* set busy bit */
407     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
408         target_ulong ptr;
409         uint32_t e2;
410 
411         ptr = env->gdt.base + (tss_selector & ~7);
412         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
413         e2 |= DESC_TSS_BUSY_MASK;
414         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
415     }
416 
417     /* set the new CPU state */
418     /* from this point, any exception which occurs can give problems */
419     env->cr[0] |= CR0_TS_MASK;
420     env->hflags |= HF_TS_MASK;
421     env->tr.selector = tss_selector;
422     env->tr.base = tss_base;
423     env->tr.limit = tss_limit;
424     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
425 
426     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427         cpu_x86_update_cr3(env, new_cr3);
428     }
429 
430     /* load all registers without an exception, then reload them with
431        possible exception */
432     env->eip = new_eip;
433     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435     if (!(type & 8)) {
436         eflags_mask &= 0xffff;
437     }
438     cpu_load_eflags(env, new_eflags, eflags_mask);
439     /* XXX: what to do in 16 bit case? */
440     env->regs[R_EAX] = new_regs[0];
441     env->regs[R_ECX] = new_regs[1];
442     env->regs[R_EDX] = new_regs[2];
443     env->regs[R_EBX] = new_regs[3];
444     env->regs[R_ESP] = new_regs[4];
445     env->regs[R_EBP] = new_regs[5];
446     env->regs[R_ESI] = new_regs[6];
447     env->regs[R_EDI] = new_regs[7];
448     if (new_eflags & VM_MASK) {
449         for (i = 0; i < 6; i++) {
450             load_seg_vm(env, i, new_segs[i]);
451         }
452     } else {
453         /* first just selectors as the rest may trigger exceptions */
454         for (i = 0; i < 6; i++) {
455             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
456         }
457     }
458 
459     env->ldt.selector = new_ldt & ~4;
460     env->ldt.base = 0;
461     env->ldt.limit = 0;
462     env->ldt.flags = 0;
463 
464     /* load the LDT */
465     if (new_ldt & 4) {
466         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
467     }
468 
469     if ((new_ldt & 0xfffc) != 0) {
470         dt = &env->gdt;
471         index = new_ldt & ~7;
472         if ((index + 7) > dt->limit) {
473             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
474         }
475         ptr = dt->base + index;
476         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
477         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
478         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
479             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
480         }
481         if (!(e2 & DESC_P_MASK)) {
482             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
483         }
484         load_seg_cache_raw_dt(&env->ldt, e1, e2);
485     }
486 
487     /* load the segments */
488     if (!(new_eflags & VM_MASK)) {
489         int cpl = new_segs[R_CS] & 3;
490         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
491         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
492         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
493         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
494         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
495         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
496     }
497 
498     /* check that env->eip is in the CS segment limits */
499     if (new_eip > env->segs[R_CS].limit) {
500         /* XXX: different exception if CALL? */
501         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
502     }
503 
504 #ifndef CONFIG_USER_ONLY
505     /* reset local breakpoints */
506     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
507         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
508     }
509 #endif
510 }
511 
512 static void switch_tss(CPUX86State *env, int tss_selector,
513                        uint32_t e1, uint32_t e2, int source,
514                         uint32_t next_eip)
515 {
516     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
517 }
518 
519 static inline unsigned int get_sp_mask(unsigned int e2)
520 {
521 #ifdef TARGET_X86_64
522     if (e2 & DESC_L_MASK) {
523         return 0;
524     } else
525 #endif
526     if (e2 & DESC_B_MASK) {
527         return 0xffffffff;
528     } else {
529         return 0xffff;
530     }
531 }
532 
533 static int exception_has_error_code(int intno)
534 {
535     switch (intno) {
536     case 8:
537     case 10:
538     case 11:
539     case 12:
540     case 13:
541     case 14:
542     case 17:
543         return 1;
544     }
545     return 0;
546 }
547 
548 #ifdef TARGET_X86_64
549 #define SET_ESP(val, sp_mask)                                   \
550     do {                                                        \
551         if ((sp_mask) == 0xffff) {                              \
552             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
553                 ((val) & 0xffff);                               \
554         } else if ((sp_mask) == 0xffffffffLL) {                 \
555             env->regs[R_ESP] = (uint32_t)(val);                 \
556         } else {                                                \
557             env->regs[R_ESP] = (val);                           \
558         }                                                       \
559     } while (0)
560 #else
561 #define SET_ESP(val, sp_mask)                                   \
562     do {                                                        \
563         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
564             ((val) & (sp_mask));                                \
565     } while (0)
566 #endif
567 
568 /* in 64-bit machines, this can overflow. So this segment addition macro
569  * can be used to trim the value to 32-bit whenever needed */
570 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
571 
572 /* XXX: add a is_user flag to have proper security support */
573 #define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
574     {                                                            \
575         sp -= 2;                                                 \
576         cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
577     }
578 
579 #define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
580     {                                                                   \
581         sp -= 4;                                                        \
582         cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
583     }
584 
585 #define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
586     {                                                            \
587         val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
588         sp += 2;                                                 \
589     }
590 
591 #define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
592     {                                                                   \
593         val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
594         sp += 4;                                                        \
595     }
596 
597 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
598 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
599 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
600 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
601 
602 /* protected mode interrupt */
603 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
604                                    int error_code, unsigned int next_eip,
605                                    int is_hw)
606 {
607     SegmentCache *dt;
608     target_ulong ptr, ssp;
609     int type, dpl, selector, ss_dpl, cpl;
610     int has_error_code, new_stack, shift;
611     uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
612     uint32_t old_eip, sp_mask;
613     int vm86 = env->eflags & VM_MASK;
614 
615     has_error_code = 0;
616     if (!is_int && !is_hw) {
617         has_error_code = exception_has_error_code(intno);
618     }
619     if (is_int) {
620         old_eip = next_eip;
621     } else {
622         old_eip = env->eip;
623     }
624 
625     dt = &env->idt;
626     if (intno * 8 + 7 > dt->limit) {
627         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
628     }
629     ptr = dt->base + intno * 8;
630     e1 = cpu_ldl_kernel(env, ptr);
631     e2 = cpu_ldl_kernel(env, ptr + 4);
632     /* check gate type */
633     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
634     switch (type) {
635     case 5: /* task gate */
636         /* must do that check here to return the correct error code */
637         if (!(e2 & DESC_P_MASK)) {
638             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
639         }
640         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
641         if (has_error_code) {
642             int type;
643             uint32_t mask;
644 
645             /* push the error code */
646             type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
647             shift = type >> 3;
648             if (env->segs[R_SS].flags & DESC_B_MASK) {
649                 mask = 0xffffffff;
650             } else {
651                 mask = 0xffff;
652             }
653             esp = (env->regs[R_ESP] - (2 << shift)) & mask;
654             ssp = env->segs[R_SS].base + esp;
655             if (shift) {
656                 cpu_stl_kernel(env, ssp, error_code);
657             } else {
658                 cpu_stw_kernel(env, ssp, error_code);
659             }
660             SET_ESP(esp, mask);
661         }
662         return;
663     case 6: /* 286 interrupt gate */
664     case 7: /* 286 trap gate */
665     case 14: /* 386 interrupt gate */
666     case 15: /* 386 trap gate */
667         break;
668     default:
669         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
670         break;
671     }
672     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
673     cpl = env->hflags & HF_CPL_MASK;
674     /* check privilege if software int */
675     if (is_int && dpl < cpl) {
676         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
677     }
678     /* check valid bit */
679     if (!(e2 & DESC_P_MASK)) {
680         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
681     }
682     selector = e1 >> 16;
683     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
684     if ((selector & 0xfffc) == 0) {
685         raise_exception_err(env, EXCP0D_GPF, 0);
686     }
687     if (load_segment(env, &e1, &e2, selector) != 0) {
688         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
689     }
690     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
691         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
692     }
693     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
694     if (dpl > cpl) {
695         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
696     }
697     if (!(e2 & DESC_P_MASK)) {
698         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
699     }
700     if (e2 & DESC_C_MASK) {
701         dpl = cpl;
702     }
703     if (dpl < cpl) {
704         /* to inner privilege */
705         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
706         if ((ss & 0xfffc) == 0) {
707             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
708         }
709         if ((ss & 3) != dpl) {
710             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
711         }
712         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
713             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
714         }
715         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
716         if (ss_dpl != dpl) {
717             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
718         }
719         if (!(ss_e2 & DESC_S_MASK) ||
720             (ss_e2 & DESC_CS_MASK) ||
721             !(ss_e2 & DESC_W_MASK)) {
722             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
723         }
724         if (!(ss_e2 & DESC_P_MASK)) {
725             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
726         }
727         new_stack = 1;
728         sp_mask = get_sp_mask(ss_e2);
729         ssp = get_seg_base(ss_e1, ss_e2);
730     } else  {
731         /* to same privilege */
732         if (vm86) {
733             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
734         }
735         new_stack = 0;
736         sp_mask = get_sp_mask(env->segs[R_SS].flags);
737         ssp = env->segs[R_SS].base;
738         esp = env->regs[R_ESP];
739     }
740 
741     shift = type >> 3;
742 
743 #if 0
744     /* XXX: check that enough room is available */
745     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
746     if (vm86) {
747         push_size += 8;
748     }
749     push_size <<= shift;
750 #endif
751     if (shift == 1) {
752         if (new_stack) {
753             if (vm86) {
754                 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
755                 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
756                 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
757                 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
758             }
759             PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
760             PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
761         }
762         PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
763         PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
764         PUSHL(ssp, esp, sp_mask, old_eip);
765         if (has_error_code) {
766             PUSHL(ssp, esp, sp_mask, error_code);
767         }
768     } else {
769         if (new_stack) {
770             if (vm86) {
771                 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
772                 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
773                 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
774                 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
775             }
776             PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
777             PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
778         }
779         PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
780         PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
781         PUSHW(ssp, esp, sp_mask, old_eip);
782         if (has_error_code) {
783             PUSHW(ssp, esp, sp_mask, error_code);
784         }
785     }
786 
787     /* interrupt gate clear IF mask */
788     if ((type & 1) == 0) {
789         env->eflags &= ~IF_MASK;
790     }
791     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
792 
793     if (new_stack) {
794         if (vm86) {
795             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
796             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
797             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
798             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
799         }
800         ss = (ss & ~3) | dpl;
801         cpu_x86_load_seg_cache(env, R_SS, ss,
802                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
803     }
804     SET_ESP(esp, sp_mask);
805 
806     selector = (selector & ~3) | dpl;
807     cpu_x86_load_seg_cache(env, R_CS, selector,
808                    get_seg_base(e1, e2),
809                    get_seg_limit(e1, e2),
810                    e2);
811     env->eip = offset;
812 }
813 
814 #ifdef TARGET_X86_64
815 
816 #define PUSHQ_RA(sp, val, ra)                   \
817     {                                           \
818         sp -= 8;                                \
819         cpu_stq_kernel_ra(env, sp, (val), ra);  \
820     }
821 
822 #define POPQ_RA(sp, val, ra)                    \
823     {                                           \
824         val = cpu_ldq_kernel_ra(env, sp, ra);   \
825         sp += 8;                                \
826     }
827 
828 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
829 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
830 
831 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
832 {
833     X86CPU *cpu = env_archcpu(env);
834     int index;
835 
836 #if 0
837     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
838            env->tr.base, env->tr.limit);
839 #endif
840 
841     if (!(env->tr.flags & DESC_P_MASK)) {
842         cpu_abort(CPU(cpu), "invalid tss");
843     }
844     index = 8 * level + 4;
845     if ((index + 7) > env->tr.limit) {
846         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
847     }
848     return cpu_ldq_kernel(env, env->tr.base + index);
849 }
850 
851 /* 64 bit interrupt */
852 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
853                            int error_code, target_ulong next_eip, int is_hw)
854 {
855     SegmentCache *dt;
856     target_ulong ptr;
857     int type, dpl, selector, cpl, ist;
858     int has_error_code, new_stack;
859     uint32_t e1, e2, e3, ss;
860     target_ulong old_eip, esp, offset;
861 
862     has_error_code = 0;
863     if (!is_int && !is_hw) {
864         has_error_code = exception_has_error_code(intno);
865     }
866     if (is_int) {
867         old_eip = next_eip;
868     } else {
869         old_eip = env->eip;
870     }
871 
872     dt = &env->idt;
873     if (intno * 16 + 15 > dt->limit) {
874         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
875     }
876     ptr = dt->base + intno * 16;
877     e1 = cpu_ldl_kernel(env, ptr);
878     e2 = cpu_ldl_kernel(env, ptr + 4);
879     e3 = cpu_ldl_kernel(env, ptr + 8);
880     /* check gate type */
881     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
882     switch (type) {
883     case 14: /* 386 interrupt gate */
884     case 15: /* 386 trap gate */
885         break;
886     default:
887         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
888         break;
889     }
890     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
891     cpl = env->hflags & HF_CPL_MASK;
892     /* check privilege if software int */
893     if (is_int && dpl < cpl) {
894         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
895     }
896     /* check valid bit */
897     if (!(e2 & DESC_P_MASK)) {
898         raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
899     }
900     selector = e1 >> 16;
901     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
902     ist = e2 & 7;
903     if ((selector & 0xfffc) == 0) {
904         raise_exception_err(env, EXCP0D_GPF, 0);
905     }
906 
907     if (load_segment(env, &e1, &e2, selector) != 0) {
908         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
909     }
910     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
911         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
912     }
913     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
914     if (dpl > cpl) {
915         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
916     }
917     if (!(e2 & DESC_P_MASK)) {
918         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
919     }
920     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
921         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
922     }
923     if (e2 & DESC_C_MASK) {
924         dpl = cpl;
925     }
926     if (dpl < cpl || ist != 0) {
927         /* to inner privilege */
928         new_stack = 1;
929         esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
930         ss = 0;
931     } else {
932         /* to same privilege */
933         if (env->eflags & VM_MASK) {
934             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
935         }
936         new_stack = 0;
937         esp = env->regs[R_ESP];
938     }
939     esp &= ~0xfLL; /* align stack */
940 
941     PUSHQ(esp, env->segs[R_SS].selector);
942     PUSHQ(esp, env->regs[R_ESP]);
943     PUSHQ(esp, cpu_compute_eflags(env));
944     PUSHQ(esp, env->segs[R_CS].selector);
945     PUSHQ(esp, old_eip);
946     if (has_error_code) {
947         PUSHQ(esp, error_code);
948     }
949 
950     /* interrupt gate clear IF mask */
951     if ((type & 1) == 0) {
952         env->eflags &= ~IF_MASK;
953     }
954     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
955 
956     if (new_stack) {
957         ss = 0 | dpl;
958         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
959     }
960     env->regs[R_ESP] = esp;
961 
962     selector = (selector & ~3) | dpl;
963     cpu_x86_load_seg_cache(env, R_CS, selector,
964                    get_seg_base(e1, e2),
965                    get_seg_limit(e1, e2),
966                    e2);
967     env->eip = offset;
968 }
969 #endif
970 
971 #ifdef TARGET_X86_64
972 #if defined(CONFIG_USER_ONLY)
973 void helper_syscall(CPUX86State *env, int next_eip_addend)
974 {
975     CPUState *cs = env_cpu(env);
976 
977     cs->exception_index = EXCP_SYSCALL;
978     env->exception_is_int = 0;
979     env->exception_next_eip = env->eip + next_eip_addend;
980     cpu_loop_exit(cs);
981 }
982 #else
983 void helper_syscall(CPUX86State *env, int next_eip_addend)
984 {
985     int selector;
986 
987     if (!(env->efer & MSR_EFER_SCE)) {
988         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
989     }
990     selector = (env->star >> 32) & 0xffff;
991     if (env->hflags & HF_LMA_MASK) {
992         int code64;
993 
994         env->regs[R_ECX] = env->eip + next_eip_addend;
995         env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
996 
997         code64 = env->hflags & HF_CS64_MASK;
998 
999         env->eflags &= ~(env->fmask | RF_MASK);
1000         cpu_load_eflags(env, env->eflags, 0);
1001         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1002                            0, 0xffffffff,
1003                                DESC_G_MASK | DESC_P_MASK |
1004                                DESC_S_MASK |
1005                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1006                                DESC_L_MASK);
1007         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1008                                0, 0xffffffff,
1009                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1010                                DESC_S_MASK |
1011                                DESC_W_MASK | DESC_A_MASK);
1012         if (code64) {
1013             env->eip = env->lstar;
1014         } else {
1015             env->eip = env->cstar;
1016         }
1017     } else {
1018         env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1019 
1020         env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1021         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1022                            0, 0xffffffff,
1023                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1024                                DESC_S_MASK |
1025                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1026         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1027                                0, 0xffffffff,
1028                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1029                                DESC_S_MASK |
1030                                DESC_W_MASK | DESC_A_MASK);
1031         env->eip = (uint32_t)env->star;
1032     }
1033 }
1034 #endif
1035 #endif
1036 
1037 #ifdef TARGET_X86_64
1038 void helper_sysret(CPUX86State *env, int dflag)
1039 {
1040     int cpl, selector;
1041 
1042     if (!(env->efer & MSR_EFER_SCE)) {
1043         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1044     }
1045     cpl = env->hflags & HF_CPL_MASK;
1046     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1047         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1048     }
1049     selector = (env->star >> 48) & 0xffff;
1050     if (env->hflags & HF_LMA_MASK) {
1051         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1052                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1053                         NT_MASK);
1054         if (dflag == 2) {
1055             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1056                                    0, 0xffffffff,
1057                                    DESC_G_MASK | DESC_P_MASK |
1058                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1059                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1060                                    DESC_L_MASK);
1061             env->eip = env->regs[R_ECX];
1062         } else {
1063             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1064                                    0, 0xffffffff,
1065                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1066                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1067                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1068             env->eip = (uint32_t)env->regs[R_ECX];
1069         }
1070         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1071                                0, 0xffffffff,
1072                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1073                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1074                                DESC_W_MASK | DESC_A_MASK);
1075     } else {
1076         env->eflags |= IF_MASK;
1077         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1078                                0, 0xffffffff,
1079                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1082         env->eip = (uint32_t)env->regs[R_ECX];
1083         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1084                                0, 0xffffffff,
1085                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1086                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1087                                DESC_W_MASK | DESC_A_MASK);
1088     }
1089 }
1090 #endif
1091 
1092 /* real mode interrupt */
1093 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1094                               int error_code, unsigned int next_eip)
1095 {
1096     SegmentCache *dt;
1097     target_ulong ptr, ssp;
1098     int selector;
1099     uint32_t offset, esp;
1100     uint32_t old_cs, old_eip;
1101 
1102     /* real mode (simpler!) */
1103     dt = &env->idt;
1104     if (intno * 4 + 3 > dt->limit) {
1105         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1106     }
1107     ptr = dt->base + intno * 4;
1108     offset = cpu_lduw_kernel(env, ptr);
1109     selector = cpu_lduw_kernel(env, ptr + 2);
1110     esp = env->regs[R_ESP];
1111     ssp = env->segs[R_SS].base;
1112     if (is_int) {
1113         old_eip = next_eip;
1114     } else {
1115         old_eip = env->eip;
1116     }
1117     old_cs = env->segs[R_CS].selector;
1118     /* XXX: use SS segment size? */
1119     PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1120     PUSHW(ssp, esp, 0xffff, old_cs);
1121     PUSHW(ssp, esp, 0xffff, old_eip);
1122 
1123     /* update processor state */
1124     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1125     env->eip = offset;
1126     env->segs[R_CS].selector = selector;
1127     env->segs[R_CS].base = (selector << 4);
1128     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1129 }
1130 
1131 #if defined(CONFIG_USER_ONLY)
1132 /* fake user mode interrupt. is_int is TRUE if coming from the int
1133  * instruction. next_eip is the env->eip value AFTER the interrupt
1134  * instruction. It is only relevant if is_int is TRUE or if intno
1135  * is EXCP_SYSCALL.
1136  */
1137 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1138                               int error_code, target_ulong next_eip)
1139 {
1140     if (is_int) {
1141         SegmentCache *dt;
1142         target_ulong ptr;
1143         int dpl, cpl, shift;
1144         uint32_t e2;
1145 
1146         dt = &env->idt;
1147         if (env->hflags & HF_LMA_MASK) {
1148             shift = 4;
1149         } else {
1150             shift = 3;
1151         }
1152         ptr = dt->base + (intno << shift);
1153         e2 = cpu_ldl_kernel(env, ptr + 4);
1154 
1155         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1156         cpl = env->hflags & HF_CPL_MASK;
1157         /* check privilege if software int */
1158         if (dpl < cpl) {
1159             raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1160         }
1161     }
1162 
1163     /* Since we emulate only user space, we cannot do more than
1164        exiting the emulation with the suitable exception and error
1165        code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1166     if (is_int || intno == EXCP_SYSCALL) {
1167         env->eip = next_eip;
1168     }
1169 }
1170 
1171 #else
1172 
1173 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1174                             int error_code, int is_hw, int rm)
1175 {
1176     CPUState *cs = env_cpu(env);
1177     uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1178                                                           control.event_inj));
1179 
1180     if (!(event_inj & SVM_EVTINJ_VALID)) {
1181         int type;
1182 
1183         if (is_int) {
1184             type = SVM_EVTINJ_TYPE_SOFT;
1185         } else {
1186             type = SVM_EVTINJ_TYPE_EXEPT;
1187         }
1188         event_inj = intno | type | SVM_EVTINJ_VALID;
1189         if (!rm && exception_has_error_code(intno)) {
1190             event_inj |= SVM_EVTINJ_VALID_ERR;
1191             x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1192                                              control.event_inj_err),
1193                      error_code);
1194         }
1195         x86_stl_phys(cs,
1196                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1197                  event_inj);
1198     }
1199 }
1200 #endif
1201 
1202 /*
1203  * Begin execution of an interruption. is_int is TRUE if coming from
1204  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1205  * instruction. It is only relevant if is_int is TRUE.
1206  */
1207 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1208                              int error_code, target_ulong next_eip, int is_hw)
1209 {
1210     CPUX86State *env = &cpu->env;
1211 
1212     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1213         if ((env->cr[0] & CR0_PE_MASK)) {
1214             static int count;
1215 
1216             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1217                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1218                      count, intno, error_code, is_int,
1219                      env->hflags & HF_CPL_MASK,
1220                      env->segs[R_CS].selector, env->eip,
1221                      (int)env->segs[R_CS].base + env->eip,
1222                      env->segs[R_SS].selector, env->regs[R_ESP]);
1223             if (intno == 0x0e) {
1224                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1225             } else {
1226                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1227             }
1228             qemu_log("\n");
1229             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1230 #if 0
1231             {
1232                 int i;
1233                 target_ulong ptr;
1234 
1235                 qemu_log("       code=");
1236                 ptr = env->segs[R_CS].base + env->eip;
1237                 for (i = 0; i < 16; i++) {
1238                     qemu_log(" %02x", ldub(ptr + i));
1239                 }
1240                 qemu_log("\n");
1241             }
1242 #endif
1243             count++;
1244         }
1245     }
1246     if (env->cr[0] & CR0_PE_MASK) {
1247 #if !defined(CONFIG_USER_ONLY)
1248         if (env->hflags & HF_GUEST_MASK) {
1249             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1250         }
1251 #endif
1252 #ifdef TARGET_X86_64
1253         if (env->hflags & HF_LMA_MASK) {
1254             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1255         } else
1256 #endif
1257         {
1258             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1259                                    is_hw);
1260         }
1261     } else {
1262 #if !defined(CONFIG_USER_ONLY)
1263         if (env->hflags & HF_GUEST_MASK) {
1264             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1265         }
1266 #endif
1267         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1268     }
1269 
1270 #if !defined(CONFIG_USER_ONLY)
1271     if (env->hflags & HF_GUEST_MASK) {
1272         CPUState *cs = CPU(cpu);
1273         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1274                                       offsetof(struct vmcb,
1275                                                control.event_inj));
1276 
1277         x86_stl_phys(cs,
1278                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1279                  event_inj & ~SVM_EVTINJ_VALID);
1280     }
1281 #endif
1282 }
1283 
1284 void x86_cpu_do_interrupt(CPUState *cs)
1285 {
1286     X86CPU *cpu = X86_CPU(cs);
1287     CPUX86State *env = &cpu->env;
1288 
1289 #if defined(CONFIG_USER_ONLY)
1290     /* if user mode only, we simulate a fake exception
1291        which will be handled outside the cpu execution
1292        loop */
1293     do_interrupt_user(env, cs->exception_index,
1294                       env->exception_is_int,
1295                       env->error_code,
1296                       env->exception_next_eip);
1297     /* successfully delivered */
1298     env->old_exception = -1;
1299 #else
1300     if (cs->exception_index >= EXCP_VMEXIT) {
1301         assert(env->old_exception == -1);
1302         do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
1303     } else {
1304         do_interrupt_all(cpu, cs->exception_index,
1305                          env->exception_is_int,
1306                          env->error_code,
1307                          env->exception_next_eip, 0);
1308         /* successfully delivered */
1309         env->old_exception = -1;
1310     }
1311 #endif
1312 }
1313 
1314 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1315 {
1316     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1317 }
1318 
1319 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1320 {
1321     X86CPU *cpu = X86_CPU(cs);
1322     CPUX86State *env = &cpu->env;
1323     int intno;
1324 
1325     interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
1326     if (!interrupt_request) {
1327         return false;
1328     }
1329 
1330     /* Don't process multiple interrupt requests in a single call.
1331      * This is required to make icount-driven execution deterministic.
1332      */
1333     switch (interrupt_request) {
1334 #if !defined(CONFIG_USER_ONLY)
1335     case CPU_INTERRUPT_POLL:
1336         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1337         apic_poll_irq(cpu->apic_state);
1338         break;
1339 #endif
1340     case CPU_INTERRUPT_SIPI:
1341         do_cpu_sipi(cpu);
1342         break;
1343     case CPU_INTERRUPT_SMI:
1344         cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
1345         cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1346         do_smm_enter(cpu);
1347         break;
1348     case CPU_INTERRUPT_NMI:
1349         cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
1350         cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1351         env->hflags2 |= HF2_NMI_MASK;
1352         do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1353         break;
1354     case CPU_INTERRUPT_MCE:
1355         cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1356         do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1357         break;
1358     case CPU_INTERRUPT_HARD:
1359         cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
1360         cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1361                                    CPU_INTERRUPT_VIRQ);
1362         intno = cpu_get_pic_interrupt(env);
1363         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1364                       "Servicing hardware INT=0x%02x\n", intno);
1365         do_interrupt_x86_hardirq(env, intno, 1);
1366         break;
1367 #if !defined(CONFIG_USER_ONLY)
1368     case CPU_INTERRUPT_VIRQ:
1369         /* FIXME: this should respect TPR */
1370         cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
1371         intno = x86_ldl_phys(cs, env->vm_vmcb
1372                              + offsetof(struct vmcb, control.int_vector));
1373         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1374                       "Servicing virtual hardware INT=0x%02x\n", intno);
1375         do_interrupt_x86_hardirq(env, intno, 1);
1376         cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1377         break;
1378 #endif
1379     }
1380 
1381     /* Ensure that no TB jump will be modified as the program flow was changed.  */
1382     return true;
1383 }
1384 
1385 void helper_lldt(CPUX86State *env, int selector)
1386 {
1387     SegmentCache *dt;
1388     uint32_t e1, e2;
1389     int index, entry_limit;
1390     target_ulong ptr;
1391 
1392     selector &= 0xffff;
1393     if ((selector & 0xfffc) == 0) {
1394         /* XXX: NULL selector case: invalid LDT */
1395         env->ldt.base = 0;
1396         env->ldt.limit = 0;
1397     } else {
1398         if (selector & 0x4) {
1399             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1400         }
1401         dt = &env->gdt;
1402         index = selector & ~7;
1403 #ifdef TARGET_X86_64
1404         if (env->hflags & HF_LMA_MASK) {
1405             entry_limit = 15;
1406         } else
1407 #endif
1408         {
1409             entry_limit = 7;
1410         }
1411         if ((index + entry_limit) > dt->limit) {
1412             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1413         }
1414         ptr = dt->base + index;
1415         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1416         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1417         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1418             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1419         }
1420         if (!(e2 & DESC_P_MASK)) {
1421             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1422         }
1423 #ifdef TARGET_X86_64
1424         if (env->hflags & HF_LMA_MASK) {
1425             uint32_t e3;
1426 
1427             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1428             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1429             env->ldt.base |= (target_ulong)e3 << 32;
1430         } else
1431 #endif
1432         {
1433             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1434         }
1435     }
1436     env->ldt.selector = selector;
1437 }
1438 
1439 void helper_ltr(CPUX86State *env, int selector)
1440 {
1441     SegmentCache *dt;
1442     uint32_t e1, e2;
1443     int index, type, entry_limit;
1444     target_ulong ptr;
1445 
1446     selector &= 0xffff;
1447     if ((selector & 0xfffc) == 0) {
1448         /* NULL selector case: invalid TR */
1449         env->tr.base = 0;
1450         env->tr.limit = 0;
1451         env->tr.flags = 0;
1452     } else {
1453         if (selector & 0x4) {
1454             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1455         }
1456         dt = &env->gdt;
1457         index = selector & ~7;
1458 #ifdef TARGET_X86_64
1459         if (env->hflags & HF_LMA_MASK) {
1460             entry_limit = 15;
1461         } else
1462 #endif
1463         {
1464             entry_limit = 7;
1465         }
1466         if ((index + entry_limit) > dt->limit) {
1467             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1468         }
1469         ptr = dt->base + index;
1470         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1471         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1472         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1473         if ((e2 & DESC_S_MASK) ||
1474             (type != 1 && type != 9)) {
1475             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1476         }
1477         if (!(e2 & DESC_P_MASK)) {
1478             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1479         }
1480 #ifdef TARGET_X86_64
1481         if (env->hflags & HF_LMA_MASK) {
1482             uint32_t e3, e4;
1483 
1484             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1485             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1486             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1487                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1488             }
1489             load_seg_cache_raw_dt(&env->tr, e1, e2);
1490             env->tr.base |= (target_ulong)e3 << 32;
1491         } else
1492 #endif
1493         {
1494             load_seg_cache_raw_dt(&env->tr, e1, e2);
1495         }
1496         e2 |= DESC_TSS_BUSY_MASK;
1497         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1498     }
1499     env->tr.selector = selector;
1500 }
1501 
1502 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1503 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1504 {
1505     uint32_t e1, e2;
1506     int cpl, dpl, rpl;
1507     SegmentCache *dt;
1508     int index;
1509     target_ulong ptr;
1510 
1511     selector &= 0xffff;
1512     cpl = env->hflags & HF_CPL_MASK;
1513     if ((selector & 0xfffc) == 0) {
1514         /* null selector case */
1515         if (seg_reg == R_SS
1516 #ifdef TARGET_X86_64
1517             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1518 #endif
1519             ) {
1520             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1521         }
1522         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1523     } else {
1524 
1525         if (selector & 0x4) {
1526             dt = &env->ldt;
1527         } else {
1528             dt = &env->gdt;
1529         }
1530         index = selector & ~7;
1531         if ((index + 7) > dt->limit) {
1532             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1533         }
1534         ptr = dt->base + index;
1535         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1536         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1537 
1538         if (!(e2 & DESC_S_MASK)) {
1539             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1540         }
1541         rpl = selector & 3;
1542         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1543         if (seg_reg == R_SS) {
1544             /* must be writable segment */
1545             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1546                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1547             }
1548             if (rpl != cpl || dpl != cpl) {
1549                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1550             }
1551         } else {
1552             /* must be readable segment */
1553             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1554                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1555             }
1556 
1557             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1558                 /* if not conforming code, test rights */
1559                 if (dpl < cpl || dpl < rpl) {
1560                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1561                 }
1562             }
1563         }
1564 
1565         if (!(e2 & DESC_P_MASK)) {
1566             if (seg_reg == R_SS) {
1567                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1568             } else {
1569                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1570             }
1571         }
1572 
1573         /* set the access bit if not already set */
1574         if (!(e2 & DESC_A_MASK)) {
1575             e2 |= DESC_A_MASK;
1576             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1577         }
1578 
1579         cpu_x86_load_seg_cache(env, seg_reg, selector,
1580                        get_seg_base(e1, e2),
1581                        get_seg_limit(e1, e2),
1582                        e2);
1583 #if 0
1584         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1585                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1586 #endif
1587     }
1588 }
1589 
1590 /* protected mode jump */
1591 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1592                            target_ulong next_eip)
1593 {
1594     int gate_cs, type;
1595     uint32_t e1, e2, cpl, dpl, rpl, limit;
1596 
1597     if ((new_cs & 0xfffc) == 0) {
1598         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1599     }
1600     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1601         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1602     }
1603     cpl = env->hflags & HF_CPL_MASK;
1604     if (e2 & DESC_S_MASK) {
1605         if (!(e2 & DESC_CS_MASK)) {
1606             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1607         }
1608         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1609         if (e2 & DESC_C_MASK) {
1610             /* conforming code segment */
1611             if (dpl > cpl) {
1612                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1613             }
1614         } else {
1615             /* non conforming code segment */
1616             rpl = new_cs & 3;
1617             if (rpl > cpl) {
1618                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1619             }
1620             if (dpl != cpl) {
1621                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1622             }
1623         }
1624         if (!(e2 & DESC_P_MASK)) {
1625             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1626         }
1627         limit = get_seg_limit(e1, e2);
1628         if (new_eip > limit &&
1629             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1630             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1631         }
1632         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1633                        get_seg_base(e1, e2), limit, e2);
1634         env->eip = new_eip;
1635     } else {
1636         /* jump to call or task gate */
1637         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1638         rpl = new_cs & 3;
1639         cpl = env->hflags & HF_CPL_MASK;
1640         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1641 
1642 #ifdef TARGET_X86_64
1643         if (env->efer & MSR_EFER_LMA) {
1644             if (type != 12) {
1645                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1646             }
1647         }
1648 #endif
1649         switch (type) {
1650         case 1: /* 286 TSS */
1651         case 9: /* 386 TSS */
1652         case 5: /* task gate */
1653             if (dpl < cpl || dpl < rpl) {
1654                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1655             }
1656             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1657             break;
1658         case 4: /* 286 call gate */
1659         case 12: /* 386 call gate */
1660             if ((dpl < cpl) || (dpl < rpl)) {
1661                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1662             }
1663             if (!(e2 & DESC_P_MASK)) {
1664                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1665             }
1666             gate_cs = e1 >> 16;
1667             new_eip = (e1 & 0xffff);
1668             if (type == 12) {
1669                 new_eip |= (e2 & 0xffff0000);
1670             }
1671 
1672 #ifdef TARGET_X86_64
1673             if (env->efer & MSR_EFER_LMA) {
1674                 /* load the upper 8 bytes of the 64-bit call gate */
1675                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1676                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1677                                            GETPC());
1678                 }
1679                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1680                 if (type != 0) {
1681                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1682                                            GETPC());
1683                 }
1684                 new_eip |= ((target_ulong)e1) << 32;
1685             }
1686 #endif
1687 
1688             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1689                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1690             }
1691             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1692             /* must be code segment */
1693             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1694                  (DESC_S_MASK | DESC_CS_MASK))) {
1695                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1696             }
1697             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1698                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1699                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1700             }
1701 #ifdef TARGET_X86_64
1702             if (env->efer & MSR_EFER_LMA) {
1703                 if (!(e2 & DESC_L_MASK)) {
1704                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1705                 }
1706                 if (e2 & DESC_B_MASK) {
1707                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1708                 }
1709             }
1710 #endif
1711             if (!(e2 & DESC_P_MASK)) {
1712                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1713             }
1714             limit = get_seg_limit(e1, e2);
1715             if (new_eip > limit &&
1716                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1717                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1718             }
1719             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1720                                    get_seg_base(e1, e2), limit, e2);
1721             env->eip = new_eip;
1722             break;
1723         default:
1724             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1725             break;
1726         }
1727     }
1728 }
1729 
1730 /* real mode call */
1731 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1732                        int shift, int next_eip)
1733 {
1734     int new_eip;
1735     uint32_t esp, esp_mask;
1736     target_ulong ssp;
1737 
1738     new_eip = new_eip1;
1739     esp = env->regs[R_ESP];
1740     esp_mask = get_sp_mask(env->segs[R_SS].flags);
1741     ssp = env->segs[R_SS].base;
1742     if (shift) {
1743         PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1744         PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1745     } else {
1746         PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1747         PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1748     }
1749 
1750     SET_ESP(esp, esp_mask);
1751     env->eip = new_eip;
1752     env->segs[R_CS].selector = new_cs;
1753     env->segs[R_CS].base = (new_cs << 4);
1754 }
1755 
1756 /* protected mode call */
1757 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1758                             int shift, target_ulong next_eip)
1759 {
1760     int new_stack, i;
1761     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1762     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1763     uint32_t val, limit, old_sp_mask;
1764     target_ulong ssp, old_ssp, offset, sp;
1765 
1766     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1767     LOG_PCALL_STATE(env_cpu(env));
1768     if ((new_cs & 0xfffc) == 0) {
1769         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1770     }
1771     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1772         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1773     }
1774     cpl = env->hflags & HF_CPL_MASK;
1775     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1776     if (e2 & DESC_S_MASK) {
1777         if (!(e2 & DESC_CS_MASK)) {
1778             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1779         }
1780         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1781         if (e2 & DESC_C_MASK) {
1782             /* conforming code segment */
1783             if (dpl > cpl) {
1784                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1785             }
1786         } else {
1787             /* non conforming code segment */
1788             rpl = new_cs & 3;
1789             if (rpl > cpl) {
1790                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1791             }
1792             if (dpl != cpl) {
1793                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1794             }
1795         }
1796         if (!(e2 & DESC_P_MASK)) {
1797             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1798         }
1799 
1800 #ifdef TARGET_X86_64
1801         /* XXX: check 16/32 bit cases in long mode */
1802         if (shift == 2) {
1803             target_ulong rsp;
1804 
1805             /* 64 bit case */
1806             rsp = env->regs[R_ESP];
1807             PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1808             PUSHQ_RA(rsp, next_eip, GETPC());
1809             /* from this point, not restartable */
1810             env->regs[R_ESP] = rsp;
1811             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1812                                    get_seg_base(e1, e2),
1813                                    get_seg_limit(e1, e2), e2);
1814             env->eip = new_eip;
1815         } else
1816 #endif
1817         {
1818             sp = env->regs[R_ESP];
1819             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1820             ssp = env->segs[R_SS].base;
1821             if (shift) {
1822                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1823                 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1824             } else {
1825                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1826                 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1827             }
1828 
1829             limit = get_seg_limit(e1, e2);
1830             if (new_eip > limit) {
1831                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1832             }
1833             /* from this point, not restartable */
1834             SET_ESP(sp, sp_mask);
1835             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1836                                    get_seg_base(e1, e2), limit, e2);
1837             env->eip = new_eip;
1838         }
1839     } else {
1840         /* check gate type */
1841         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1842         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1843         rpl = new_cs & 3;
1844 
1845 #ifdef TARGET_X86_64
1846         if (env->efer & MSR_EFER_LMA) {
1847             if (type != 12) {
1848                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1849             }
1850         }
1851 #endif
1852 
1853         switch (type) {
1854         case 1: /* available 286 TSS */
1855         case 9: /* available 386 TSS */
1856         case 5: /* task gate */
1857             if (dpl < cpl || dpl < rpl) {
1858                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1859             }
1860             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1861             return;
1862         case 4: /* 286 call gate */
1863         case 12: /* 386 call gate */
1864             break;
1865         default:
1866             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1867             break;
1868         }
1869         shift = type >> 3;
1870 
1871         if (dpl < cpl || dpl < rpl) {
1872             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1873         }
1874         /* check valid bit */
1875         if (!(e2 & DESC_P_MASK)) {
1876             raise_exception_err_ra(env, EXCP0B_NOSEG,  new_cs & 0xfffc, GETPC());
1877         }
1878         selector = e1 >> 16;
1879         param_count = e2 & 0x1f;
1880         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1881 #ifdef TARGET_X86_64
1882         if (env->efer & MSR_EFER_LMA) {
1883             /* load the upper 8 bytes of the 64-bit call gate */
1884             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1885                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1886                                        GETPC());
1887             }
1888             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1889             if (type != 0) {
1890                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1891                                        GETPC());
1892             }
1893             offset |= ((target_ulong)e1) << 32;
1894         }
1895 #endif
1896         if ((selector & 0xfffc) == 0) {
1897             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1898         }
1899 
1900         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1901             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1902         }
1903         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1904             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1905         }
1906         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1907         if (dpl > cpl) {
1908             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1909         }
1910 #ifdef TARGET_X86_64
1911         if (env->efer & MSR_EFER_LMA) {
1912             if (!(e2 & DESC_L_MASK)) {
1913                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1914             }
1915             if (e2 & DESC_B_MASK) {
1916                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1917             }
1918             shift++;
1919         }
1920 #endif
1921         if (!(e2 & DESC_P_MASK)) {
1922             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1923         }
1924 
1925         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1926             /* to inner privilege */
1927 #ifdef TARGET_X86_64
1928             if (shift == 2) {
1929                 sp = get_rsp_from_tss(env, dpl);
1930                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1931                 new_stack = 1;
1932                 sp_mask = 0;
1933                 ssp = 0;  /* SS base is always zero in IA-32e mode */
1934                 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1935                           TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1936             } else
1937 #endif
1938             {
1939                 uint32_t sp32;
1940                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1941                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1942                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1943                           env->regs[R_ESP]);
1944                 sp = sp32;
1945                 if ((ss & 0xfffc) == 0) {
1946                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1947                 }
1948                 if ((ss & 3) != dpl) {
1949                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1950                 }
1951                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1952                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1953                 }
1954                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1955                 if (ss_dpl != dpl) {
1956                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1957                 }
1958                 if (!(ss_e2 & DESC_S_MASK) ||
1959                     (ss_e2 & DESC_CS_MASK) ||
1960                     !(ss_e2 & DESC_W_MASK)) {
1961                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1962                 }
1963                 if (!(ss_e2 & DESC_P_MASK)) {
1964                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1965                 }
1966 
1967                 sp_mask = get_sp_mask(ss_e2);
1968                 ssp = get_seg_base(ss_e1, ss_e2);
1969             }
1970 
1971             /* push_size = ((param_count * 2) + 8) << shift; */
1972 
1973             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1974             old_ssp = env->segs[R_SS].base;
1975 #ifdef TARGET_X86_64
1976             if (shift == 2) {
1977                 /* XXX: verify if new stack address is canonical */
1978                 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
1979                 PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
1980                 /* parameters aren't supported for 64-bit call gates */
1981             } else
1982 #endif
1983             if (shift == 1) {
1984                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1985                 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1986                 for (i = param_count - 1; i >= 0; i--) {
1987                     val = cpu_ldl_kernel_ra(env, old_ssp +
1988                                             ((env->regs[R_ESP] + i * 4) &
1989                                              old_sp_mask), GETPC());
1990                     PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1991                 }
1992             } else {
1993                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1994                 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1995                 for (i = param_count - 1; i >= 0; i--) {
1996                     val = cpu_lduw_kernel_ra(env, old_ssp +
1997                                              ((env->regs[R_ESP] + i * 2) &
1998                                               old_sp_mask), GETPC());
1999                     PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
2000                 }
2001             }
2002             new_stack = 1;
2003         } else {
2004             /* to same privilege */
2005             sp = env->regs[R_ESP];
2006             sp_mask = get_sp_mask(env->segs[R_SS].flags);
2007             ssp = env->segs[R_SS].base;
2008             /* push_size = (4 << shift); */
2009             new_stack = 0;
2010         }
2011 
2012 #ifdef TARGET_X86_64
2013         if (shift == 2) {
2014             PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
2015             PUSHQ_RA(sp, next_eip, GETPC());
2016         } else
2017 #endif
2018         if (shift == 1) {
2019             PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2020             PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
2021         } else {
2022             PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2023             PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
2024         }
2025 
2026         /* from this point, not restartable */
2027 
2028         if (new_stack) {
2029 #ifdef TARGET_X86_64
2030             if (shift == 2) {
2031                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
2032             } else
2033 #endif
2034             {
2035                 ss = (ss & ~3) | dpl;
2036                 cpu_x86_load_seg_cache(env, R_SS, ss,
2037                                        ssp,
2038                                        get_seg_limit(ss_e1, ss_e2),
2039                                        ss_e2);
2040             }
2041         }
2042 
2043         selector = (selector & ~3) | dpl;
2044         cpu_x86_load_seg_cache(env, R_CS, selector,
2045                        get_seg_base(e1, e2),
2046                        get_seg_limit(e1, e2),
2047                        e2);
2048         SET_ESP(sp, sp_mask);
2049         env->eip = offset;
2050     }
2051 }
2052 
2053 /* real and vm86 mode iret */
2054 void helper_iret_real(CPUX86State *env, int shift)
2055 {
2056     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2057     target_ulong ssp;
2058     int eflags_mask;
2059 
2060     sp_mask = 0xffff; /* XXXX: use SS segment size? */
2061     sp = env->regs[R_ESP];
2062     ssp = env->segs[R_SS].base;
2063     if (shift == 1) {
2064         /* 32 bits */
2065         POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2066         POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2067         new_cs &= 0xffff;
2068         POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2069     } else {
2070         /* 16 bits */
2071         POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2072         POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2073         POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2074     }
2075     env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2076     env->segs[R_CS].selector = new_cs;
2077     env->segs[R_CS].base = (new_cs << 4);
2078     env->eip = new_eip;
2079     if (env->eflags & VM_MASK) {
2080         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2081             NT_MASK;
2082     } else {
2083         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2084             RF_MASK | NT_MASK;
2085     }
2086     if (shift == 0) {
2087         eflags_mask &= 0xffff;
2088     }
2089     cpu_load_eflags(env, new_eflags, eflags_mask);
2090     env->hflags2 &= ~HF2_NMI_MASK;
2091 }
2092 
2093 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2094 {
2095     int dpl;
2096     uint32_t e2;
2097 
2098     /* XXX: on x86_64, we do not want to nullify FS and GS because
2099        they may still contain a valid base. I would be interested to
2100        know how a real x86_64 CPU behaves */
2101     if ((seg_reg == R_FS || seg_reg == R_GS) &&
2102         (env->segs[seg_reg].selector & 0xfffc) == 0) {
2103         return;
2104     }
2105 
2106     e2 = env->segs[seg_reg].flags;
2107     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2108     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2109         /* data or non conforming code segment */
2110         if (dpl < cpl) {
2111             cpu_x86_load_seg_cache(env, seg_reg, 0,
2112                                    env->segs[seg_reg].base,
2113                                    env->segs[seg_reg].limit,
2114                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
2115         }
2116     }
2117 }
2118 
2119 /* protected mode iret */
2120 static inline void helper_ret_protected(CPUX86State *env, int shift,
2121                                         int is_iret, int addend,
2122                                         uintptr_t retaddr)
2123 {
2124     uint32_t new_cs, new_eflags, new_ss;
2125     uint32_t new_es, new_ds, new_fs, new_gs;
2126     uint32_t e1, e2, ss_e1, ss_e2;
2127     int cpl, dpl, rpl, eflags_mask, iopl;
2128     target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2129 
2130 #ifdef TARGET_X86_64
2131     if (shift == 2) {
2132         sp_mask = -1;
2133     } else
2134 #endif
2135     {
2136         sp_mask = get_sp_mask(env->segs[R_SS].flags);
2137     }
2138     sp = env->regs[R_ESP];
2139     ssp = env->segs[R_SS].base;
2140     new_eflags = 0; /* avoid warning */
2141 #ifdef TARGET_X86_64
2142     if (shift == 2) {
2143         POPQ_RA(sp, new_eip, retaddr);
2144         POPQ_RA(sp, new_cs, retaddr);
2145         new_cs &= 0xffff;
2146         if (is_iret) {
2147             POPQ_RA(sp, new_eflags, retaddr);
2148         }
2149     } else
2150 #endif
2151     {
2152         if (shift == 1) {
2153             /* 32 bits */
2154             POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2155             POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2156             new_cs &= 0xffff;
2157             if (is_iret) {
2158                 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2159                 if (new_eflags & VM_MASK) {
2160                     goto return_to_vm86;
2161                 }
2162             }
2163         } else {
2164             /* 16 bits */
2165             POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2166             POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2167             if (is_iret) {
2168                 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2169             }
2170         }
2171     }
2172     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2173               new_cs, new_eip, shift, addend);
2174     LOG_PCALL_STATE(env_cpu(env));
2175     if ((new_cs & 0xfffc) == 0) {
2176         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2177     }
2178     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2179         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2180     }
2181     if (!(e2 & DESC_S_MASK) ||
2182         !(e2 & DESC_CS_MASK)) {
2183         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2184     }
2185     cpl = env->hflags & HF_CPL_MASK;
2186     rpl = new_cs & 3;
2187     if (rpl < cpl) {
2188         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2189     }
2190     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2191     if (e2 & DESC_C_MASK) {
2192         if (dpl > rpl) {
2193             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2194         }
2195     } else {
2196         if (dpl != rpl) {
2197             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2198         }
2199     }
2200     if (!(e2 & DESC_P_MASK)) {
2201         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2202     }
2203 
2204     sp += addend;
2205     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2206                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2207         /* return to same privilege level */
2208         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2209                        get_seg_base(e1, e2),
2210                        get_seg_limit(e1, e2),
2211                        e2);
2212     } else {
2213         /* return to different privilege level */
2214 #ifdef TARGET_X86_64
2215         if (shift == 2) {
2216             POPQ_RA(sp, new_esp, retaddr);
2217             POPQ_RA(sp, new_ss, retaddr);
2218             new_ss &= 0xffff;
2219         } else
2220 #endif
2221         {
2222             if (shift == 1) {
2223                 /* 32 bits */
2224                 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2225                 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2226                 new_ss &= 0xffff;
2227             } else {
2228                 /* 16 bits */
2229                 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2230                 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2231             }
2232         }
2233         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2234                   new_ss, new_esp);
2235         if ((new_ss & 0xfffc) == 0) {
2236 #ifdef TARGET_X86_64
2237             /* NULL ss is allowed in long mode if cpl != 3 */
2238             /* XXX: test CS64? */
2239             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2240                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2241                                        0, 0xffffffff,
2242                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2243                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2244                                        DESC_W_MASK | DESC_A_MASK);
2245                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2246             } else
2247 #endif
2248             {
2249                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2250             }
2251         } else {
2252             if ((new_ss & 3) != rpl) {
2253                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2254             }
2255             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2256                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2257             }
2258             if (!(ss_e2 & DESC_S_MASK) ||
2259                 (ss_e2 & DESC_CS_MASK) ||
2260                 !(ss_e2 & DESC_W_MASK)) {
2261                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2262             }
2263             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2264             if (dpl != rpl) {
2265                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2266             }
2267             if (!(ss_e2 & DESC_P_MASK)) {
2268                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2269             }
2270             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2271                                    get_seg_base(ss_e1, ss_e2),
2272                                    get_seg_limit(ss_e1, ss_e2),
2273                                    ss_e2);
2274         }
2275 
2276         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2277                        get_seg_base(e1, e2),
2278                        get_seg_limit(e1, e2),
2279                        e2);
2280         sp = new_esp;
2281 #ifdef TARGET_X86_64
2282         if (env->hflags & HF_CS64_MASK) {
2283             sp_mask = -1;
2284         } else
2285 #endif
2286         {
2287             sp_mask = get_sp_mask(ss_e2);
2288         }
2289 
2290         /* validate data segments */
2291         validate_seg(env, R_ES, rpl);
2292         validate_seg(env, R_DS, rpl);
2293         validate_seg(env, R_FS, rpl);
2294         validate_seg(env, R_GS, rpl);
2295 
2296         sp += addend;
2297     }
2298     SET_ESP(sp, sp_mask);
2299     env->eip = new_eip;
2300     if (is_iret) {
2301         /* NOTE: 'cpl' is the _old_ CPL */
2302         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2303         if (cpl == 0) {
2304             eflags_mask |= IOPL_MASK;
2305         }
2306         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2307         if (cpl <= iopl) {
2308             eflags_mask |= IF_MASK;
2309         }
2310         if (shift == 0) {
2311             eflags_mask &= 0xffff;
2312         }
2313         cpu_load_eflags(env, new_eflags, eflags_mask);
2314     }
2315     return;
2316 
2317  return_to_vm86:
2318     POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2319     POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2320     POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2321     POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2322     POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2323     POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2324 
2325     /* modify processor state */
2326     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2327                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2328                     VIP_MASK);
2329     load_seg_vm(env, R_CS, new_cs & 0xffff);
2330     load_seg_vm(env, R_SS, new_ss & 0xffff);
2331     load_seg_vm(env, R_ES, new_es & 0xffff);
2332     load_seg_vm(env, R_DS, new_ds & 0xffff);
2333     load_seg_vm(env, R_FS, new_fs & 0xffff);
2334     load_seg_vm(env, R_GS, new_gs & 0xffff);
2335 
2336     env->eip = new_eip & 0xffff;
2337     env->regs[R_ESP] = new_esp;
2338 }
2339 
2340 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2341 {
2342     int tss_selector, type;
2343     uint32_t e1, e2;
2344 
2345     /* specific case for TSS */
2346     if (env->eflags & NT_MASK) {
2347 #ifdef TARGET_X86_64
2348         if (env->hflags & HF_LMA_MASK) {
2349             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2350         }
2351 #endif
2352         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2353         if (tss_selector & 4) {
2354             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2355         }
2356         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2357             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2358         }
2359         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2360         /* NOTE: we check both segment and busy TSS */
2361         if (type != 3) {
2362             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2363         }
2364         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2365     } else {
2366         helper_ret_protected(env, shift, 1, 0, GETPC());
2367     }
2368     env->hflags2 &= ~HF2_NMI_MASK;
2369 }
2370 
2371 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2372 {
2373     helper_ret_protected(env, shift, 0, addend, GETPC());
2374 }
2375 
2376 void helper_sysenter(CPUX86State *env)
2377 {
2378     if (env->sysenter_cs == 0) {
2379         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2380     }
2381     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2382 
2383 #ifdef TARGET_X86_64
2384     if (env->hflags & HF_LMA_MASK) {
2385         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2386                                0, 0xffffffff,
2387                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2388                                DESC_S_MASK |
2389                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2390                                DESC_L_MASK);
2391     } else
2392 #endif
2393     {
2394         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2395                                0, 0xffffffff,
2396                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2397                                DESC_S_MASK |
2398                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2399     }
2400     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2401                            0, 0xffffffff,
2402                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2403                            DESC_S_MASK |
2404                            DESC_W_MASK | DESC_A_MASK);
2405     env->regs[R_ESP] = env->sysenter_esp;
2406     env->eip = env->sysenter_eip;
2407 }
2408 
2409 void helper_sysexit(CPUX86State *env, int dflag)
2410 {
2411     int cpl;
2412 
2413     cpl = env->hflags & HF_CPL_MASK;
2414     if (env->sysenter_cs == 0 || cpl != 0) {
2415         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2416     }
2417 #ifdef TARGET_X86_64
2418     if (dflag == 2) {
2419         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2420                                3, 0, 0xffffffff,
2421                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2422                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2423                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2424                                DESC_L_MASK);
2425         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2426                                3, 0, 0xffffffff,
2427                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2428                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2429                                DESC_W_MASK | DESC_A_MASK);
2430     } else
2431 #endif
2432     {
2433         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2434                                3, 0, 0xffffffff,
2435                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2436                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2437                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2438         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2439                                3, 0, 0xffffffff,
2440                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2441                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2442                                DESC_W_MASK | DESC_A_MASK);
2443     }
2444     env->regs[R_ESP] = env->regs[R_ECX];
2445     env->eip = env->regs[R_EDX];
2446 }
2447 
2448 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2449 {
2450     unsigned int limit;
2451     uint32_t e1, e2, eflags, selector;
2452     int rpl, dpl, cpl, type;
2453 
2454     selector = selector1 & 0xffff;
2455     eflags = cpu_cc_compute_all(env, CC_OP);
2456     if ((selector & 0xfffc) == 0) {
2457         goto fail;
2458     }
2459     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2460         goto fail;
2461     }
2462     rpl = selector & 3;
2463     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2464     cpl = env->hflags & HF_CPL_MASK;
2465     if (e2 & DESC_S_MASK) {
2466         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2467             /* conforming */
2468         } else {
2469             if (dpl < cpl || dpl < rpl) {
2470                 goto fail;
2471             }
2472         }
2473     } else {
2474         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2475         switch (type) {
2476         case 1:
2477         case 2:
2478         case 3:
2479         case 9:
2480         case 11:
2481             break;
2482         default:
2483             goto fail;
2484         }
2485         if (dpl < cpl || dpl < rpl) {
2486         fail:
2487             CC_SRC = eflags & ~CC_Z;
2488             return 0;
2489         }
2490     }
2491     limit = get_seg_limit(e1, e2);
2492     CC_SRC = eflags | CC_Z;
2493     return limit;
2494 }
2495 
2496 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2497 {
2498     uint32_t e1, e2, eflags, selector;
2499     int rpl, dpl, cpl, type;
2500 
2501     selector = selector1 & 0xffff;
2502     eflags = cpu_cc_compute_all(env, CC_OP);
2503     if ((selector & 0xfffc) == 0) {
2504         goto fail;
2505     }
2506     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2507         goto fail;
2508     }
2509     rpl = selector & 3;
2510     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2511     cpl = env->hflags & HF_CPL_MASK;
2512     if (e2 & DESC_S_MASK) {
2513         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2514             /* conforming */
2515         } else {
2516             if (dpl < cpl || dpl < rpl) {
2517                 goto fail;
2518             }
2519         }
2520     } else {
2521         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2522         switch (type) {
2523         case 1:
2524         case 2:
2525         case 3:
2526         case 4:
2527         case 5:
2528         case 9:
2529         case 11:
2530         case 12:
2531             break;
2532         default:
2533             goto fail;
2534         }
2535         if (dpl < cpl || dpl < rpl) {
2536         fail:
2537             CC_SRC = eflags & ~CC_Z;
2538             return 0;
2539         }
2540     }
2541     CC_SRC = eflags | CC_Z;
2542     return e2 & 0x00f0ff00;
2543 }
2544 
2545 void helper_verr(CPUX86State *env, target_ulong selector1)
2546 {
2547     uint32_t e1, e2, eflags, selector;
2548     int rpl, dpl, cpl;
2549 
2550     selector = selector1 & 0xffff;
2551     eflags = cpu_cc_compute_all(env, CC_OP);
2552     if ((selector & 0xfffc) == 0) {
2553         goto fail;
2554     }
2555     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2556         goto fail;
2557     }
2558     if (!(e2 & DESC_S_MASK)) {
2559         goto fail;
2560     }
2561     rpl = selector & 3;
2562     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2563     cpl = env->hflags & HF_CPL_MASK;
2564     if (e2 & DESC_CS_MASK) {
2565         if (!(e2 & DESC_R_MASK)) {
2566             goto fail;
2567         }
2568         if (!(e2 & DESC_C_MASK)) {
2569             if (dpl < cpl || dpl < rpl) {
2570                 goto fail;
2571             }
2572         }
2573     } else {
2574         if (dpl < cpl || dpl < rpl) {
2575         fail:
2576             CC_SRC = eflags & ~CC_Z;
2577             return;
2578         }
2579     }
2580     CC_SRC = eflags | CC_Z;
2581 }
2582 
2583 void helper_verw(CPUX86State *env, target_ulong selector1)
2584 {
2585     uint32_t e1, e2, eflags, selector;
2586     int rpl, dpl, cpl;
2587 
2588     selector = selector1 & 0xffff;
2589     eflags = cpu_cc_compute_all(env, CC_OP);
2590     if ((selector & 0xfffc) == 0) {
2591         goto fail;
2592     }
2593     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2594         goto fail;
2595     }
2596     if (!(e2 & DESC_S_MASK)) {
2597         goto fail;
2598     }
2599     rpl = selector & 3;
2600     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2601     cpl = env->hflags & HF_CPL_MASK;
2602     if (e2 & DESC_CS_MASK) {
2603         goto fail;
2604     } else {
2605         if (dpl < cpl || dpl < rpl) {
2606             goto fail;
2607         }
2608         if (!(e2 & DESC_W_MASK)) {
2609         fail:
2610             CC_SRC = eflags & ~CC_Z;
2611             return;
2612         }
2613     }
2614     CC_SRC = eflags | CC_Z;
2615 }
2616 
2617 #if defined(CONFIG_USER_ONLY)
2618 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2619 {
2620     if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2621         int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2622         selector &= 0xffff;
2623         cpu_x86_load_seg_cache(env, seg_reg, selector,
2624                                (selector << 4), 0xffff,
2625                                DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2626                                DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2627     } else {
2628         helper_load_seg(env, seg_reg, selector);
2629     }
2630 }
2631 #endif
2632 
2633 /* check if Port I/O is allowed in TSS */
2634 static inline void check_io(CPUX86State *env, int addr, int size,
2635                             uintptr_t retaddr)
2636 {
2637     int io_offset, val, mask;
2638 
2639     /* TSS must be a valid 32 bit one */
2640     if (!(env->tr.flags & DESC_P_MASK) ||
2641         ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2642         env->tr.limit < 103) {
2643         goto fail;
2644     }
2645     io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2646     io_offset += (addr >> 3);
2647     /* Note: the check needs two bytes */
2648     if ((io_offset + 1) > env->tr.limit) {
2649         goto fail;
2650     }
2651     val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2652     val >>= (addr & 7);
2653     mask = (1 << size) - 1;
2654     /* all bits must be zero to allow the I/O */
2655     if ((val & mask) != 0) {
2656     fail:
2657         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2658     }
2659 }
2660 
2661 void helper_check_iob(CPUX86State *env, uint32_t t0)
2662 {
2663     check_io(env, t0, 1, GETPC());
2664 }
2665 
2666 void helper_check_iow(CPUX86State *env, uint32_t t0)
2667 {
2668     check_io(env, t0, 2, GETPC());
2669 }
2670 
2671 void helper_check_iol(CPUX86State *env, uint32_t t0)
2672 {
2673     check_io(env, t0, 4, GETPC());
2674 }
2675