xref: /qemu/target/i386/tcg/seg_helper.c (revision a93b55ec223f07c7ca74a748e607db48cab945f6)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 
30 //#define DEBUG_PCALL
31 
32 #ifdef DEBUG_PCALL
33 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
34 # define LOG_PCALL_STATE(cpu)                                  \
35     log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
36 #else
37 # define LOG_PCALL(...) do { } while (0)
38 # define LOG_PCALL_STATE(cpu) do { } while (0)
39 #endif
40 
41 /*
42  * TODO: Convert callers to compute cpu_mmu_index_kernel once
43  * and use *_mmuidx_ra directly.
44  */
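/*
 * The *_kernel accessors below perform loads and stores with ring-0
 * privilege regardless of the current CPL; they are used to read and
 * write descriptor tables and TSS images during privileged transitions
 * (interrupt delivery, task switches, far control transfers).
 */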
45 #define cpu_ldub_kernel_ra(e, p, r) \
46     cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
47 #define cpu_lduw_kernel_ra(e, p, r) \
48     cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
49 #define cpu_ldl_kernel_ra(e, p, r) \
50     cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
51 #define cpu_ldq_kernel_ra(e, p, r) \
52     cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
53 
54 #define cpu_stb_kernel_ra(e, p, v, r) \
55     cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
56 #define cpu_stw_kernel_ra(e, p, v, r) \
57     cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
58 #define cpu_stl_kernel_ra(e, p, v, r) \
59     cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
60 #define cpu_stq_kernel_ra(e, p, v, r) \
61     cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
62 
63 #define cpu_ldub_kernel(e, p)    cpu_ldub_kernel_ra(e, p, 0)
64 #define cpu_lduw_kernel(e, p)    cpu_lduw_kernel_ra(e, p, 0)
65 #define cpu_ldl_kernel(e, p)     cpu_ldl_kernel_ra(e, p, 0)
66 #define cpu_ldq_kernel(e, p)     cpu_ldq_kernel_ra(e, p, 0)
67 
68 #define cpu_stb_kernel(e, p, v)  cpu_stb_kernel_ra(e, p, v, 0)
69 #define cpu_stw_kernel(e, p, v)  cpu_stw_kernel_ra(e, p, v, 0)
70 #define cpu_stl_kernel(e, p, v)  cpu_stl_kernel_ra(e, p, v, 0)
71 #define cpu_stq_kernel(e, p, v)  cpu_stq_kernel_ra(e, p, v, 0)
72 
73 /* return non-zero on error */
74 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
75                                uint32_t *e2_ptr, int selector,
76                                uintptr_t retaddr)
77 {
78     SegmentCache *dt;
79     int index;
80     target_ulong ptr;
81 
82     if (selector & 0x4) {
83         dt = &env->ldt;
84     } else {
85         dt = &env->gdt;
86     }
87     index = selector & ~7;
88     if ((index + 7) > dt->limit) {
89         return -1;
90     }
91     ptr = dt->base + index;
92     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
93     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
94     return 0;
95 }
96 
97 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
98                                uint32_t *e2_ptr, int selector)
99 {
100     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
101 }
102 
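/*
 * e1/e2 are the low and high words of a legacy 8-byte descriptor:
 * e1 = limit[15:0] | base[15:0] << 16,
 * e2 = base[23:16] | type/S/DPL/P | limit[19:16] << 16 | flags | base[31:24] << 24.
 * With the granularity bit set, the 20-bit limit is scaled by 4 KiB.
 */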
103 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
104 {
105     unsigned int limit;
106 
107     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
108     if (e2 & DESC_G_MASK) {
109         limit = (limit << 12) | 0xfff;
110     }
111     return limit;
112 }
113 
114 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
115 {
116     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
117 }
118 
119 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
120                                          uint32_t e2)
121 {
122     sc->base = get_seg_base(e1, e2);
123     sc->limit = get_seg_limit(e1, e2);
124     sc->flags = e2;
125 }
126 
127 /* init the segment cache in vm86 mode. */
128 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
129 {
130     selector &= 0xffff;
131 
132     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
133                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
134                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
135 }
136 
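/*
 * Fetch the ring-dpl stack pointer from the current TSS.  A 32-bit TSS
 * holds SS:ESP pairs for rings 0-2 at offset 4 + 8 * dpl, a 16-bit TSS
 * holds SS:SP pairs at offset 2 + 4 * dpl, which is what
 * index = (dpl * 4 + 2) << shift computes below.
 */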
137 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
138                                        uint32_t *esp_ptr, int dpl,
139                                        uintptr_t retaddr)
140 {
141     X86CPU *cpu = env_archcpu(env);
142     int type, index, shift;
143 
144 #if 0
145     {
146         int i;
147         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
148         for (i = 0; i < env->tr.limit; i++) {
149             printf("%02x ", env->tr.base[i]);
150             if ((i & 7) == 7) {
151                 printf("\n");
152             }
153         }
154         printf("\n");
155     }
156 #endif
157 
158     if (!(env->tr.flags & DESC_P_MASK)) {
159         cpu_abort(CPU(cpu), "invalid tss");
160     }
161     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
162     if ((type & 7) != 1) {
163         cpu_abort(CPU(cpu), "invalid tss type");
164     }
165     shift = type >> 3;
166     index = (dpl * 4 + 2) << shift;
167     if (index + (4 << shift) - 1 > env->tr.limit) {
168         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
169     }
170     if (shift == 0) {
171         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
172         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
173     } else {
174         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
175         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
176     }
177 }
178 
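/*
 * Validate and load one segment register as part of a task switch; a bad
 * selector taken from the new TSS raises #TS (or #NP if the segment is
 * not present) with the offending selector as the error code.
 */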
179 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
180                          int cpl, uintptr_t retaddr)
181 {
182     uint32_t e1, e2;
183     int rpl, dpl;
184 
185     if ((selector & 0xfffc) != 0) {
186         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
187             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188         }
189         if (!(e2 & DESC_S_MASK)) {
190             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
191         }
192         rpl = selector & 3;
193         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
194         if (seg_reg == R_CS) {
195             if (!(e2 & DESC_CS_MASK)) {
196                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197             }
198             if (dpl != rpl) {
199                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
200             }
201         } else if (seg_reg == R_SS) {
202             /* SS must be writable data */
203             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
204                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205             }
206             if (dpl != cpl || dpl != rpl) {
207                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
208             }
209         } else {
210             /* not readable code */
211             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
212                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
213             }
214             /* if data or non-conforming code, check the rights */
215             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
216                 if (dpl < cpl || dpl < rpl) {
217                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
218                 }
219             }
220         }
221         if (!(e2 & DESC_P_MASK)) {
222             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
223         }
224         cpu_x86_load_seg_cache(env, seg_reg, selector,
225                                get_seg_base(e1, e2),
226                                get_seg_limit(e1, e2),
227                                e2);
228     } else {
229         if (seg_reg == R_SS || seg_reg == R_CS) {
230             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
231         }
232     }
233 }
234 
235 #define SWITCH_TSS_JMP  0
236 #define SWITCH_TSS_IRET 1
237 #define SWITCH_TSS_CALL 2
238 
239 /* XXX: restore CPU state in registers (PowerPC case) */
240 static void switch_tss_ra(CPUX86State *env, int tss_selector,
241                           uint32_t e1, uint32_t e2, int source,
242                           uint32_t next_eip, uintptr_t retaddr)
243 {
244     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
245     target_ulong tss_base;
246     uint32_t new_regs[8], new_segs[6];
247     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
248     uint32_t old_eflags, eflags_mask;
249     SegmentCache *dt;
250     int index;
251     target_ulong ptr;
252 
253     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
254     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
255               source);
256 
257     /* if task gate, we read the TSS segment and we load it */
258     if (type == 5) {
259         if (!(e2 & DESC_P_MASK)) {
260             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
261         }
262         tss_selector = e1 >> 16;
263         if (tss_selector & 4) {
264             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
265         }
266         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
267             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268         }
269         if (e2 & DESC_S_MASK) {
270             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
271         }
272         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
273         if ((type & 7) != 1) {
274             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
275         }
276     }
277 
278     if (!(e2 & DESC_P_MASK)) {
279         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
280     }
281 
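    /*
     * TSS descriptor types: 1/3 = 16-bit available/busy, 9/11 = 32-bit
     * available/busy, so bit 3 of the type selects the TSS format.  The
     * minimum valid limit is 0x67 (103) for a 32-bit TSS and 0x2b (43)
     * for a 16-bit one.
     */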
282     if (type & 8) {
283         tss_limit_max = 103;
284     } else {
285         tss_limit_max = 43;
286     }
287     tss_limit = get_seg_limit(e1, e2);
288     tss_base = get_seg_base(e1, e2);
289     if ((tss_selector & 4) != 0 ||
290         tss_limit < tss_limit_max) {
291         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
292     }
293     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
294     if (old_type & 8) {
295         old_tss_limit_max = 103;
296     } else {
297         old_tss_limit_max = 43;
298     }
299 
300     /* read all the registers from the new TSS */
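    /*
     * 32-bit TSS layout: CR3 at 0x1c, EIP 0x20, EFLAGS 0x24, EAX..EDI at
     * 0x28..0x44, ES..GS selectors at 0x48..0x5c, LDT selector at 0x60,
     * T bit and I/O map base at 0x64.  16-bit TSS layout: IP at 0x0e,
     * FLAGS 0x10, AX..DI at 0x12..0x20, ES..DS at 0x22..0x28, LDT at 0x2a.
     */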
301     if (type & 8) {
302         /* 32 bit */
303         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
304         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
305         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
306         for (i = 0; i < 8; i++) {
307             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
308                                             retaddr);
309         }
310         for (i = 0; i < 6; i++) {
311             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
312                                              retaddr);
313         }
314         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
315         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
316     } else {
317         /* 16 bit */
318         new_cr3 = 0;
319         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
320         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
321         for (i = 0; i < 8; i++) {
322             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
323                                              retaddr) | 0xffff0000;
324         }
325         for (i = 0; i < 4; i++) {
326             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
327                                              retaddr);
328         }
329         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
330         new_segs[R_FS] = 0;
331         new_segs[R_GS] = 0;
332         new_trap = 0;
333     }
334     /* XXX: avoid a compiler warning, see
335      http://support.amd.com/us/Processor_TechDocs/24593.pdf
336      chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
337     (void)new_trap;
338 
339     /* NOTE: we must avoid memory exceptions during the task switch,
340        so we make dummy accesses beforehand */
341     /* XXX: it can still fail in some cases, so a bigger hack is
342        necessary to validate the TLB after having done the accesses */
343 
344     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
345     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
346     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
347     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
348 
349     /* clear busy bit (it is restartable) */
350     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
351         target_ulong ptr;
352         uint32_t e2;
353 
354         ptr = env->gdt.base + (env->tr.selector & ~7);
355         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
356         e2 &= ~DESC_TSS_BUSY_MASK;
357         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
358     }
359     old_eflags = cpu_compute_eflags(env);
360     if (source == SWITCH_TSS_IRET) {
361         old_eflags &= ~NT_MASK;
362     }
363 
364     /* save the current state in the old TSS */
365     if (type & 8) {
366         /* 32 bit */
367         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
368         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
369         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
370         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
371         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
372         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
373         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
374         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
375         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
376         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
377         for (i = 0; i < 6; i++) {
378             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
379                               env->segs[i].selector, retaddr);
380         }
381     } else {
382         /* 16 bit */
383         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
384         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
385         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
386         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
387         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
388         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
389         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
390         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
391         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
392         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
393         for (i = 0; i < 4; i++) {
394             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
395                               env->segs[i].selector, retaddr);
396         }
397     }
398 
399     /* now if an exception occurs, it will occur in the next task
400        context */
401 
402     if (source == SWITCH_TSS_CALL) {
403         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
404         new_eflags |= NT_MASK;
405     }
406 
407     /* set busy bit */
408     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
409         target_ulong ptr;
410         uint32_t e2;
411 
412         ptr = env->gdt.base + (tss_selector & ~7);
413         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
414         e2 |= DESC_TSS_BUSY_MASK;
415         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
416     }
417 
418     /* set the new CPU state */
419     /* from this point, any exception which occurs can cause problems */
420     env->cr[0] |= CR0_TS_MASK;
421     env->hflags |= HF_TS_MASK;
422     env->tr.selector = tss_selector;
423     env->tr.base = tss_base;
424     env->tr.limit = tss_limit;
425     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426 
427     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
428         cpu_x86_update_cr3(env, new_cr3);
429     }
430 
431     /* load all registers without raising an exception, then reload them
432        with a possible exception */
433     env->eip = new_eip;
434     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
435         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
436     if (!(type & 8)) {
437         eflags_mask &= 0xffff;
438     }
439     cpu_load_eflags(env, new_eflags, eflags_mask);
440     /* XXX: what to do in 16 bit case? */
441     env->regs[R_EAX] = new_regs[0];
442     env->regs[R_ECX] = new_regs[1];
443     env->regs[R_EDX] = new_regs[2];
444     env->regs[R_EBX] = new_regs[3];
445     env->regs[R_ESP] = new_regs[4];
446     env->regs[R_EBP] = new_regs[5];
447     env->regs[R_ESI] = new_regs[6];
448     env->regs[R_EDI] = new_regs[7];
449     if (new_eflags & VM_MASK) {
450         for (i = 0; i < 6; i++) {
451             load_seg_vm(env, i, new_segs[i]);
452         }
453     } else {
454         /* first just selectors as the rest may trigger exceptions */
455         for (i = 0; i < 6; i++) {
456             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
457         }
458     }
459 
460     env->ldt.selector = new_ldt & ~4;
461     env->ldt.base = 0;
462     env->ldt.limit = 0;
463     env->ldt.flags = 0;
464 
465     /* load the LDT */
466     if (new_ldt & 4) {
467         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
468     }
469 
470     if ((new_ldt & 0xfffc) != 0) {
471         dt = &env->gdt;
472         index = new_ldt & ~7;
473         if ((index + 7) > dt->limit) {
474             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
475         }
476         ptr = dt->base + index;
477         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
478         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
479         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
480             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481         }
482         if (!(e2 & DESC_P_MASK)) {
483             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
484         }
485         load_seg_cache_raw_dt(&env->ldt, e1, e2);
486     }
487 
488     /* load the segments */
489     if (!(new_eflags & VM_MASK)) {
490         int cpl = new_segs[R_CS] & 3;
491         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
492         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
493         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
494         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
495         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
496         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
497     }
498 
499     /* check that env->eip is within the CS segment limit */
500     if (new_eip > env->segs[R_CS].limit) {
501         /* XXX: different exception if CALL? */
502         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
503     }
504 
505 #ifndef CONFIG_USER_ONLY
506     /* reset local breakpoints */
507     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
508         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
509     }
510 #endif
511 }
512 
513 static void switch_tss(CPUX86State *env, int tss_selector,
514                        uint32_t e1, uint32_t e2, int source,
515                         uint32_t next_eip)
516 {
517     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
518 }
519 
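/* Stack-pointer mask derived from the SS descriptor flags: a 64-bit stack
   is not masked at all, the B bit selects a 32-bit vs 16-bit stack. */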
520 static inline unsigned int get_sp_mask(unsigned int e2)
521 {
522 #ifdef TARGET_X86_64
523     if (e2 & DESC_L_MASK) {
524         return 0;
525     } else
526 #endif
527     if (e2 & DESC_B_MASK) {
528         return 0xffffffff;
529     } else {
530         return 0xffff;
531     }
532 }
533 
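/* Vectors that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17). */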
534 static int exception_has_error_code(int intno)
535 {
536     switch (intno) {
537     case 8:
538     case 10:
539     case 11:
540     case 12:
541     case 13:
542     case 14:
543     case 17:
544         return 1;
545     }
546     return 0;
547 }
548 
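/* SET_ESP writes back SP, ESP or RSP according to the stack-size mask,
   so that a 16-bit stack only modifies the low 16 bits of the register. */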
549 #ifdef TARGET_X86_64
550 #define SET_ESP(val, sp_mask)                                   \
551     do {                                                        \
552         if ((sp_mask) == 0xffff) {                              \
553             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
554                 ((val) & 0xffff);                               \
555         } else if ((sp_mask) == 0xffffffffLL) {                 \
556             env->regs[R_ESP] = (uint32_t)(val);                 \
557         } else {                                                \
558             env->regs[R_ESP] = (val);                           \
559         }                                                       \
560     } while (0)
561 #else
562 #define SET_ESP(val, sp_mask)                                   \
563     do {                                                        \
564         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
565             ((val) & (sp_mask));                                \
566     } while (0)
567 #endif
568 
569 /* on 64-bit machines, this can overflow, so this segment addition macro
570  * can be used to trim the value to 32 bits whenever needed */
571 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
572 
573 /* XXX: add an is_user flag to have proper security support */
574 #define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
575     {                                                            \
576         sp -= 2;                                                 \
577         cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
578     }
579 
580 #define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
581     {                                                                   \
582         sp -= 4;                                                        \
583         cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
584     }
585 
586 #define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
587     {                                                            \
588         val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
589         sp += 2;                                                 \
590     }
591 
592 #define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
593     {                                                                   \
594         val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
595         sp += 4;                                                        \
596     }
597 
598 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
599 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
600 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
601 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
602 
603 /* protected mode interrupt */
604 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
605                                    int error_code, unsigned int next_eip,
606                                    int is_hw)
607 {
608     SegmentCache *dt;
609     target_ulong ptr, ssp;
610     int type, dpl, selector, ss_dpl, cpl;
611     int has_error_code, new_stack, shift;
612     uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
613     uint32_t old_eip, sp_mask;
614     int vm86 = env->eflags & VM_MASK;
615 
616     has_error_code = 0;
617     if (!is_int && !is_hw) {
618         has_error_code = exception_has_error_code(intno);
619     }
620     if (is_int) {
621         old_eip = next_eip;
622     } else {
623         old_eip = env->eip;
624     }
625 
626     dt = &env->idt;
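    /* IDT-related fault error codes are (vector << 3) | 2, i.e. the IDT
       entry index with the IDT bit of the error code set */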
627     if (intno * 8 + 7 > dt->limit) {
628         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
629     }
630     ptr = dt->base + intno * 8;
631     e1 = cpu_ldl_kernel(env, ptr);
632     e2 = cpu_ldl_kernel(env, ptr + 4);
633     /* check gate type */
634     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
635     switch (type) {
636     case 5: /* task gate */
637     case 6: /* 286 interrupt gate */
638     case 7: /* 286 trap gate */
639     case 14: /* 386 interrupt gate */
640     case 15: /* 386 trap gate */
641         break;
642     default:
643         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
644         break;
645     }
646     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
647     cpl = env->hflags & HF_CPL_MASK;
648     /* check privilege if software int */
649     if (is_int && dpl < cpl) {
650         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
651     }
652 
653     if (type == 5) {
654         /* task gate */
655         /* must do that check here to return the correct error code */
656         if (!(e2 & DESC_P_MASK)) {
657             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
658         }
659         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
660         if (has_error_code) {
661             int type;
662             uint32_t mask;
663 
664             /* push the error code */
665             type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
666             shift = type >> 3;
667             if (env->segs[R_SS].flags & DESC_B_MASK) {
668                 mask = 0xffffffff;
669             } else {
670                 mask = 0xffff;
671             }
672             esp = (env->regs[R_ESP] - (2 << shift)) & mask;
673             ssp = env->segs[R_SS].base + esp;
674             if (shift) {
675                 cpu_stl_kernel(env, ssp, error_code);
676             } else {
677                 cpu_stw_kernel(env, ssp, error_code);
678             }
679             SET_ESP(esp, mask);
680         }
681         return;
682     }
683 
684     /* Otherwise, trap or interrupt gate */
685 
686     /* check valid bit */
687     if (!(e2 & DESC_P_MASK)) {
688         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
689     }
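    /* 386 interrupt/trap gate: target selector in e1[31:16], target
       offset split across e1[15:0] (low) and e2[31:16] (high) */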
690     selector = e1 >> 16;
691     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
692     if ((selector & 0xfffc) == 0) {
693         raise_exception_err(env, EXCP0D_GPF, 0);
694     }
695     if (load_segment(env, &e1, &e2, selector) != 0) {
696         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
697     }
698     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
699         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
700     }
701     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
702     if (dpl > cpl) {
703         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
704     }
705     if (!(e2 & DESC_P_MASK)) {
706         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
707     }
708     if (e2 & DESC_C_MASK) {
709         dpl = cpl;
710     }
711     if (dpl < cpl) {
712         /* to inner privilege */
713         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
714         if ((ss & 0xfffc) == 0) {
715             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
716         }
717         if ((ss & 3) != dpl) {
718             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719         }
720         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
721             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
722         }
723         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
724         if (ss_dpl != dpl) {
725             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
726         }
727         if (!(ss_e2 & DESC_S_MASK) ||
728             (ss_e2 & DESC_CS_MASK) ||
729             !(ss_e2 & DESC_W_MASK)) {
730             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
731         }
732         if (!(ss_e2 & DESC_P_MASK)) {
733             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
734         }
735         new_stack = 1;
736         sp_mask = get_sp_mask(ss_e2);
737         ssp = get_seg_base(ss_e1, ss_e2);
738     } else  {
739         /* to same privilege */
740         if (vm86) {
741             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
742         }
743         new_stack = 0;
744         sp_mask = get_sp_mask(env->segs[R_SS].flags);
745         ssp = env->segs[R_SS].base;
746         esp = env->regs[R_ESP];
747     }
748 
749     shift = type >> 3;
750 
751 #if 0
752     /* XXX: check that enough room is available */
753     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
754     if (vm86) {
755         push_size += 8;
756     }
757     push_size <<= shift;
758 #endif
759     if (shift == 1) {
760         if (new_stack) {
761             if (vm86) {
762                 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
763                 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
764                 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
765                 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
766             }
767             PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
768             PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
769         }
770         PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
771         PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
772         PUSHL(ssp, esp, sp_mask, old_eip);
773         if (has_error_code) {
774             PUSHL(ssp, esp, sp_mask, error_code);
775         }
776     } else {
777         if (new_stack) {
778             if (vm86) {
779                 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
780                 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
781                 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
782                 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
783             }
784             PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
785             PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
786         }
787         PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
788         PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
789         PUSHW(ssp, esp, sp_mask, old_eip);
790         if (has_error_code) {
791             PUSHW(ssp, esp, sp_mask, error_code);
792         }
793     }
794 
795     /* interrupt gates clear the IF flag (trap gates do not) */
796     if ((type & 1) == 0) {
797         env->eflags &= ~IF_MASK;
798     }
799     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
800 
801     if (new_stack) {
802         if (vm86) {
803             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
804             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
805             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
806             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
807         }
808         ss = (ss & ~3) | dpl;
809         cpu_x86_load_seg_cache(env, R_SS, ss,
810                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
811     }
812     SET_ESP(esp, sp_mask);
813 
814     selector = (selector & ~3) | dpl;
815     cpu_x86_load_seg_cache(env, R_CS, selector,
816                    get_seg_base(e1, e2),
817                    get_seg_limit(e1, e2),
818                    e2);
819     env->eip = offset;
820 }
821 
822 #ifdef TARGET_X86_64
823 
824 #define PUSHQ_RA(sp, val, ra)                   \
825     {                                           \
826         sp -= 8;                                \
827         cpu_stq_kernel_ra(env, sp, (val), ra);  \
828     }
829 
830 #define POPQ_RA(sp, val, ra)                    \
831     {                                           \
832         val = cpu_ldq_kernel_ra(env, sp, ra);   \
833         sp += 8;                                \
834     }
835 
836 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
837 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
838 
839 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
840 {
841     X86CPU *cpu = env_archcpu(env);
842     int index;
843 
844 #if 0
845     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
846            env->tr.base, env->tr.limit);
847 #endif
848 
849     if (!(env->tr.flags & DESC_P_MASK)) {
850         cpu_abort(CPU(cpu), "invalid tss");
851     }
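    /* 64-bit TSS: RSP0..RSP2 at offsets 4, 12 and 20, IST1..IST7 at
       offsets 36..84, hence index = 8 * level + 4 with IST n passed in
       as level n + 3 */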
852     index = 8 * level + 4;
853     if ((index + 7) > env->tr.limit) {
854         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
855     }
856     return cpu_ldq_kernel(env, env->tr.base + index);
857 }
858 
859 /* 64 bit interrupt */
860 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
861                            int error_code, target_ulong next_eip, int is_hw)
862 {
863     SegmentCache *dt;
864     target_ulong ptr;
865     int type, dpl, selector, cpl, ist;
866     int has_error_code, new_stack;
867     uint32_t e1, e2, e3, ss;
868     target_ulong old_eip, esp, offset;
869 
870     has_error_code = 0;
871     if (!is_int && !is_hw) {
872         has_error_code = exception_has_error_code(intno);
873     }
874     if (is_int) {
875         old_eip = next_eip;
876     } else {
877         old_eip = env->eip;
878     }
879 
880     dt = &env->idt;
881     if (intno * 16 + 15 > dt->limit) {
882         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
883     }
884     ptr = dt->base + intno * 16;
885     e1 = cpu_ldl_kernel(env, ptr);
886     e2 = cpu_ldl_kernel(env, ptr + 4);
887     e3 = cpu_ldl_kernel(env, ptr + 8);
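    /* long-mode IDT entries are 16 bytes: e1/e2 as in a legacy gate plus
       e3 = offset[63:32]; the IST index is in bits 2:0 of e2 */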
888     /* check gate type */
889     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
890     switch (type) {
891     case 14: /* 386 interrupt gate */
892     case 15: /* 386 trap gate */
893         break;
894     default:
895         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
896         break;
897     }
898     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
899     cpl = env->hflags & HF_CPL_MASK;
900     /* check privilege if software int */
901     if (is_int && dpl < cpl) {
902         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
903     }
904     /* check valid bit */
905     if (!(e2 & DESC_P_MASK)) {
906         raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
907     }
908     selector = e1 >> 16;
909     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
910     ist = e2 & 7;
911     if ((selector & 0xfffc) == 0) {
912         raise_exception_err(env, EXCP0D_GPF, 0);
913     }
914 
915     if (load_segment(env, &e1, &e2, selector) != 0) {
916         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
917     }
918     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
919         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
920     }
921     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
922     if (dpl > cpl) {
923         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
924     }
925     if (!(e2 & DESC_P_MASK)) {
926         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
927     }
928     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
929         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
930     }
931     if (e2 & DESC_C_MASK) {
932         dpl = cpl;
933     }
934     if (dpl < cpl || ist != 0) {
935         /* to inner privilege */
936         new_stack = 1;
937         esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
938         ss = 0;
939     } else {
940         /* to same privilege */
941         if (env->eflags & VM_MASK) {
942             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
943         }
944         new_stack = 0;
945         esp = env->regs[R_ESP];
946     }
947     esp &= ~0xfLL; /* align stack */
948 
949     PUSHQ(esp, env->segs[R_SS].selector);
950     PUSHQ(esp, env->regs[R_ESP]);
951     PUSHQ(esp, cpu_compute_eflags(env));
952     PUSHQ(esp, env->segs[R_CS].selector);
953     PUSHQ(esp, old_eip);
954     if (has_error_code) {
955         PUSHQ(esp, error_code);
956     }
957 
958     /* interrupt gates clear the IF flag (trap gates do not) */
959     if ((type & 1) == 0) {
960         env->eflags &= ~IF_MASK;
961     }
962     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
963 
964     if (new_stack) {
965         ss = 0 | dpl;
966         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
967     }
968     env->regs[R_ESP] = esp;
969 
970     selector = (selector & ~3) | dpl;
971     cpu_x86_load_seg_cache(env, R_CS, selector,
972                    get_seg_base(e1, e2),
973                    get_seg_limit(e1, e2),
974                    e2);
975     env->eip = offset;
976 }
977 #endif
978 
979 #ifdef TARGET_X86_64
980 #if defined(CONFIG_USER_ONLY)
981 void helper_syscall(CPUX86State *env, int next_eip_addend)
982 {
983     CPUState *cs = env_cpu(env);
984 
985     cs->exception_index = EXCP_SYSCALL;
986     env->exception_is_int = 0;
987     env->exception_next_eip = env->eip + next_eip_addend;
988     cpu_loop_exit(cs);
989 }
990 #else
991 void helper_syscall(CPUX86State *env, int next_eip_addend)
992 {
993     int selector;
994 
995     if (!(env->efer & MSR_EFER_SCE)) {
996         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
997     }
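    /* STAR[47:32] is the SYSCALL CS selector (SS is CS + 8); the target
       RIP comes from LSTAR/CSTAR in long mode or STAR[31:0] in legacy
       mode, and in long mode FMASK selects which RFLAGS bits to clear */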
998     selector = (env->star >> 32) & 0xffff;
999     if (env->hflags & HF_LMA_MASK) {
1000         int code64;
1001 
1002         env->regs[R_ECX] = env->eip + next_eip_addend;
1003         env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
1004 
1005         code64 = env->hflags & HF_CS64_MASK;
1006 
1007         env->eflags &= ~(env->fmask | RF_MASK);
1008         cpu_load_eflags(env, env->eflags, 0);
1009         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1010                            0, 0xffffffff,
1011                                DESC_G_MASK | DESC_P_MASK |
1012                                DESC_S_MASK |
1013                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1014                                DESC_L_MASK);
1015         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1016                                0, 0xffffffff,
1017                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1018                                DESC_S_MASK |
1019                                DESC_W_MASK | DESC_A_MASK);
1020         if (code64) {
1021             env->eip = env->lstar;
1022         } else {
1023             env->eip = env->cstar;
1024         }
1025     } else {
1026         env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1027 
1028         env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1029         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1030                            0, 0xffffffff,
1031                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1032                                DESC_S_MASK |
1033                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1034         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1035                                0, 0xffffffff,
1036                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1037                                DESC_S_MASK |
1038                                DESC_W_MASK | DESC_A_MASK);
1039         env->eip = (uint32_t)env->star;
1040     }
1041 }
1042 #endif
1043 #endif
1044 
1045 #ifdef TARGET_X86_64
1046 void helper_sysret(CPUX86State *env, int dflag)
1047 {
1048     int cpl, selector;
1049 
1050     if (!(env->efer & MSR_EFER_SCE)) {
1051         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1052     }
1053     cpl = env->hflags & HF_CPL_MASK;
1054     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1055         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1056     }
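    /* STAR[63:48] is the SYSRET base selector: a 64-bit return uses
       CS = base + 16, a 32-bit return uses CS = base, and SS is always
       base + 8; all are loaded with privilege level 3 */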
1057     selector = (env->star >> 48) & 0xffff;
1058     if (env->hflags & HF_LMA_MASK) {
1059         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1060                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1061                         NT_MASK);
1062         if (dflag == 2) {
1063             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1064                                    0, 0xffffffff,
1065                                    DESC_G_MASK | DESC_P_MASK |
1066                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1067                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1068                                    DESC_L_MASK);
1069             env->eip = env->regs[R_ECX];
1070         } else {
1071             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1072                                    0, 0xffffffff,
1073                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1074                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1075                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1076             env->eip = (uint32_t)env->regs[R_ECX];
1077         }
1078         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1079                                0, 0xffffffff,
1080                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1081                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1082                                DESC_W_MASK | DESC_A_MASK);
1083     } else {
1084         env->eflags |= IF_MASK;
1085         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1086                                0, 0xffffffff,
1087                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1088                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1089                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1090         env->eip = (uint32_t)env->regs[R_ECX];
1091         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1092                                0, 0xffffffff,
1093                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1094                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1095                                DESC_W_MASK | DESC_A_MASK);
1096     }
1097 }
1098 #endif
1099 
1100 /* real mode interrupt */
1101 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1102                               int error_code, unsigned int next_eip)
1103 {
1104     SegmentCache *dt;
1105     target_ulong ptr, ssp;
1106     int selector;
1107     uint32_t offset, esp;
1108     uint32_t old_cs, old_eip;
1109 
1110     /* real mode (simpler!) */
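    /* the real-mode IVT has 4-byte entries: 16-bit offset at +0 and
       16-bit segment at +2 */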
1111     dt = &env->idt;
1112     if (intno * 4 + 3 > dt->limit) {
1113         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1114     }
1115     ptr = dt->base + intno * 4;
1116     offset = cpu_lduw_kernel(env, ptr);
1117     selector = cpu_lduw_kernel(env, ptr + 2);
1118     esp = env->regs[R_ESP];
1119     ssp = env->segs[R_SS].base;
1120     if (is_int) {
1121         old_eip = next_eip;
1122     } else {
1123         old_eip = env->eip;
1124     }
1125     old_cs = env->segs[R_CS].selector;
1126     /* XXX: use SS segment size? */
1127     PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1128     PUSHW(ssp, esp, 0xffff, old_cs);
1129     PUSHW(ssp, esp, 0xffff, old_eip);
1130 
1131     /* update processor state */
1132     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1133     env->eip = offset;
1134     env->segs[R_CS].selector = selector;
1135     env->segs[R_CS].base = (selector << 4);
1136     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1137 }
1138 
1139 #if defined(CONFIG_USER_ONLY)
1140 /* fake user mode interrupt. is_int is TRUE if coming from the int
1141  * instruction. next_eip is the env->eip value AFTER the interrupt
1142  * instruction. It is only relevant if is_int is TRUE or if intno
1143  * is EXCP_SYSCALL.
1144  */
1145 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1146                               int error_code, target_ulong next_eip)
1147 {
1148     if (is_int) {
1149         SegmentCache *dt;
1150         target_ulong ptr;
1151         int dpl, cpl, shift;
1152         uint32_t e2;
1153 
1154         dt = &env->idt;
1155         if (env->hflags & HF_LMA_MASK) {
1156             shift = 4;
1157         } else {
1158             shift = 3;
1159         }
1160         ptr = dt->base + (intno << shift);
1161         e2 = cpu_ldl_kernel(env, ptr + 4);
1162 
1163         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1164         cpl = env->hflags & HF_CPL_MASK;
1165         /* check privilege if software int */
1166         if (dpl < cpl) {
1167             raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1168         }
1169     }
1170 
1171     /* Since we emulate only user space, we cannot do more than
1172        exit the emulation with the suitable exception and error
1173        code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1174     if (is_int || intno == EXCP_SYSCALL) {
1175         env->eip = next_eip;
1176     }
1177 }
1178 
1179 #else
1180 
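/*
 * While running as an SVM guest, record the event being delivered in the
 * VMCB event_inj field; do_interrupt_all() clears the VALID bit again
 * once the event has actually been delivered.
 */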
1181 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1182                             int error_code, int is_hw, int rm)
1183 {
1184     CPUState *cs = env_cpu(env);
1185     uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1186                                                           control.event_inj));
1187 
1188     if (!(event_inj & SVM_EVTINJ_VALID)) {
1189         int type;
1190 
1191         if (is_int) {
1192             type = SVM_EVTINJ_TYPE_SOFT;
1193         } else {
1194             type = SVM_EVTINJ_TYPE_EXEPT;
1195         }
1196         event_inj = intno | type | SVM_EVTINJ_VALID;
1197         if (!rm && exception_has_error_code(intno)) {
1198             event_inj |= SVM_EVTINJ_VALID_ERR;
1199             x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1200                                              control.event_inj_err),
1201                      error_code);
1202         }
1203         x86_stl_phys(cs,
1204                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1205                  event_inj);
1206     }
1207 }
1208 #endif
1209 
1210 /*
1211  * Begin execution of an interruption. is_int is TRUE if coming from
1212  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1213  * instruction. It is only relevant if is_int is TRUE.
1214  */
1215 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1216                              int error_code, target_ulong next_eip, int is_hw)
1217 {
1218     CPUX86State *env = &cpu->env;
1219 
1220     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1221         if ((env->cr[0] & CR0_PE_MASK)) {
1222             static int count;
1223 
1224             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1225                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1226                      count, intno, error_code, is_int,
1227                      env->hflags & HF_CPL_MASK,
1228                      env->segs[R_CS].selector, env->eip,
1229                      (int)env->segs[R_CS].base + env->eip,
1230                      env->segs[R_SS].selector, env->regs[R_ESP]);
1231             if (intno == 0x0e) {
1232                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1233             } else {
1234                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1235             }
1236             qemu_log("\n");
1237             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1238 #if 0
1239             {
1240                 int i;
1241                 target_ulong ptr;
1242 
1243                 qemu_log("       code=");
1244                 ptr = env->segs[R_CS].base + env->eip;
1245                 for (i = 0; i < 16; i++) {
1246                     qemu_log(" %02x", ldub(ptr + i));
1247                 }
1248                 qemu_log("\n");
1249             }
1250 #endif
1251             count++;
1252         }
1253     }
1254     if (env->cr[0] & CR0_PE_MASK) {
1255 #if !defined(CONFIG_USER_ONLY)
1256         if (env->hflags & HF_GUEST_MASK) {
1257             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1258         }
1259 #endif
1260 #ifdef TARGET_X86_64
1261         if (env->hflags & HF_LMA_MASK) {
1262             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1263         } else
1264 #endif
1265         {
1266             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1267                                    is_hw);
1268         }
1269     } else {
1270 #if !defined(CONFIG_USER_ONLY)
1271         if (env->hflags & HF_GUEST_MASK) {
1272             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1273         }
1274 #endif
1275         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1276     }
1277 
1278 #if !defined(CONFIG_USER_ONLY)
1279     if (env->hflags & HF_GUEST_MASK) {
1280         CPUState *cs = CPU(cpu);
1281         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1282                                       offsetof(struct vmcb,
1283                                                control.event_inj));
1284 
1285         x86_stl_phys(cs,
1286                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1287                  event_inj & ~SVM_EVTINJ_VALID);
1288     }
1289 #endif
1290 }
1291 
1292 void x86_cpu_do_interrupt(CPUState *cs)
1293 {
1294     X86CPU *cpu = X86_CPU(cs);
1295     CPUX86State *env = &cpu->env;
1296 
1297 #if defined(CONFIG_USER_ONLY)
1298     /* if user mode only, we simulate a fake exception
1299        which will be handled outside the cpu execution
1300        loop */
1301     do_interrupt_user(env, cs->exception_index,
1302                       env->exception_is_int,
1303                       env->error_code,
1304                       env->exception_next_eip);
1305     /* successfully delivered */
1306     env->old_exception = -1;
1307 #else
1308     if (cs->exception_index == EXCP_VMEXIT) {
1309         assert(env->old_exception == -1);
1310         do_vmexit(env);
1311     } else {
1312         do_interrupt_all(cpu, cs->exception_index,
1313                          env->exception_is_int,
1314                          env->error_code,
1315                          env->exception_next_eip, 0);
1316         /* successfully delivered */
1317         env->old_exception = -1;
1318     }
1319 #endif
1320 }
1321 
1322 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1323 {
1324     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1325 }
1326 
1327 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1328 {
1329     X86CPU *cpu = X86_CPU(cs);
1330     CPUX86State *env = &cpu->env;
1331     int intno;
1332 
1333     interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
1334     if (!interrupt_request) {
1335         return false;
1336     }
1337 
1338     /* Don't process multiple interrupt requests in a single call.
1339      * This is required to make icount-driven execution deterministic.
1340      */
1341     switch (interrupt_request) {
1342 #if !defined(CONFIG_USER_ONLY)
1343     case CPU_INTERRUPT_POLL:
1344         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1345         apic_poll_irq(cpu->apic_state);
1346         break;
1347 #endif
1348     case CPU_INTERRUPT_SIPI:
1349         do_cpu_sipi(cpu);
1350         break;
1351     case CPU_INTERRUPT_SMI:
1352         cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
1353         cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1354 #ifdef CONFIG_USER_ONLY
1355         cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
1356 #else
1357         do_smm_enter(cpu);
1358 #endif /* CONFIG_USER_ONLY */
1359         break;
1360     case CPU_INTERRUPT_NMI:
1361         cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
1362         cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1363         env->hflags2 |= HF2_NMI_MASK;
1364         do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1365         break;
1366     case CPU_INTERRUPT_MCE:
1367         cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1368         do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1369         break;
1370     case CPU_INTERRUPT_HARD:
1371         cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
1372         cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1373                                    CPU_INTERRUPT_VIRQ);
1374         intno = cpu_get_pic_interrupt(env);
1375         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1376                       "Servicing hardware INT=0x%02x\n", intno);
1377         do_interrupt_x86_hardirq(env, intno, 1);
1378         break;
1379 #if !defined(CONFIG_USER_ONLY)
1380     case CPU_INTERRUPT_VIRQ:
1381         /* FIXME: this should respect TPR */
1382         cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
1383         intno = x86_ldl_phys(cs, env->vm_vmcb
1384                              + offsetof(struct vmcb, control.int_vector));
1385         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1386                       "Servicing virtual hardware INT=0x%02x\n", intno);
1387         do_interrupt_x86_hardirq(env, intno, 1);
1388         cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1389         break;
1390 #endif
1391     }
1392 
1393     /* Ensure that no TB jump will be modified as the program flow was changed.  */
1394     return true;
1395 }
1396 
1397 void helper_lldt(CPUX86State *env, int selector)
1398 {
1399     SegmentCache *dt;
1400     uint32_t e1, e2;
1401     int index, entry_limit;
1402     target_ulong ptr;
1403 
1404     selector &= 0xffff;
1405     if ((selector & 0xfffc) == 0) {
1406         /* XXX: NULL selector case: invalid LDT */
1407         env->ldt.base = 0;
1408         env->ldt.limit = 0;
1409     } else {
1410         if (selector & 0x4) {
1411             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1412         }
1413         dt = &env->gdt;
1414         index = selector & ~7;
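        /* in long mode, LDT and TSS descriptors are 16 bytes long: the
           extra doubleword holds base[63:32], hence the larger
           entry_limit */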
1415 #ifdef TARGET_X86_64
1416         if (env->hflags & HF_LMA_MASK) {
1417             entry_limit = 15;
1418         } else
1419 #endif
1420         {
1421             entry_limit = 7;
1422         }
1423         if ((index + entry_limit) > dt->limit) {
1424             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1425         }
1426         ptr = dt->base + index;
1427         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1428         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1429         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1430             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1431         }
1432         if (!(e2 & DESC_P_MASK)) {
1433             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1434         }
1435 #ifdef TARGET_X86_64
1436         if (env->hflags & HF_LMA_MASK) {
1437             uint32_t e3;
1438 
1439             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1440             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1441             env->ldt.base |= (target_ulong)e3 << 32;
1442         } else
1443 #endif
1444         {
1445             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1446         }
1447     }
1448     env->ldt.selector = selector;
1449 }
1450 
1451 void helper_ltr(CPUX86State *env, int selector)
1452 {
1453     SegmentCache *dt;
1454     uint32_t e1, e2;
1455     int index, type, entry_limit;
1456     target_ulong ptr;
1457 
1458     selector &= 0xffff;
1459     if ((selector & 0xfffc) == 0) {
1460         /* NULL selector case: invalid TR */
1461         env->tr.base = 0;
1462         env->tr.limit = 0;
1463         env->tr.flags = 0;
1464     } else {
1465         if (selector & 0x4) {
1466             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1467         }
1468         dt = &env->gdt;
1469         index = selector & ~7;
1470 #ifdef TARGET_X86_64
1471         if (env->hflags & HF_LMA_MASK) {
1472             entry_limit = 15;
1473         } else
1474 #endif
1475         {
1476             entry_limit = 7;
1477         }
1478         if ((index + entry_limit) > dt->limit) {
1479             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1480         }
1481         ptr = dt->base + index;
1482         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1483         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1484         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
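        /* LTR requires an available TSS: type 1 (16-bit) or type 9 (32/64-bit). */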
1485         if ((e2 & DESC_S_MASK) ||
1486             (type != 1 && type != 9)) {
1487             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1488         }
1489         if (!(e2 & DESC_P_MASK)) {
1490             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1491         }
1492 #ifdef TARGET_X86_64
1493         if (env->hflags & HF_LMA_MASK) {
1494             uint32_t e3, e4;
1495 
1496             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1497             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1498             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1499                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1500             }
1501             load_seg_cache_raw_dt(&env->tr, e1, e2);
1502             env->tr.base |= (target_ulong)e3 << 32;
1503         } else
1504 #endif
1505         {
1506             load_seg_cache_raw_dt(&env->tr, e1, e2);
1507         }
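        /*
         * Mark the descriptor busy in the GDT so that a later LTR or task
         * switch to this TSS (other than a nested-task IRET) will fault.
         */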
1508         e2 |= DESC_TSS_BUSY_MASK;
1509         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1510     }
1511     env->tr.selector = selector;
1512 }
1513 
1514 /* Only works in protected mode and outside VM86; seg_reg must not be R_CS. */
1515 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1516 {
1517     uint32_t e1, e2;
1518     int cpl, dpl, rpl;
1519     SegmentCache *dt;
1520     int index;
1521     target_ulong ptr;
1522 
1523     selector &= 0xffff;
1524     cpl = env->hflags & HF_CPL_MASK;
1525     if ((selector & 0xfffc) == 0) {
1526         /* null selector case */
1527         if (seg_reg == R_SS
1528 #ifdef TARGET_X86_64
1529             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1530 #endif
1531             ) {
1532             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1533         }
1534         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1535     } else {
1536 
1537         if (selector & 0x4) {
1538             dt = &env->ldt;
1539         } else {
1540             dt = &env->gdt;
1541         }
1542         index = selector & ~7;
1543         if ((index + 7) > dt->limit) {
1544             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1545         }
1546         ptr = dt->base + index;
1547         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1548         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1549 
1550         if (!(e2 & DESC_S_MASK)) {
1551             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1552         }
1553         rpl = selector & 3;
1554         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1555         if (seg_reg == R_SS) {
1556             /* must be writable segment */
1557             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1558                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1559             }
1560             if (rpl != cpl || dpl != cpl) {
1561                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1562             }
1563         } else {
1564             /* must be readable segment */
1565             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1566                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1567             }
1568 
1569             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1570                 /* if not conforming code, test rights */
1571                 if (dpl < cpl || dpl < rpl) {
1572                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1573                 }
1574             }
1575         }
1576 
1577         if (!(e2 & DESC_P_MASK)) {
1578             if (seg_reg == R_SS) {
1579                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1580             } else {
1581                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1582             }
1583         }
1584 
1585         /* set the access bit if not already set */
1586         if (!(e2 & DESC_A_MASK)) {
1587             e2 |= DESC_A_MASK;
1588             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1589         }
1590 
1591         cpu_x86_load_seg_cache(env, seg_reg, selector,
1592                        get_seg_base(e1, e2),
1593                        get_seg_limit(e1, e2),
1594                        e2);
1595 #if 0
1596         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08x flags=%08x\n", selector,
1597                  (unsigned long)env->segs[seg_reg].base, env->segs[seg_reg].limit, env->segs[seg_reg].flags);
1598 #endif
1599     }
1600 }
1601 
1602 /* protected mode jump */
1603 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1604                            target_ulong next_eip)
1605 {
1606     int gate_cs, type;
1607     uint32_t e1, e2, cpl, dpl, rpl, limit;
1608 
1609     if ((new_cs & 0xfffc) == 0) {
1610         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1611     }
1612     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1613         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1614     }
1615     cpl = env->hflags & HF_CPL_MASK;
1616     if (e2 & DESC_S_MASK) {
1617         if (!(e2 & DESC_CS_MASK)) {
1618             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1619         }
1620         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1621         if (e2 & DESC_C_MASK) {
1622             /* conforming code segment */
1623             if (dpl > cpl) {
1624                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1625             }
1626         } else {
1627             /* non-conforming code segment */
1628             rpl = new_cs & 3;
1629             if (rpl > cpl) {
1630                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1631             }
1632             if (dpl != cpl) {
1633                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1634             }
1635         }
1636         if (!(e2 & DESC_P_MASK)) {
1637             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1638         }
1639         limit = get_seg_limit(e1, e2);
1640         if (new_eip > limit &&
1641             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1642             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1643         }
1644         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1645                        get_seg_base(e1, e2), limit, e2);
1646         env->eip = new_eip;
1647     } else {
1648         /* jump to call or task gate */
1649         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1650         rpl = new_cs & 3;
1651         cpl = env->hflags & HF_CPL_MASK;
1652         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1653 
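        /*
         * In long mode, only 64-bit call gates (type 12) are legal here;
         * 286 gates, task gates and TSS descriptors raise #GP.
         */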
1654 #ifdef TARGET_X86_64
1655         if (env->efer & MSR_EFER_LMA) {
1656             if (type != 12) {
1657                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1658             }
1659         }
1660 #endif
1661         switch (type) {
1662         case 1: /* 286 TSS */
1663         case 9: /* 386 TSS */
1664         case 5: /* task gate */
1665             if (dpl < cpl || dpl < rpl) {
1666                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1667             }
1668             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1669             break;
1670         case 4: /* 286 call gate */
1671         case 12: /* 386 call gate */
1672             if ((dpl < cpl) || (dpl < rpl)) {
1673                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1674             }
1675             if (!(e2 & DESC_P_MASK)) {
1676                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1677             }
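            /*
             * Gate layout: target selector in bits 31:16 of the first word,
             * offset 15:0 in its low half; 386 gates add offset 31:16 in the
             * second word.
             */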
1678             gate_cs = e1 >> 16;
1679             new_eip = (e1 & 0xffff);
1680             if (type == 12) {
1681                 new_eip |= (e2 & 0xffff0000);
1682             }
1683 
1684 #ifdef TARGET_X86_64
1685             if (env->efer & MSR_EFER_LMA) {
1686                 /* load the upper 8 bytes of the 64-bit call gate */
1687                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1688                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1689                                            GETPC());
1690                 }
1691                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1692                 if (type != 0) {
1693                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1694                                            GETPC());
1695                 }
1696                 new_eip |= ((target_ulong)e1) << 32;
1697             }
1698 #endif
1699 
1700             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1701                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1702             }
1703             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1704             /* must be code segment */
1705             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1706                  (DESC_S_MASK | DESC_CS_MASK))) {
1707                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1708             }
1709             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1710                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1711                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1712             }
1713 #ifdef TARGET_X86_64
1714             if (env->efer & MSR_EFER_LMA) {
1715                 if (!(e2 & DESC_L_MASK)) {
1716                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1717                 }
1718                 if (e2 & DESC_B_MASK) {
1719                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1720                 }
1721             }
1722 #endif
1723             if (!(e2 & DESC_P_MASK)) {
1724                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1725             }
1726             limit = get_seg_limit(e1, e2);
1727             if (new_eip > limit &&
1728                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1729                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1730             }
1731             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1732                                    get_seg_base(e1, e2), limit, e2);
1733             env->eip = new_eip;
1734             break;
1735         default:
1736             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1737             break;
1738         }
1739     }
1740 }
1741 
1742 /* real mode call */
1743 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1744                        int shift, int next_eip)
1745 {
1746     int new_eip;
1747     uint32_t esp, esp_mask;
1748     target_ulong ssp;
1749 
1750     new_eip = new_eip1;
1751     esp = env->regs[R_ESP];
1752     esp_mask = get_sp_mask(env->segs[R_SS].flags);
1753     ssp = env->segs[R_SS].base;
1754     if (shift) {
1755         PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1756         PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1757     } else {
1758         PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1759         PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1760     }
1761 
1762     SET_ESP(esp, esp_mask);
1763     env->eip = new_eip;
1764     env->segs[R_CS].selector = new_cs;
1765     env->segs[R_CS].base = (new_cs << 4);
1766 }
1767 
1768 /* protected mode call */
1769 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1770                             int shift, target_ulong next_eip)
1771 {
1772     int new_stack, i;
1773     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1774     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1775     uint32_t val, limit, old_sp_mask;
1776     target_ulong ssp, old_ssp, offset, sp;
1777 
1778     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1779     LOG_PCALL_STATE(env_cpu(env));
1780     if ((new_cs & 0xfffc) == 0) {
1781         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1782     }
1783     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1784         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1785     }
1786     cpl = env->hflags & HF_CPL_MASK;
1787     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1788     if (e2 & DESC_S_MASK) {
1789         if (!(e2 & DESC_CS_MASK)) {
1790             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1791         }
1792         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1793         if (e2 & DESC_C_MASK) {
1794             /* conforming code segment */
1795             if (dpl > cpl) {
1796                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1797             }
1798         } else {
1799             /* non-conforming code segment */
1800             rpl = new_cs & 3;
1801             if (rpl > cpl) {
1802                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1803             }
1804             if (dpl != cpl) {
1805                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1806             }
1807         }
1808         if (!(e2 & DESC_P_MASK)) {
1809             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1810         }
1811 
1812 #ifdef TARGET_X86_64
1813         /* XXX: check 16/32 bit cases in long mode */
1814         if (shift == 2) {
1815             target_ulong rsp;
1816 
1817             /* 64 bit case */
1818             rsp = env->regs[R_ESP];
1819             PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1820             PUSHQ_RA(rsp, next_eip, GETPC());
1821             /* from this point, not restartable */
1822             env->regs[R_ESP] = rsp;
1823             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1824                                    get_seg_base(e1, e2),
1825                                    get_seg_limit(e1, e2), e2);
1826             env->eip = new_eip;
1827         } else
1828 #endif
1829         {
1830             sp = env->regs[R_ESP];
1831             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1832             ssp = env->segs[R_SS].base;
1833             if (shift) {
1834                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1835                 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1836             } else {
1837                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1838                 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1839             }
1840 
1841             limit = get_seg_limit(e1, e2);
1842             if (new_eip > limit) {
1843                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1844             }
1845             /* from this point, not restartable */
1846             SET_ESP(sp, sp_mask);
1847             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1848                                    get_seg_base(e1, e2), limit, e2);
1849             env->eip = new_eip;
1850         }
1851     } else {
1852         /* check gate type */
1853         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1854         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1855         rpl = new_cs & 3;
1856 
1857 #ifdef TARGET_X86_64
1858         if (env->efer & MSR_EFER_LMA) {
1859             if (type != 12) {
1860                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1861             }
1862         }
1863 #endif
1864 
1865         switch (type) {
1866         case 1: /* available 286 TSS */
1867         case 9: /* available 386 TSS */
1868         case 5: /* task gate */
1869             if (dpl < cpl || dpl < rpl) {
1870                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1871             }
1872             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1873             return;
1874         case 4: /* 286 call gate */
1875         case 12: /* 386 call gate */
1876             break;
1877         default:
1878             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1879             break;
1880         }
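        /*
         * Gate operand size: type 4 (286 gate) >> 3 == 0 selects 16-bit
         * pushes, type 12 (386 gate) >> 3 == 1 selects 32-bit; this is
         * bumped to 2 below for 64-bit call gates.
         */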
1881         shift = type >> 3;
1882 
1883         if (dpl < cpl || dpl < rpl) {
1884             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1885         }
1886         /* check valid bit */
1887         if (!(e2 & DESC_P_MASK)) {
1888             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1889         }
1890         selector = e1 >> 16;
1891         param_count = e2 & 0x1f;
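        /*
         * param_count is the number of stack parameters copied to the new
         * stack on an inter-privilege-level call (low 5 bits of the gate).
         */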
1892         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1893 #ifdef TARGET_X86_64
1894         if (env->efer & MSR_EFER_LMA) {
1895             /* load the upper 8 bytes of the 64-bit call gate */
1896             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1897                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1898                                        GETPC());
1899             }
1900             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1901             if (type != 0) {
1902                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1903                                        GETPC());
1904             }
1905             offset |= ((target_ulong)e1) << 32;
1906         }
1907 #endif
1908         if ((selector & 0xfffc) == 0) {
1909             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1910         }
1911 
1912         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1913             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1914         }
1915         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1916             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1917         }
1918         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1919         if (dpl > cpl) {
1920             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1921         }
1922 #ifdef TARGET_X86_64
1923         if (env->efer & MSR_EFER_LMA) {
1924             if (!(e2 & DESC_L_MASK)) {
1925                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1926             }
1927             if (e2 & DESC_B_MASK) {
1928                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1929             }
1930             shift++;
1931         }
1932 #endif
1933         if (!(e2 & DESC_P_MASK)) {
1934             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1935         }
1936 
1937         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1938             /* to inner privilege */
1939 #ifdef TARGET_X86_64
1940             if (shift == 2) {
1941                 sp = get_rsp_from_tss(env, dpl);
1942                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1943                 new_stack = 1;
1944                 sp_mask = 0;
1945                 ssp = 0;  /* SS base is always zero in IA-32e mode */
1946                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1947                           TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1948             } else
1949 #endif
1950             {
1951                 uint32_t sp32;
1952                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1953                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1954                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1955                           env->regs[R_ESP]);
1956                 sp = sp32;
1957                 if ((ss & 0xfffc) == 0) {
1958                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1959                 }
1960                 if ((ss & 3) != dpl) {
1961                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1962                 }
1963                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1964                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1965                 }
1966                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1967                 if (ss_dpl != dpl) {
1968                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1969                 }
1970                 if (!(ss_e2 & DESC_S_MASK) ||
1971                     (ss_e2 & DESC_CS_MASK) ||
1972                     !(ss_e2 & DESC_W_MASK)) {
1973                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1974                 }
1975                 if (!(ss_e2 & DESC_P_MASK)) {
1976                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1977                 }
1978 
1979                 sp_mask = get_sp_mask(ss_e2);
1980                 ssp = get_seg_base(ss_e1, ss_e2);
1981             }
1982 
1983             /* push_size = ((param_count * 2) + 8) << shift; */
1984 
1985             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1986             old_ssp = env->segs[R_SS].base;
1987 #ifdef TARGET_X86_64
1988             if (shift == 2) {
1989                 /* XXX: verify if new stack address is canonical */
1990                 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
1991                 PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
1992                 /* parameters aren't supported for 64-bit call gates */
1993             } else
1994 #endif
1995             if (shift == 1) {
1996                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1997                 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1998                 for (i = param_count - 1; i >= 0; i--) {
1999                     val = cpu_ldl_kernel_ra(env, old_ssp +
2000                                             ((env->regs[R_ESP] + i * 4) &
2001                                              old_sp_mask), GETPC());
2002                     PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
2003                 }
2004             } else {
2005                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
2006                 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
2007                 for (i = param_count - 1; i >= 0; i--) {
2008                     val = cpu_lduw_kernel_ra(env, old_ssp +
2009                                              ((env->regs[R_ESP] + i * 2) &
2010                                               old_sp_mask), GETPC());
2011                     PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
2012                 }
2013             }
2014             new_stack = 1;
2015         } else {
2016             /* to same privilege */
2017             sp = env->regs[R_ESP];
2018             sp_mask = get_sp_mask(env->segs[R_SS].flags);
2019             ssp = env->segs[R_SS].base;
2020             /* push_size = (4 << shift); */
2021             new_stack = 0;
2022         }
2023 
2024 #ifdef TARGET_X86_64
2025         if (shift == 2) {
2026             PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
2027             PUSHQ_RA(sp, next_eip, GETPC());
2028         } else
2029 #endif
2030         if (shift == 1) {
2031             PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2032             PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
2033         } else {
2034             PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2035             PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
2036         }
2037 
2038         /* from this point, not restartable */
2039 
2040         if (new_stack) {
2041 #ifdef TARGET_X86_64
2042             if (shift == 2) {
2043                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
2044             } else
2045 #endif
2046             {
2047                 ss = (ss & ~3) | dpl;
2048                 cpu_x86_load_seg_cache(env, R_SS, ss,
2049                                        ssp,
2050                                        get_seg_limit(ss_e1, ss_e2),
2051                                        ss_e2);
2052             }
2053         }
2054 
2055         selector = (selector & ~3) | dpl;
2056         cpu_x86_load_seg_cache(env, R_CS, selector,
2057                        get_seg_base(e1, e2),
2058                        get_seg_limit(e1, e2),
2059                        e2);
2060         SET_ESP(sp, sp_mask);
2061         env->eip = offset;
2062     }
2063 }
2064 
2065 /* real and vm86 mode iret */
2066 void helper_iret_real(CPUX86State *env, int shift)
2067 {
2068     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2069     target_ulong ssp;
2070     int eflags_mask;
2071 
2072     sp_mask = 0xffff; /* XXX: use SS segment size? */
2073     sp = env->regs[R_ESP];
2074     ssp = env->segs[R_SS].base;
2075     if (shift == 1) {
2076         /* 32 bits */
2077         POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2078         POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2079         new_cs &= 0xffff;
2080         POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2081     } else {
2082         /* 16 bits */
2083         POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2084         POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2085         POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2086     }
2087     env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2088     env->segs[R_CS].selector = new_cs;
2089     env->segs[R_CS].base = (new_cs << 4);
2090     env->eip = new_eip;
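    /* IRET may change IOPL only in real mode; in VM86 mode IOPL is preserved. */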
2091     if (env->eflags & VM_MASK) {
2092         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2093             NT_MASK;
2094     } else {
2095         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2096             RF_MASK | NT_MASK;
2097     }
2098     if (shift == 0) {
2099         eflags_mask &= 0xffff;
2100     }
2101     cpu_load_eflags(env, new_eflags, eflags_mask);
2102     env->hflags2 &= ~HF2_NMI_MASK;
2103 }
2104 
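/*
 * On a return to an outer privilege level, data segment registers whose DPL
 * is below the new CPL must not remain usable; they are reloaded with a null
 * selector and the cached present bit is cleared.
 */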
2105 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
2106 {
2107     int dpl;
2108     uint32_t e2;
2109 
2110     /* XXX: on x86_64, we do not want to nullify FS and GS because
2111        they may still contain a valid base. It is unclear how a real
2112        x86_64 CPU behaves here. */
2113     if ((seg_reg == R_FS || seg_reg == R_GS) &&
2114         (env->segs[seg_reg].selector & 0xfffc) == 0) {
2115         return;
2116     }
2117 
2118     e2 = env->segs[seg_reg].flags;
2119     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2120     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2121         /* data or non-conforming code segment */
2122         if (dpl < cpl) {
2123             cpu_x86_load_seg_cache(env, seg_reg, 0,
2124                                    env->segs[seg_reg].base,
2125                                    env->segs[seg_reg].limit,
2126                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
2127         }
2128     }
2129 }
2130 
2131 /* protected mode iret */
2132 static inline void helper_ret_protected(CPUX86State *env, int shift,
2133                                         int is_iret, int addend,
2134                                         uintptr_t retaddr)
2135 {
2136     uint32_t new_cs, new_eflags, new_ss;
2137     uint32_t new_es, new_ds, new_fs, new_gs;
2138     uint32_t e1, e2, ss_e1, ss_e2;
2139     int cpl, dpl, rpl, eflags_mask, iopl;
2140     target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2141 
2142 #ifdef TARGET_X86_64
2143     if (shift == 2) {
2144         sp_mask = -1;
2145     } else
2146 #endif
2147     {
2148         sp_mask = get_sp_mask(env->segs[R_SS].flags);
2149     }
2150     sp = env->regs[R_ESP];
2151     ssp = env->segs[R_SS].base;
2152     new_eflags = 0; /* avoid warning */
2153 #ifdef TARGET_X86_64
2154     if (shift == 2) {
2155         POPQ_RA(sp, new_eip, retaddr);
2156         POPQ_RA(sp, new_cs, retaddr);
2157         new_cs &= 0xffff;
2158         if (is_iret) {
2159             POPQ_RA(sp, new_eflags, retaddr);
2160         }
2161     } else
2162 #endif
2163     {
2164         if (shift == 1) {
2165             /* 32 bits */
2166             POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2167             POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2168             new_cs &= 0xffff;
2169             if (is_iret) {
2170                 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2171                 if (new_eflags & VM_MASK) {
2172                     goto return_to_vm86;
2173                 }
2174             }
2175         } else {
2176             /* 16 bits */
2177             POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2178             POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2179             if (is_iret) {
2180                 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2181             }
2182         }
2183     }
2184     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2185               new_cs, new_eip, shift, addend);
2186     LOG_PCALL_STATE(env_cpu(env));
2187     if ((new_cs & 0xfffc) == 0) {
2188         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2189     }
2190     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2191         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2192     }
2193     if (!(e2 & DESC_S_MASK) ||
2194         !(e2 & DESC_CS_MASK)) {
2195         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2196     }
2197     cpl = env->hflags & HF_CPL_MASK;
2198     rpl = new_cs & 3;
2199     if (rpl < cpl) {
2200         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2201     }
2202     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2203     if (e2 & DESC_C_MASK) {
2204         if (dpl > rpl) {
2205             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2206         }
2207     } else {
2208         if (dpl != rpl) {
2209             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2210         }
2211     }
2212     if (!(e2 & DESC_P_MASK)) {
2213         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2214     }
2215 
2216     sp += addend;
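    /*
     * In 64-bit mode IRET always pops SS:RSP, so only a same-privilege
     * non-IRET far return may keep the current stack segment.
     */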
2217     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2218                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2219         /* return to same privilege level */
2220         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2221                        get_seg_base(e1, e2),
2222                        get_seg_limit(e1, e2),
2223                        e2);
2224     } else {
2225         /* return to different privilege level */
2226 #ifdef TARGET_X86_64
2227         if (shift == 2) {
2228             POPQ_RA(sp, new_esp, retaddr);
2229             POPQ_RA(sp, new_ss, retaddr);
2230             new_ss &= 0xffff;
2231         } else
2232 #endif
2233         {
2234             if (shift == 1) {
2235                 /* 32 bits */
2236                 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2237                 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2238                 new_ss &= 0xffff;
2239             } else {
2240                 /* 16 bits */
2241                 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2242                 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2243             }
2244         }
2245         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2246                   new_ss, new_esp);
2247         if ((new_ss & 0xfffc) == 0) {
2248 #ifdef TARGET_X86_64
2249             /* NULL ss is allowed in long mode if cpl != 3 */
2250             /* XXX: test CS64? */
2251             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2252                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2253                                        0, 0xffffffff,
2254                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2255                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2256                                        DESC_W_MASK | DESC_A_MASK);
2257                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2258             } else
2259 #endif
2260             {
2261                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2262             }
2263         } else {
2264             if ((new_ss & 3) != rpl) {
2265                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2266             }
2267             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2268                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2269             }
2270             if (!(ss_e2 & DESC_S_MASK) ||
2271                 (ss_e2 & DESC_CS_MASK) ||
2272                 !(ss_e2 & DESC_W_MASK)) {
2273                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2274             }
2275             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2276             if (dpl != rpl) {
2277                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2278             }
2279             if (!(ss_e2 & DESC_P_MASK)) {
2280                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2281             }
2282             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2283                                    get_seg_base(ss_e1, ss_e2),
2284                                    get_seg_limit(ss_e1, ss_e2),
2285                                    ss_e2);
2286         }
2287 
2288         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2289                        get_seg_base(e1, e2),
2290                        get_seg_limit(e1, e2),
2291                        e2);
2292         sp = new_esp;
2293 #ifdef TARGET_X86_64
2294         if (env->hflags & HF_CS64_MASK) {
2295             sp_mask = -1;
2296         } else
2297 #endif
2298         {
2299             sp_mask = get_sp_mask(ss_e2);
2300         }
2301 
2302         /* validate data segments */
2303         validate_seg(env, R_ES, rpl);
2304         validate_seg(env, R_DS, rpl);
2305         validate_seg(env, R_FS, rpl);
2306         validate_seg(env, R_GS, rpl);
2307 
2308         sp += addend;
2309     }
2310     SET_ESP(sp, sp_mask);
2311     env->eip = new_eip;
2312     if (is_iret) {
2313         /* NOTE: 'cpl' is the _old_ CPL */
2314         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2315         if (cpl == 0) {
2316             eflags_mask |= IOPL_MASK;
2317         }
2318         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2319         if (cpl <= iopl) {
2320             eflags_mask |= IF_MASK;
2321         }
2322         if (shift == 0) {
2323             eflags_mask &= 0xffff;
2324         }
2325         cpu_load_eflags(env, new_eflags, eflags_mask);
2326     }
2327     return;
2328 
2329  return_to_vm86:
2330     POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2331     POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2332     POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2333     POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2334     POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2335     POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2336 
2337     /* modify processor state */
2338     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2339                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2340                     VIP_MASK);
2341     load_seg_vm(env, R_CS, new_cs & 0xffff);
2342     load_seg_vm(env, R_SS, new_ss & 0xffff);
2343     load_seg_vm(env, R_ES, new_es & 0xffff);
2344     load_seg_vm(env, R_DS, new_ds & 0xffff);
2345     load_seg_vm(env, R_FS, new_fs & 0xffff);
2346     load_seg_vm(env, R_GS, new_gs & 0xffff);
2347 
2348     env->eip = new_eip & 0xffff;
2349     env->regs[R_ESP] = new_esp;
2350 }
2351 
2352 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2353 {
2354     int tss_selector, type;
2355     uint32_t e1, e2;
2356 
2357     /* Nested-task return: NT set means IRET must switch back to the previous task's TSS */
2358     if (env->eflags & NT_MASK) {
2359 #ifdef TARGET_X86_64
2360         if (env->hflags & HF_LMA_MASK) {
2361             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2362         }
2363 #endif
2364         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2365         if (tss_selector & 4) {
2366             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2367         }
2368         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2369             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2370         }
2371         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2372         /* NOTE: the 0x17 mask keeps the S bit and folds busy 286 (3) and busy 386 (11) TSS types together, so type 3 means a busy TSS system descriptor */
2373         if (type != 3) {
2374             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2375         }
2376         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2377     } else {
2378         helper_ret_protected(env, shift, 1, 0, GETPC());
2379     }
2380     env->hflags2 &= ~HF2_NMI_MASK;
2381 }
2382 
2383 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2384 {
2385     helper_ret_protected(env, shift, 0, addend, GETPC());
2386 }
2387 
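/*
 * SYSENTER/SYSEXIT use a fixed flat segment layout derived from
 * IA32_SYSENTER_CS: kernel CS at the MSR value and kernel SS at +8;
 * SYSEXIT returns to user CS/SS at +16/+24 (legacy) or +32/+40 (64-bit).
 */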
2388 void helper_sysenter(CPUX86State *env)
2389 {
2390     if (env->sysenter_cs == 0) {
2391         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2392     }
2393     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2394 
2395 #ifdef TARGET_X86_64
2396     if (env->hflags & HF_LMA_MASK) {
2397         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2398                                0, 0xffffffff,
2399                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2400                                DESC_S_MASK |
2401                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2402                                DESC_L_MASK);
2403     } else
2404 #endif
2405     {
2406         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2407                                0, 0xffffffff,
2408                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2409                                DESC_S_MASK |
2410                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2411     }
2412     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2413                            0, 0xffffffff,
2414                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2415                            DESC_S_MASK |
2416                            DESC_W_MASK | DESC_A_MASK);
2417     env->regs[R_ESP] = env->sysenter_esp;
2418     env->eip = env->sysenter_eip;
2419 }
2420 
2421 void helper_sysexit(CPUX86State *env, int dflag)
2422 {
2423     int cpl;
2424 
2425     cpl = env->hflags & HF_CPL_MASK;
2426     if (env->sysenter_cs == 0 || cpl != 0) {
2427         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2428     }
2429 #ifdef TARGET_X86_64
2430     if (dflag == 2) {
2431         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2432                                3, 0, 0xffffffff,
2433                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2434                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2435                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2436                                DESC_L_MASK);
2437         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2438                                3, 0, 0xffffffff,
2439                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2440                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2441                                DESC_W_MASK | DESC_A_MASK);
2442     } else
2443 #endif
2444     {
2445         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2446                                3, 0, 0xffffffff,
2447                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2448                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2449                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2450         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2451                                3, 0, 0xffffffff,
2452                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2453                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2454                                DESC_W_MASK | DESC_A_MASK);
2455     }
2456     env->regs[R_ESP] = env->regs[R_ECX];
2457     env->eip = env->regs[R_EDX];
2458 }
2459 
2460 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2461 {
2462     unsigned int limit;
2463     uint32_t e1, e2, eflags, selector;
2464     int rpl, dpl, cpl, type;
2465 
2466     selector = selector1 & 0xffff;
2467     eflags = cpu_cc_compute_all(env, CC_OP);
2468     if ((selector & 0xfffc) == 0) {
2469         goto fail;
2470     }
2471     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2472         goto fail;
2473     }
2474     rpl = selector & 3;
2475     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2476     cpl = env->hflags & HF_CPL_MASK;
2477     if (e2 & DESC_S_MASK) {
2478         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2479             /* conforming */
2480         } else {
2481             if (dpl < cpl || dpl < rpl) {
2482                 goto fail;
2483             }
2484         }
2485     } else {
2486         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
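        /*
         * Only these system types report a limit: 1/3 = 286 TSS
         * (available/busy), 2 = LDT, 9/11 = 386 TSS (available/busy).
         */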
2487         switch (type) {
2488         case 1:
2489         case 2:
2490         case 3:
2491         case 9:
2492         case 11:
2493             break;
2494         default:
2495             goto fail;
2496         }
2497         if (dpl < cpl || dpl < rpl) {
2498         fail:
2499             CC_SRC = eflags & ~CC_Z;
2500             return 0;
2501         }
2502     }
2503     limit = get_seg_limit(e1, e2);
2504     CC_SRC = eflags | CC_Z;
2505     return limit;
2506 }
2507 
2508 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2509 {
2510     uint32_t e1, e2, eflags, selector;
2511     int rpl, dpl, cpl, type;
2512 
2513     selector = selector1 & 0xffff;
2514     eflags = cpu_cc_compute_all(env, CC_OP);
2515     if ((selector & 0xfffc) == 0) {
2516         goto fail;
2517     }
2518     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2519         goto fail;
2520     }
2521     rpl = selector & 3;
2522     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2523     cpl = env->hflags & HF_CPL_MASK;
2524     if (e2 & DESC_S_MASK) {
2525         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2526             /* conforming */
2527         } else {
2528             if (dpl < cpl || dpl < rpl) {
2529                 goto fail;
2530             }
2531         }
2532     } else {
2533         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
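        /*
         * Besides the TSS and LDT types, LAR also accepts gates:
         * 4 = 286 call gate, 5 = task gate, 12 = 386 call gate.
         */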
2534         switch (type) {
2535         case 1:
2536         case 2:
2537         case 3:
2538         case 4:
2539         case 5:
2540         case 9:
2541         case 11:
2542         case 12:
2543             break;
2544         default:
2545             goto fail;
2546         }
2547         if (dpl < cpl || dpl < rpl) {
2548         fail:
2549             CC_SRC = eflags & ~CC_Z;
2550             return 0;
2551         }
2552     }
2553     CC_SRC = eflags | CC_Z;
2554     return e2 & 0x00f0ff00;
2555 }
2556 
2557 void helper_verr(CPUX86State *env, target_ulong selector1)
2558 {
2559     uint32_t e1, e2, eflags, selector;
2560     int rpl, dpl, cpl;
2561 
2562     selector = selector1 & 0xffff;
2563     eflags = cpu_cc_compute_all(env, CC_OP);
2564     if ((selector & 0xfffc) == 0) {
2565         goto fail;
2566     }
2567     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2568         goto fail;
2569     }
2570     if (!(e2 & DESC_S_MASK)) {
2571         goto fail;
2572     }
2573     rpl = selector & 3;
2574     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2575     cpl = env->hflags & HF_CPL_MASK;
2576     if (e2 & DESC_CS_MASK) {
2577         if (!(e2 & DESC_R_MASK)) {
2578             goto fail;
2579         }
2580         if (!(e2 & DESC_C_MASK)) {
2581             if (dpl < cpl || dpl < rpl) {
2582                 goto fail;
2583             }
2584         }
2585     } else {
2586         if (dpl < cpl || dpl < rpl) {
2587         fail:
2588             CC_SRC = eflags & ~CC_Z;
2589             return;
2590         }
2591     }
2592     CC_SRC = eflags | CC_Z;
2593 }
2594 
2595 void helper_verw(CPUX86State *env, target_ulong selector1)
2596 {
2597     uint32_t e1, e2, eflags, selector;
2598     int rpl, dpl, cpl;
2599 
2600     selector = selector1 & 0xffff;
2601     eflags = cpu_cc_compute_all(env, CC_OP);
2602     if ((selector & 0xfffc) == 0) {
2603         goto fail;
2604     }
2605     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2606         goto fail;
2607     }
2608     if (!(e2 & DESC_S_MASK)) {
2609         goto fail;
2610     }
2611     rpl = selector & 3;
2612     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2613     cpl = env->hflags & HF_CPL_MASK;
2614     if (e2 & DESC_CS_MASK) {
2615         goto fail;
2616     } else {
2617         if (dpl < cpl || dpl < rpl) {
2618             goto fail;
2619         }
2620         if (!(e2 & DESC_W_MASK)) {
2621         fail:
2622             CC_SRC = eflags & ~CC_Z;
2623             return;
2624         }
2625     }
2626     CC_SRC = eflags | CC_Z;
2627 }
2628 
2629 #if defined(CONFIG_USER_ONLY)
2630 void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector)
2631 {
2632     if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2633         int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2634         selector &= 0xffff;
2635         cpu_x86_load_seg_cache(env, seg_reg, selector,
2636                                (selector << 4), 0xffff,
2637                                DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2638                                DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2639     } else {
2640         helper_load_seg(env, seg_reg, selector);
2641     }
2642 }
2643 #endif
2644 
2645 /* check if Port I/O is allowed in TSS */
2646 static inline void check_io(CPUX86State *env, int addr, int size,
2647                             uintptr_t retaddr)
2648 {
2649     int io_offset, val, mask;
2650 
2651     /* TR must hold a present 32-bit TSS (type 9) whose limit covers the I/O map base field */
2652     if (!(env->tr.flags & DESC_P_MASK) ||
2653         ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2654         env->tr.limit < 103) {
2655         goto fail;
2656     }
2657     io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2658     io_offset += (addr >> 3);
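    /*
     * Each bit of the I/O permission bitmap covers one port; io_offset
     * (read from TSS offset 0x66) plus addr / 8 locates the byte holding
     * the first port's bit.
     */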
2659     /* Note: the check reads two bytes since the port range may span a byte boundary */
2660     if ((io_offset + 1) > env->tr.limit) {
2661         goto fail;
2662     }
2663     val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2664     val >>= (addr & 7);
2665     mask = (1 << size) - 1;
2666     /* all bits must be zero to allow the I/O */
2667     if ((val & mask) != 0) {
2668     fail:
2669         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2670     }
2671 }
2672 
2673 void helper_check_iob(CPUX86State *env, uint32_t t0)
2674 {
2675     check_io(env, t0, 1, GETPC());
2676 }
2677 
2678 void helper_check_iow(CPUX86State *env, uint32_t t0)
2679 {
2680     check_io(env, t0, 2, GETPC());
2681 }
2682 
2683 void helper_check_iol(CPUX86State *env, uint32_t t0)
2684 {
2685     check_io(env, t0, 4, GETPC());
2686 }
2687