xref: /qemu/target/i386/tcg/seg_helper.c (revision cc944932ecef3b7a56ae62d89dd92fb9e56c5cc8)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 #include "access.h"
31 #include "tcg-cpu.h"
32 
33 #ifdef TARGET_X86_64
34 #define SET_ESP(val, sp_mask)                                   \
35     do {                                                        \
36         if ((sp_mask) == 0xffff) {                              \
37             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
38                 ((val) & 0xffff);                               \
39         } else if ((sp_mask) == 0xffffffffLL) {                 \
40             env->regs[R_ESP] = (uint32_t)(val);                 \
41         } else {                                                \
42             env->regs[R_ESP] = (val);                           \
43         }                                                       \
44     } while (0)
45 #else
46 #define SET_ESP(val, sp_mask)                                   \
47     do {                                                        \
48         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
49             ((val) & (sp_mask));                                \
50     } while (0)
51 #endif
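
/*
 * SET_ESP honours the size of the stack segment: with a 16-bit stack
 * (sp_mask == 0xffff) only SP is updated and the upper bits of ESP are
 * preserved; wider stacks take the new value masked to the stack width,
 * and a 64-bit stack takes it unmasked.
 */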
52 
53 /* XXX: use mmu_index to have proper DPL support */
54 typedef struct StackAccess
55 {
56     CPUX86State *env;
57     uintptr_t ra;
58     target_ulong ss_base;
59     target_ulong sp;
60     target_ulong sp_mask;
61     int mmu_index;
62 } StackAccess;
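
/*
 * A StackAccess bundles everything needed to push to or pop from one
 * particular stack: the SS base and wrap-around mask describing the
 * stack segment, the working (not yet committed) stack pointer, and the
 * MMU index that determines the privilege level of the accesses.
 */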
63 
64 static void pushw(StackAccess *sa, uint16_t val)
65 {
66     sa->sp -= 2;
67     cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
68                       val, sa->mmu_index, sa->ra);
69 }
70 
71 static void pushl(StackAccess *sa, uint32_t val)
72 {
73     sa->sp -= 4;
74     cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
75                       val, sa->mmu_index, sa->ra);
76 }
77 
78 static uint16_t popw(StackAccess *sa)
79 {
80     uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
81                                       sa->ss_base + (sa->sp & sa->sp_mask),
82                                       sa->mmu_index, sa->ra);
83     sa->sp += 2;
84     return ret;
85 }
86 
87 static uint32_t popl(StackAccess *sa)
88 {
89     uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
90                                      sa->ss_base + (sa->sp & sa->sp_mask),
91                                      sa->mmu_index, sa->ra);
92     sa->sp += 4;
93     return ret;
94 }
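
/*
 * The helpers above only update sa->sp; callers commit the new stack
 * pointer (typically via SET_ESP) once the whole sequence of pushes or
 * pops has succeeded, so a fault in the middle leaves the architectural
 * ESP unchanged.
 */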
95 
96 int get_pg_mode(CPUX86State *env)
97 {
98     int pg_mode = PG_MODE_PG;
99     if (!(env->cr[0] & CR0_PG_MASK)) {
100         return 0;
101     }
102     if (env->cr[0] & CR0_WP_MASK) {
103         pg_mode |= PG_MODE_WP;
104     }
105     if (env->cr[4] & CR4_PAE_MASK) {
106         pg_mode |= PG_MODE_PAE;
107         if (env->efer & MSR_EFER_NXE) {
108             pg_mode |= PG_MODE_NXE;
109         }
110     }
111     if (env->cr[4] & CR4_PSE_MASK) {
112         pg_mode |= PG_MODE_PSE;
113     }
114     if (env->cr[4] & CR4_SMEP_MASK) {
115         pg_mode |= PG_MODE_SMEP;
116     }
117     if (env->hflags & HF_LMA_MASK) {
118         pg_mode |= PG_MODE_LMA;
119         if (env->cr[4] & CR4_PKE_MASK) {
120             pg_mode |= PG_MODE_PKE;
121         }
122         if (env->cr[4] & CR4_PKS_MASK) {
123             pg_mode |= PG_MODE_PKS;
124         }
125         if (env->cr[4] & CR4_LA57_MASK) {
126             pg_mode |= PG_MODE_LA57;
127         }
128     }
129     return pg_mode;
130 }
131 
132 static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
133 {
134     int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
135     int mmu_index_base =
136         !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
137         (pl < 3 && (env->eflags & AC_MASK)
138          ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
139 
140     return mmu_index_base + mmu_index_32;
141 }
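
/*
 * Pick the MMU index for implicit supervisor accesses: SMAP is bypassed
 * when CR4.SMAP is clear, or when EFLAGS.AC is set and the access is
 * made at CPL < 3; mmu_index_32 then selects the non-long-mode flavour
 * of that index.
 */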
142 
143 int cpu_mmu_index_kernel(CPUX86State *env)
144 {
145     return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
146 }
147 
148 /* return non-zero on error */
149 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
150                                uint32_t *e2_ptr, int selector,
151                                uintptr_t retaddr)
152 {
153     SegmentCache *dt;
154     int index;
155     target_ulong ptr;
156 
157     if (selector & 0x4) {
158         dt = &env->ldt;
159     } else {
160         dt = &env->gdt;
161     }
162     index = selector & ~7;
163     if ((index + 7) > dt->limit) {
164         return -1;
165     }
166     ptr = dt->base + index;
167     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
168     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
169     return 0;
170 }
171 
172 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
173                                uint32_t *e2_ptr, int selector)
174 {
175     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
176 }
177 
178 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
179 {
180     unsigned int limit;
181 
182     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
183     if (e2 & DESC_G_MASK) {
184         limit = (limit << 12) | 0xfff;
185     }
186     return limit;
187 }
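
/*
 * When the granularity bit is set, the 20-bit limit is in 4 KiB units:
 * shift it up by 12 and fill the low bits with ones to obtain the
 * byte-granular limit.
 */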
188 
189 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
190 {
191     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
192 }
193 
194 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
195                                          uint32_t e2)
196 {
197     sc->base = get_seg_base(e1, e2);
198     sc->limit = get_seg_limit(e1, e2);
199     sc->flags = e2;
200 }
201 
202 /* init the segment cache in vm86 mode. */
203 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
204 {
205     selector &= 0xffff;
206 
207     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
208                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
209                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
210 }
211 
212 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
213                                        uint32_t *esp_ptr, int dpl,
214                                        uintptr_t retaddr)
215 {
216     X86CPU *cpu = env_archcpu(env);
217     int type, index, shift;
218 
219 #if 0
220     {
221         int i;
222         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
223         for (i = 0; i < env->tr.limit; i++) {
224             printf("%02x ", env->tr.base[i]);
225             if ((i & 7) == 7) {
226                 printf("\n");
227             }
228         }
229         printf("\n");
230     }
231 #endif
232 
233     if (!(env->tr.flags & DESC_P_MASK)) {
234         cpu_abort(CPU(cpu), "invalid tss");
235     }
236     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
237     if ((type & 7) != 1) {
238         cpu_abort(CPU(cpu), "invalid tss type");
239     }
240     shift = type >> 3;
241     index = (dpl * 4 + 2) << shift;
242     if (index + (4 << shift) - 1 > env->tr.limit) {
243         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
244     }
245     if (shift == 0) {
246         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
247         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
248     } else {
249         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
250         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
251     }
252 }
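
/*
 * In a 32-bit TSS the ring-n stack pointer pair lives at offset 4 + 8 * n
 * (ESP, then SS); in a 16-bit TSS it is at offset 2 + 4 * n (SP, then SS).
 * That is what index = (dpl * 4 + 2) << shift selects above.
 */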
253 
254 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
255                          int cpl, uintptr_t retaddr)
256 {
257     uint32_t e1, e2;
258     int rpl, dpl;
259 
260     if ((selector & 0xfffc) != 0) {
261         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
262             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
263         }
264         if (!(e2 & DESC_S_MASK)) {
265             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
266         }
267         rpl = selector & 3;
268         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
269         if (seg_reg == R_CS) {
270             if (!(e2 & DESC_CS_MASK)) {
271                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
272             }
273             if (dpl != rpl) {
274                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
275             }
276         } else if (seg_reg == R_SS) {
277             /* SS must be writable data */
278             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
279                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
280             }
281             if (dpl != cpl || dpl != rpl) {
282                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
283             }
284         } else {
285             /* not readable code */
286             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
287                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
288             }
289             /* if data or non-conforming code, check the rights */
290             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
291                 if (dpl < cpl || dpl < rpl) {
292                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
293                 }
294             }
295         }
296         if (!(e2 & DESC_P_MASK)) {
297             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
298         }
299         cpu_x86_load_seg_cache(env, seg_reg, selector,
300                                get_seg_base(e1, e2),
301                                get_seg_limit(e1, e2),
302                                e2);
303     } else {
304         if (seg_reg == R_SS || seg_reg == R_CS) {
305             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
306         }
307     }
308 }
309 
310 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
311                          uintptr_t retaddr)
312 {
313     target_ulong ptr = env->gdt.base + (tss_selector & ~7);
314     uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
315 
316     if (value) {
317         e2 |= DESC_TSS_BUSY_MASK;
318     } else {
319         e2 &= ~DESC_TSS_BUSY_MASK;
320     }
321 
322     cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
323 }
324 
325 #define SWITCH_TSS_JMP  0
326 #define SWITCH_TSS_IRET 1
327 #define SWITCH_TSS_CALL 2
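
/*
 * The three sources differ in how the busy bits and NT are handled below:
 * JMP clears the old task's busy bit and sets the new one; CALL leaves the
 * old task busy, sets the new busy bit, stores the back link and sets NT in
 * the new task; IRET clears the old busy bit, requires the new TSS to
 * already be busy, and clears NT in the old task's saved flags.
 */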
328 
329 /* return 0 if switching to a 16-bit TSS, 1 for a 32-bit TSS */
330 static int switch_tss_ra(CPUX86State *env, int tss_selector,
331                          uint32_t e1, uint32_t e2, int source,
332                          uint32_t next_eip, uintptr_t retaddr)
333 {
334     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
335     target_ulong tss_base;
336     uint32_t new_regs[8], new_segs[6];
337     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
338     uint32_t old_eflags, eflags_mask;
339     SegmentCache *dt;
340     int mmu_index, index;
341     target_ulong ptr;
342     X86Access old, new;
343 
344     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
345     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
346               source);
347 
348     /* if task gate, we read the TSS segment and we load it */
349     if (type == 5) {
350         if (!(e2 & DESC_P_MASK)) {
351             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
352         }
353         tss_selector = e1 >> 16;
354         if (tss_selector & 4) {
355             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
356         }
357         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
358             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
359         }
360         if (e2 & DESC_S_MASK) {
361             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
362         }
363         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
364         if ((type & 7) != 1) {
365             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
366         }
367     }
368 
369     if (!(e2 & DESC_P_MASK)) {
370         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
371     }
372 
373     if (type & 8) {
374         tss_limit_max = 103;
375     } else {
376         tss_limit_max = 43;
377     }
378     tss_limit = get_seg_limit(e1, e2);
379     tss_base = get_seg_base(e1, e2);
380     if ((tss_selector & 4) != 0 ||
381         tss_limit < tss_limit_max) {
382         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
383     }
384     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
385     if (old_type & 8) {
386         old_tss_limit_max = 103;
387     } else {
388         old_tss_limit_max = 43;
389     }
390 
391     /* new TSS must be busy iff the source is an IRET instruction  */
392     if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
393         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
394     }
395 
396     /* X86Access avoids memory exceptions during the task switch */
397     mmu_index = cpu_mmu_index_kernel(env);
398     access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
399                        MMU_DATA_STORE, mmu_index, retaddr);
400 
401     if (source == SWITCH_TSS_CALL) {
402         /* Probe for future write of parent task */
403         probe_access(env, tss_base, 2, MMU_DATA_STORE,
404                      mmu_index, retaddr);
405     }
406     /* While the true tss_limit may be larger, we don't access the iopb here. */
407     access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
408                        MMU_DATA_LOAD, mmu_index, retaddr);
409 
410     /* save the current state in the old TSS */
411     old_eflags = cpu_compute_eflags(env);
412     if (old_type & 8) {
413         /* 32 bit */
414         access_stl(&old, env->tr.base + 0x20, next_eip);
415         access_stl(&old, env->tr.base + 0x24, old_eflags);
416         access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
417         access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
418         access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
419         access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
420         access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
421         access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
422         access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
423         access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
424         for (i = 0; i < 6; i++) {
425             access_stw(&old, env->tr.base + (0x48 + i * 4),
426                        env->segs[i].selector);
427         }
428     } else {
429         /* 16 bit */
430         access_stw(&old, env->tr.base + 0x0e, next_eip);
431         access_stw(&old, env->tr.base + 0x10, old_eflags);
432         access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
433         access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
434         access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
435         access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
436         access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
437         access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
438         access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
439         access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
440         for (i = 0; i < 4; i++) {
441             access_stw(&old, env->tr.base + (0x22 + i * 2),
442                        env->segs[i].selector);
443         }
444     }
445 
446     /* read all the registers from the new TSS */
447     if (type & 8) {
448         /* 32 bit */
449         new_cr3 = access_ldl(&new, tss_base + 0x1c);
450         new_eip = access_ldl(&new, tss_base + 0x20);
451         new_eflags = access_ldl(&new, tss_base + 0x24);
452         for (i = 0; i < 8; i++) {
453             new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
454         }
455         for (i = 0; i < 6; i++) {
456             new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
457         }
458         new_ldt = access_ldw(&new, tss_base + 0x60);
459         new_trap = access_ldl(&new, tss_base + 0x64);
460     } else {
461         /* 16 bit */
462         new_cr3 = 0;
463         new_eip = access_ldw(&new, tss_base + 0x0e);
464         new_eflags = access_ldw(&new, tss_base + 0x10);
465         for (i = 0; i < 8; i++) {
466             new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
467         }
468         for (i = 0; i < 4; i++) {
469             new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
470         }
471         new_ldt = access_ldw(&new, tss_base + 0x2a);
472         new_segs[R_FS] = 0;
473         new_segs[R_GS] = 0;
474         new_trap = 0;
475     }
476     /* XXX: avoid a compiler warning, see
477      * http://support.amd.com/us/Processor_TechDocs/24593.pdf
478      * chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
479     (void)new_trap;
480 
481     /* clear the old task's busy bit (the task stays restartable) */
482     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
483         tss_set_busy(env, env->tr.selector, 0, retaddr);
484     }
485 
486     if (source == SWITCH_TSS_IRET) {
487         old_eflags &= ~NT_MASK;
488         if (old_type & 8) {
489             access_stl(&old, env->tr.base + 0x24, old_eflags);
490         } else {
491             access_stw(&old, env->tr.base + 0x10, old_eflags);
492         }
493     }
494 
495     if (source == SWITCH_TSS_CALL) {
496         /*
497          * Thanks to the probe_access above, we know the first two
498          * bytes addressed by &new are writable too.
499          */
500         access_stw(&new, tss_base, env->tr.selector);
501         new_eflags |= NT_MASK;
502     }
503 
504     /* set busy bit */
505     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
506         tss_set_busy(env, tss_selector, 1, retaddr);
507     }
508 
509     /* set the new CPU state */
510 
511     /* now if an exception occurs, it will occur in the next task context */
512 
513     env->cr[0] |= CR0_TS_MASK;
514     env->hflags |= HF_TS_MASK;
515     env->tr.selector = tss_selector;
516     env->tr.base = tss_base;
517     env->tr.limit = tss_limit;
518     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
519 
520     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
521         cpu_x86_update_cr3(env, new_cr3);
522     }
523 
524     /* load all registers without raising an exception, then reload the
525        segments, which may raise exceptions */
526     env->eip = new_eip;
527     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
528         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
529     if (type & 8) {
530         cpu_load_eflags(env, new_eflags, eflags_mask);
531         for (i = 0; i < 8; i++) {
532             env->regs[i] = new_regs[i];
533         }
534     } else {
535         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
536         for (i = 0; i < 8; i++) {
537             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
538         }
539     }
540     if (new_eflags & VM_MASK) {
541         for (i = 0; i < 6; i++) {
542             load_seg_vm(env, i, new_segs[i]);
543         }
544     } else {
545         /* first load just the selectors; loading the descriptors may fault */
546         for (i = 0; i < 6; i++) {
547             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
548         }
549     }
550 
551     env->ldt.selector = new_ldt & ~4;
552     env->ldt.base = 0;
553     env->ldt.limit = 0;
554     env->ldt.flags = 0;
555 
556     /* load the LDT */
557     if (new_ldt & 4) {
558         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
559     }
560 
561     if ((new_ldt & 0xfffc) != 0) {
562         dt = &env->gdt;
563         index = new_ldt & ~7;
564         if ((index + 7) > dt->limit) {
565             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
566         }
567         ptr = dt->base + index;
568         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
569         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
570         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
571             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
572         }
573         if (!(e2 & DESC_P_MASK)) {
574             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
575         }
576         load_seg_cache_raw_dt(&env->ldt, e1, e2);
577     }
578 
579     /* load the segments */
580     if (!(new_eflags & VM_MASK)) {
581         int cpl = new_segs[R_CS] & 3;
582         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
583         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
584         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
585         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
586         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
587         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
588     }
589 
590     /* check that env->eip is in the CS segment limits */
591     if (new_eip > env->segs[R_CS].limit) {
592         /* XXX: different exception if CALL? */
593         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
594     }
595 
596 #ifndef CONFIG_USER_ONLY
597     /* reset local breakpoints */
598     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
599         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
600     }
601 #endif
602     return type >> 3;
603 }
604 
605 static int switch_tss(CPUX86State *env, int tss_selector,
606                       uint32_t e1, uint32_t e2, int source,
607                       uint32_t next_eip)
608 {
609     return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
610 }
611 
612 static inline unsigned int get_sp_mask(unsigned int e2)
613 {
614 #ifdef TARGET_X86_64
615     if (e2 & DESC_L_MASK) {
616         return 0;
617     } else
618 #endif
619     if (e2 & DESC_B_MASK) {
620         return 0xffffffff;
621     } else {
622         return 0xffff;
623     }
624 }
625 
626 static int exception_is_fault(int intno)
627 {
628     switch (intno) {
629         /*
630          * #DB can be both fault- and trap-like, but it never sets RF=1
631          * in the RFLAGS value pushed on the stack.
632          */
633     case EXCP01_DB:
634     case EXCP03_INT3:
635     case EXCP04_INTO:
636     case EXCP08_DBLE:
637     case EXCP12_MCHK:
638         return 0;
639     }
640     /* Everything else, including reserved exceptions, is a fault. */
641     return 1;
642 }
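
/*
 * For fault-like exceptions the EFLAGS image pushed on the stack has RF
 * set, so that restarting the faulting instruction does not re-trigger an
 * instruction breakpoint on it.
 */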
643 
644 int exception_has_error_code(int intno)
645 {
646     switch (intno) {
647     case 8:  /* #DF */
648     case 10: /* #TS */
649     case 11: /* #NP */
650     case 12: /* #SS */
651     case 13: /* #GP */
652     case 14: /* #PF */
653     case 17: /* #AC */
654         return 1;
655     }
656     return 0;
657 }
658 
659 /* protected mode interrupt */
660 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
661                                    int error_code, unsigned int next_eip,
662                                    int is_hw)
663 {
664     SegmentCache *dt;
665     target_ulong ptr;
666     int type, dpl, selector, ss_dpl, cpl;
667     int has_error_code, new_stack, shift;
668     uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
669     uint32_t old_eip, eflags;
670     int vm86 = env->eflags & VM_MASK;
671     StackAccess sa;
672     bool set_rf;
673 
674     has_error_code = 0;
675     if (!is_int && !is_hw) {
676         has_error_code = exception_has_error_code(intno);
677     }
678     if (is_int) {
679         old_eip = next_eip;
680         set_rf = false;
681     } else {
682         old_eip = env->eip;
683         set_rf = exception_is_fault(intno);
684     }
685 
686     dt = &env->idt;
687     if (intno * 8 + 7 > dt->limit) {
688         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
689     }
690     ptr = dt->base + intno * 8;
691     e1 = cpu_ldl_kernel(env, ptr);
692     e2 = cpu_ldl_kernel(env, ptr + 4);
693     /* check gate type */
694     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
695     switch (type) {
696     case 5: /* task gate */
697     case 6: /* 286 interrupt gate */
698     case 7: /* 286 trap gate */
699     case 14: /* 386 interrupt gate */
700     case 15: /* 386 trap gate */
701         break;
702     default:
703         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
704         break;
705     }
706     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
707     cpl = env->hflags & HF_CPL_MASK;
708     /* check privilege if software int */
709     if (is_int && dpl < cpl) {
710         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
711     }
712 
713     sa.env = env;
714     sa.ra = 0;
715 
716     if (type == 5) {
717         /* task gate */
718         /* must do this check here to report the correct error code */
719         if (!(e2 & DESC_P_MASK)) {
720             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
721         }
722         shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
723         if (has_error_code) {
724             /* push the error code on the destination stack */
725             cpl = env->hflags & HF_CPL_MASK;
726             sa.mmu_index = x86_mmu_index_pl(env, cpl);
727             if (env->segs[R_SS].flags & DESC_B_MASK) {
728                 sa.sp_mask = 0xffffffff;
729             } else {
730                 sa.sp_mask = 0xffff;
731             }
732             sa.sp = env->regs[R_ESP];
733             sa.ss_base = env->segs[R_SS].base;
734             if (shift) {
735                 pushl(&sa, error_code);
736             } else {
737                 pushw(&sa, error_code);
738             }
739             SET_ESP(sa.sp, sa.sp_mask);
740         }
741         return;
742     }
743 
744     /* Otherwise, trap or interrupt gate */
745 
746     /* check valid bit */
747     if (!(e2 & DESC_P_MASK)) {
748         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
749     }
750     selector = e1 >> 16;
751     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
752     if ((selector & 0xfffc) == 0) {
753         raise_exception_err(env, EXCP0D_GPF, 0);
754     }
755     if (load_segment(env, &e1, &e2, selector) != 0) {
756         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
757     }
758     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
759         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
760     }
761     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
762     if (dpl > cpl) {
763         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
764     }
765     if (!(e2 & DESC_P_MASK)) {
766         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
767     }
768     if (e2 & DESC_C_MASK) {
769         dpl = cpl;
770     }
771     sa.mmu_index = x86_mmu_index_pl(env, dpl);
772     if (dpl < cpl) {
773         /* to inner privilege */
774         uint32_t esp;
775         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
776         if ((ss & 0xfffc) == 0) {
777             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
778         }
779         if ((ss & 3) != dpl) {
780             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
781         }
782         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
783             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
784         }
785         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
786         if (ss_dpl != dpl) {
787             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
788         }
789         if (!(ss_e2 & DESC_S_MASK) ||
790             (ss_e2 & DESC_CS_MASK) ||
791             !(ss_e2 & DESC_W_MASK)) {
792             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
793         }
794         if (!(ss_e2 & DESC_P_MASK)) {
795             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
796         }
797         new_stack = 1;
798         sa.sp = esp;
799         sa.sp_mask = get_sp_mask(ss_e2);
800         sa.ss_base = get_seg_base(ss_e1, ss_e2);
801     } else  {
802         /* to same privilege */
803         if (vm86) {
804             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
805         }
806         new_stack = 0;
807         sa.sp = env->regs[R_ESP];
808         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
809         sa.ss_base = env->segs[R_SS].base;
810     }
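
    /*
     * At this point sa describes the stack that will receive the interrupt
     * frame: either the ring-dpl stack fetched from the TSS (new_stack != 0)
     * or the currently active stack.
     */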
811 
812     shift = type >> 3;
813 
814 #if 0
815     /* XXX: check that enough room is available */
816     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
817     if (vm86) {
818         push_size += 8;
819     }
820     push_size <<= shift;
821 #endif
822     eflags = cpu_compute_eflags(env);
823     /*
824      * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
825      * as is.  AMD behavior could be implemented in check_hw_breakpoints().
826      */
827     if (set_rf) {
828         eflags |= RF_MASK;
829     }
830 
831     if (shift == 1) {
832         if (new_stack) {
833             if (vm86) {
834                 pushl(&sa, env->segs[R_GS].selector);
835                 pushl(&sa, env->segs[R_FS].selector);
836                 pushl(&sa, env->segs[R_DS].selector);
837                 pushl(&sa, env->segs[R_ES].selector);
838             }
839             pushl(&sa, env->segs[R_SS].selector);
840             pushl(&sa, env->regs[R_ESP]);
841         }
842         pushl(&sa, eflags);
843         pushl(&sa, env->segs[R_CS].selector);
844         pushl(&sa, old_eip);
845         if (has_error_code) {
846             pushl(&sa, error_code);
847         }
848     } else {
849         if (new_stack) {
850             if (vm86) {
851                 pushw(&sa, env->segs[R_GS].selector);
852                 pushw(&sa, env->segs[R_FS].selector);
853                 pushw(&sa, env->segs[R_DS].selector);
854                 pushw(&sa, env->segs[R_ES].selector);
855             }
856             pushw(&sa, env->segs[R_SS].selector);
857             pushw(&sa, env->regs[R_ESP]);
858         }
859         pushw(&sa, eflags);
860         pushw(&sa, env->segs[R_CS].selector);
861         pushw(&sa, old_eip);
862         if (has_error_code) {
863             pushw(&sa, error_code);
864         }
865     }
866 
867     /* an interrupt gate clears IF; a trap gate leaves it set */
868     if ((type & 1) == 0) {
869         env->eflags &= ~IF_MASK;
870     }
871     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
872 
873     if (new_stack) {
874         if (vm86) {
875             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
876             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
877             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
878             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
879         }
880         ss = (ss & ~3) | dpl;
881         cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
882                                get_seg_limit(ss_e1, ss_e2), ss_e2);
883     }
884     SET_ESP(sa.sp, sa.sp_mask);
885 
886     selector = (selector & ~3) | dpl;
887     cpu_x86_load_seg_cache(env, R_CS, selector,
888                    get_seg_base(e1, e2),
889                    get_seg_limit(e1, e2),
890                    e2);
891     env->eip = offset;
892 }
893 
894 #ifdef TARGET_X86_64
895 
896 static void pushq(StackAccess *sa, uint64_t val)
897 {
898     sa->sp -= 8;
899     cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
900 }
901 
902 static uint64_t popq(StackAccess *sa)
903 {
904     uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
905     sa->sp += 8;
906     return ret;
907 }
908 
909 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
910 {
911     X86CPU *cpu = env_archcpu(env);
912     int index, pg_mode;
913     target_ulong rsp;
914     int32_t sext;
915 
916 #if 0
917     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
918            env->tr.base, env->tr.limit);
919 #endif
920 
921     if (!(env->tr.flags & DESC_P_MASK)) {
922         cpu_abort(CPU(cpu), "invalid tss");
923     }
924     index = 8 * level + 4;
925     if ((index + 7) > env->tr.limit) {
926         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
927     }
928 
929     rsp = cpu_ldq_kernel(env, env->tr.base + index);
930 
931     /* test virtual address sign extension */
932     pg_mode = get_pg_mode(env);
933     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
934     if (sext != 0 && sext != -1) {
935         raise_exception_err(env, EXCP0C_STACK, 0);
936     }
937 
938     return rsp;
939 }
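
/*
 * The 64-bit TSS holds RSP0-RSP2 starting at offset 4 and IST1-IST7
 * starting at offset 36; callers pass ist + 3 for IST entries, so that
 * index = 8 * level + 4 covers both ranges.
 */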
940 
941 /* 64 bit interrupt */
942 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
943                            int error_code, target_ulong next_eip, int is_hw)
944 {
945     SegmentCache *dt;
946     target_ulong ptr;
947     int type, dpl, selector, cpl, ist;
948     int has_error_code, new_stack;
949     uint32_t e1, e2, e3, eflags;
950     target_ulong old_eip, offset;
951     bool set_rf;
952     StackAccess sa;
953 
954     has_error_code = 0;
955     if (!is_int && !is_hw) {
956         has_error_code = exception_has_error_code(intno);
957     }
958     if (is_int) {
959         old_eip = next_eip;
960         set_rf = false;
961     } else {
962         old_eip = env->eip;
963         set_rf = exception_is_fault(intno);
964     }
965 
966     dt = &env->idt;
967     if (intno * 16 + 15 > dt->limit) {
968         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
969     }
970     ptr = dt->base + intno * 16;
971     e1 = cpu_ldl_kernel(env, ptr);
972     e2 = cpu_ldl_kernel(env, ptr + 4);
973     e3 = cpu_ldl_kernel(env, ptr + 8);
974     /* check gate type */
975     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
976     switch (type) {
977     case 14: /* 386 interrupt gate */
978     case 15: /* 386 trap gate */
979         break;
980     default:
981         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
982         break;
983     }
984     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
985     cpl = env->hflags & HF_CPL_MASK;
986     /* check privilege if software int */
987     if (is_int && dpl < cpl) {
988         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
989     }
990     /* check valid bit */
991     if (!(e2 & DESC_P_MASK)) {
992         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
993     }
994     selector = e1 >> 16;
995     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
996     ist = e2 & 7;
997     if ((selector & 0xfffc) == 0) {
998         raise_exception_err(env, EXCP0D_GPF, 0);
999     }
1000 
1001     if (load_segment(env, &e1, &e2, selector) != 0) {
1002         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1003     }
1004     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1005         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1006     }
1007     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1008     if (dpl > cpl) {
1009         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1010     }
1011     if (!(e2 & DESC_P_MASK)) {
1012         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1013     }
1014     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
1015         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1016     }
1017     if (e2 & DESC_C_MASK) {
1018         dpl = cpl;
1019     }
1020 
1021     sa.env = env;
1022     sa.ra = 0;
1023     sa.mmu_index = x86_mmu_index_pl(env, dpl);
1024     sa.sp_mask = -1;
1025     sa.ss_base = 0;
1026     if (dpl < cpl || ist != 0) {
1027         /* to inner privilege */
1028         new_stack = 1;
1029         sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
1030     } else {
1031         /* to same privilege */
1032         if (env->eflags & VM_MASK) {
1033             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1034         }
1035         new_stack = 0;
1036         sa.sp = env->regs[R_ESP];
1037     }
1038     sa.sp &= ~0xfLL; /* align stack */
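
    /*
     * Unlike the 32-bit case, SS:RSP is pushed unconditionally and the
     * stack pointer is aligned to 16 bytes before the frame is pushed.
     */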
1039 
1040     /* See do_interrupt_protected.  */
1041     eflags = cpu_compute_eflags(env);
1042     if (set_rf) {
1043         eflags |= RF_MASK;
1044     }
1045 
1046     pushq(&sa, env->segs[R_SS].selector);
1047     pushq(&sa, env->regs[R_ESP]);
1048     pushq(&sa, eflags);
1049     pushq(&sa, env->segs[R_CS].selector);
1050     pushq(&sa, old_eip);
1051     if (has_error_code) {
1052         pushq(&sa, error_code);
1053     }
1054 
1055     /* an interrupt gate clears IF; a trap gate leaves it set */
1056     if ((type & 1) == 0) {
1057         env->eflags &= ~IF_MASK;
1058     }
1059     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1060 
1061     if (new_stack) {
1062         uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
1063         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1064     }
1065     env->regs[R_ESP] = sa.sp;
1066 
1067     selector = (selector & ~3) | dpl;
1068     cpu_x86_load_seg_cache(env, R_CS, selector,
1069                    get_seg_base(e1, e2),
1070                    get_seg_limit(e1, e2),
1071                    e2);
1072     env->eip = offset;
1073 }
1074 #endif /* TARGET_X86_64 */
1075 
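/*
 * SYSRET derives its selectors from MSR_STAR[63:48]: CS is that value
 * plus 16 for a 64-bit return (dflag == 2), or the value itself for a
 * 32-bit return; SS is the value plus 8; both are loaded with RPL 3.
 */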
1076 void helper_sysret(CPUX86State *env, int dflag)
1077 {
1078     int cpl, selector;
1079 
1080     if (!(env->efer & MSR_EFER_SCE)) {
1081         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1082     }
1083     cpl = env->hflags & HF_CPL_MASK;
1084     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1085         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1086     }
1087     selector = (env->star >> 48) & 0xffff;
1088 #ifdef TARGET_X86_64
1089     if (env->hflags & HF_LMA_MASK) {
1090         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1091                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1092                         NT_MASK);
1093         if (dflag == 2) {
1094             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1095                                    0, 0xffffffff,
1096                                    DESC_G_MASK | DESC_P_MASK |
1097                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1098                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1099                                    DESC_L_MASK);
1100             env->eip = env->regs[R_ECX];
1101         } else {
1102             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1103                                    0, 0xffffffff,
1104                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1105                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1106                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1107             env->eip = (uint32_t)env->regs[R_ECX];
1108         }
1109         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1110                                0, 0xffffffff,
1111                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1112                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1113                                DESC_W_MASK | DESC_A_MASK);
1114     } else
1115 #endif
1116     {
1117         env->eflags |= IF_MASK;
1118         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1119                                0, 0xffffffff,
1120                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1121                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1122                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1123         env->eip = (uint32_t)env->regs[R_ECX];
1124         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1125                                0, 0xffffffff,
1126                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1127                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1128                                DESC_W_MASK | DESC_A_MASK);
1129     }
1130 }
1131 
1132 /* real mode interrupt */
1133 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1134                               int error_code, unsigned int next_eip)
1135 {
1136     SegmentCache *dt;
1137     target_ulong ptr;
1138     int selector;
1139     uint32_t offset;
1140     uint32_t old_cs, old_eip;
1141     StackAccess sa;
1142 
1143     /* real mode (simpler!) */
1144     dt = &env->idt;
1145     if (intno * 4 + 3 > dt->limit) {
1146         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1147     }
1148     ptr = dt->base + intno * 4;
1149     offset = cpu_lduw_kernel(env, ptr);
1150     selector = cpu_lduw_kernel(env, ptr + 2);
1151 
1152     sa.env = env;
1153     sa.ra = 0;
1154     sa.sp = env->regs[R_ESP];
1155     sa.sp_mask = 0xffff;
1156     sa.ss_base = env->segs[R_SS].base;
1157     sa.mmu_index = x86_mmu_index_pl(env, 0);
1158 
1159     if (is_int) {
1160         old_eip = next_eip;
1161     } else {
1162         old_eip = env->eip;
1163     }
1164     old_cs = env->segs[R_CS].selector;
1165     /* XXX: use SS segment size? */
1166     pushw(&sa, cpu_compute_eflags(env));
1167     pushw(&sa, old_cs);
1168     pushw(&sa, old_eip);
1169 
1170     /* update processor state */
1171     SET_ESP(sa.sp, sa.sp_mask);
1172     env->eip = offset;
1173     env->segs[R_CS].selector = selector;
1174     env->segs[R_CS].base = (selector << 4);
1175     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1176 }
1177 
1178 /*
1179  * Begin execution of an interrupt. is_int is TRUE if coming from
1180  * an int instruction. next_eip is the env->eip value AFTER the interrupt
1181  * instruction; it is only relevant if is_int is TRUE.
1182  */
1183 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1184                       int error_code, target_ulong next_eip, int is_hw)
1185 {
1186     CPUX86State *env = &cpu->env;
1187 
1188     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1189         if ((env->cr[0] & CR0_PE_MASK)) {
1190             static int count;
1191 
1192             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1193                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1194                      count, intno, error_code, is_int,
1195                      env->hflags & HF_CPL_MASK,
1196                      env->segs[R_CS].selector, env->eip,
1197                      (int)env->segs[R_CS].base + env->eip,
1198                      env->segs[R_SS].selector, env->regs[R_ESP]);
1199             if (intno == 0x0e) {
1200                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1201             } else {
1202                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1203             }
1204             qemu_log("\n");
1205             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1206 #if 0
1207             {
1208                 int i;
1209                 target_ulong ptr;
1210 
1211                 qemu_log("       code=");
1212                 ptr = env->segs[R_CS].base + env->eip;
1213                 for (i = 0; i < 16; i++) {
1214                     qemu_log(" %02x", ldub(ptr + i));
1215                 }
1216                 qemu_log("\n");
1217             }
1218 #endif
1219             count++;
1220         }
1221     }
1222     if (env->cr[0] & CR0_PE_MASK) {
1223 #if !defined(CONFIG_USER_ONLY)
1224         if (env->hflags & HF_GUEST_MASK) {
1225             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1226         }
1227 #endif
1228 #ifdef TARGET_X86_64
1229         if (env->hflags & HF_LMA_MASK) {
1230             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1231         } else
1232 #endif
1233         {
1234             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1235                                    is_hw);
1236         }
1237     } else {
1238 #if !defined(CONFIG_USER_ONLY)
1239         if (env->hflags & HF_GUEST_MASK) {
1240             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1241         }
1242 #endif
1243         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1244     }
1245 
1246 #if !defined(CONFIG_USER_ONLY)
1247     if (env->hflags & HF_GUEST_MASK) {
1248         CPUState *cs = CPU(cpu);
1249         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1250                                       offsetof(struct vmcb,
1251                                                control.event_inj));
1252 
1253         x86_stl_phys(cs,
1254                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1255                  event_inj & ~SVM_EVTINJ_VALID);
1256     }
1257 #endif
1258 }
1259 
1260 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1261 {
1262     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1263 }
1264 
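/*
 * In long mode, LDT and TSS descriptors are 16 bytes wide, hence the
 * larger entry_limit and the extra dword(s) read below to form the full
 * 64-bit base; this applies to both helper_lldt and helper_ltr.
 */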
1265 void helper_lldt(CPUX86State *env, int selector)
1266 {
1267     SegmentCache *dt;
1268     uint32_t e1, e2;
1269     int index, entry_limit;
1270     target_ulong ptr;
1271 
1272     selector &= 0xffff;
1273     if ((selector & 0xfffc) == 0) {
1274         /* XXX: NULL selector case: invalid LDT */
1275         env->ldt.base = 0;
1276         env->ldt.limit = 0;
1277     } else {
1278         if (selector & 0x4) {
1279             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1280         }
1281         dt = &env->gdt;
1282         index = selector & ~7;
1283 #ifdef TARGET_X86_64
1284         if (env->hflags & HF_LMA_MASK) {
1285             entry_limit = 15;
1286         } else
1287 #endif
1288         {
1289             entry_limit = 7;
1290         }
1291         if ((index + entry_limit) > dt->limit) {
1292             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1293         }
1294         ptr = dt->base + index;
1295         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1296         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1297         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1298             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1299         }
1300         if (!(e2 & DESC_P_MASK)) {
1301             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1302         }
1303 #ifdef TARGET_X86_64
1304         if (env->hflags & HF_LMA_MASK) {
1305             uint32_t e3;
1306 
1307             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1308             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1309             env->ldt.base |= (target_ulong)e3 << 32;
1310         } else
1311 #endif
1312         {
1313             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1314         }
1315     }
1316     env->ldt.selector = selector;
1317 }
1318 
1319 void helper_ltr(CPUX86State *env, int selector)
1320 {
1321     SegmentCache *dt;
1322     uint32_t e1, e2;
1323     int index, type, entry_limit;
1324     target_ulong ptr;
1325 
1326     selector &= 0xffff;
1327     if ((selector & 0xfffc) == 0) {
1328         /* NULL selector case: invalid TR */
1329         env->tr.base = 0;
1330         env->tr.limit = 0;
1331         env->tr.flags = 0;
1332     } else {
1333         if (selector & 0x4) {
1334             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1335         }
1336         dt = &env->gdt;
1337         index = selector & ~7;
1338 #ifdef TARGET_X86_64
1339         if (env->hflags & HF_LMA_MASK) {
1340             entry_limit = 15;
1341         } else
1342 #endif
1343         {
1344             entry_limit = 7;
1345         }
1346         if ((index + entry_limit) > dt->limit) {
1347             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1348         }
1349         ptr = dt->base + index;
1350         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1351         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1352         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1353         if ((e2 & DESC_S_MASK) ||
1354             (type != 1 && type != 9)) {
1355             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1356         }
1357         if (!(e2 & DESC_P_MASK)) {
1358             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1359         }
1360 #ifdef TARGET_X86_64
1361         if (env->hflags & HF_LMA_MASK) {
1362             uint32_t e3, e4;
1363 
1364             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1365             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1366             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1367                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1368             }
1369             load_seg_cache_raw_dt(&env->tr, e1, e2);
1370             env->tr.base |= (target_ulong)e3 << 32;
1371         } else
1372 #endif
1373         {
1374             load_seg_cache_raw_dt(&env->tr, e1, e2);
1375         }
1376         e2 |= DESC_TSS_BUSY_MASK;
1377         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1378     }
1379     env->tr.selector = selector;
1380 }
1381 
1382 /* only valid in protected mode and not in VM86 mode; seg_reg must be != R_CS */
1383 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1384 {
1385     uint32_t e1, e2;
1386     int cpl, dpl, rpl;
1387     SegmentCache *dt;
1388     int index;
1389     target_ulong ptr;
1390 
1391     selector &= 0xffff;
1392     cpl = env->hflags & HF_CPL_MASK;
1393     if ((selector & 0xfffc) == 0) {
1394         /* null selector case */
1395         if (seg_reg == R_SS
1396 #ifdef TARGET_X86_64
1397             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1398 #endif
1399             ) {
1400             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1401         }
1402         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1403     } else {
1404 
1405         if (selector & 0x4) {
1406             dt = &env->ldt;
1407         } else {
1408             dt = &env->gdt;
1409         }
1410         index = selector & ~7;
1411         if ((index + 7) > dt->limit) {
1412             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1413         }
1414         ptr = dt->base + index;
1415         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1416         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1417 
1418         if (!(e2 & DESC_S_MASK)) {
1419             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1420         }
1421         rpl = selector & 3;
1422         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1423         if (seg_reg == R_SS) {
1424             /* must be writable segment */
1425             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1426                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1427             }
1428             if (rpl != cpl || dpl != cpl) {
1429                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1430             }
1431         } else {
1432             /* must be readable segment */
1433             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1434                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1435             }
1436 
1437             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1438                 /* if not conforming code, test rights */
1439                 if (dpl < cpl || dpl < rpl) {
1440                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1441                 }
1442             }
1443         }
1444 
1445         if (!(e2 & DESC_P_MASK)) {
1446             if (seg_reg == R_SS) {
1447                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1448             } else {
1449                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1450             }
1451         }
1452 
1453         /* set the access bit if not already set */
1454         if (!(e2 & DESC_A_MASK)) {
1455             e2 |= DESC_A_MASK;
1456             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1457         }
1458 
1459         cpu_x86_load_seg_cache(env, seg_reg, selector,
1460                        get_seg_base(e1, e2),
1461                        get_seg_limit(e1, e2),
1462                        e2);
1463 #if 0
1464         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1465                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1466 #endif
1467     }
1468 }
1469 
1470 /* protected mode jump */
1471 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1472                            target_ulong next_eip)
1473 {
1474     int gate_cs, type;
1475     uint32_t e1, e2, cpl, dpl, rpl, limit;
1476 
1477     if ((new_cs & 0xfffc) == 0) {
1478         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1479     }
1480     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1481         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1482     }
1483     cpl = env->hflags & HF_CPL_MASK;
1484     if (e2 & DESC_S_MASK) {
1485         if (!(e2 & DESC_CS_MASK)) {
1486             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1487         }
1488         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1489         if (e2 & DESC_C_MASK) {
1490             /* conforming code segment */
1491             if (dpl > cpl) {
1492                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1493             }
1494         } else {
1495             /* non conforming code segment */
1496             rpl = new_cs & 3;
1497             if (rpl > cpl) {
1498                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1499             }
1500             if (dpl != cpl) {
1501                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1502             }
1503         }
1504         if (!(e2 & DESC_P_MASK)) {
1505             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1506         }
1507         limit = get_seg_limit(e1, e2);
1508         if (new_eip > limit &&
1509             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1510             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1511         }
1512         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1513                        get_seg_base(e1, e2), limit, e2);
1514         env->eip = new_eip;
1515     } else {
1516         /* jump to call or task gate */
1517         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1518         rpl = new_cs & 3;
1519         cpl = env->hflags & HF_CPL_MASK;
1520         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1521 
1522 #ifdef TARGET_X86_64
1523         if (env->efer & MSR_EFER_LMA) {
1524             if (type != 12) {
1525                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1526             }
1527         }
1528 #endif
1529         switch (type) {
1530         case 1: /* 286 TSS */
1531         case 9: /* 386 TSS */
1532         case 5: /* task gate */
1533             if (dpl < cpl || dpl < rpl) {
1534                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1535             }
1536             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1537             break;
1538         case 4: /* 286 call gate */
1539         case 12: /* 386 call gate */
1540             if ((dpl < cpl) || (dpl < rpl)) {
1541                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1542             }
1543             if (!(e2 & DESC_P_MASK)) {
1544                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1545             }
1546             gate_cs = e1 >> 16;
1547             new_eip = (e1 & 0xffff);
1548             if (type == 12) {
1549                 new_eip |= (e2 & 0xffff0000);
1550             }
1551 
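            /*
             * In IA-32e mode a call gate is 16 bytes; the second half of
             * the descriptor holds bits 63:32 of the target RIP and must
             * have a zero type field.
             */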
1552 #ifdef TARGET_X86_64
1553             if (env->efer & MSR_EFER_LMA) {
1554                 /* load the upper 8 bytes of the 64-bit call gate */
1555                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1556                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1557                                            GETPC());
1558                 }
1559                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1560                 if (type != 0) {
1561                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1562                                            GETPC());
1563                 }
1564                 new_eip |= ((target_ulong)e1) << 32;
1565             }
1566 #endif
1567 
1568             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1569                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1570             }
1571             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1572             /* must be code segment */
1573             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1574                  (DESC_S_MASK | DESC_CS_MASK))) {
1575                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1576             }
1577             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1578                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1579                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1580             }
1581 #ifdef TARGET_X86_64
1582             if (env->efer & MSR_EFER_LMA) {
1583                 if (!(e2 & DESC_L_MASK)) {
1584                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1585                 }
1586                 if (e2 & DESC_B_MASK) {
1587                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1588                 }
1589             }
1590 #endif
1591             if (!(e2 & DESC_P_MASK)) {
1592                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1593             }
1594             limit = get_seg_limit(e1, e2);
1595             if (new_eip > limit &&
1596                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1597                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1598             }
1599             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1600                                    get_seg_base(e1, e2), limit, e2);
1601             env->eip = new_eip;
1602             break;
1603         default:
1604             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1605             break;
1606         }
1607     }
1608 }
1609 
1610 /* real mode call */
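/*
 * Far CALL in real or vm86 mode: push the return CS:IP (32-bit or
 * 16-bit depending on the operand size), then load CS with the new
 * selector and base = selector << 4; no descriptor checks are done.
 */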
1611 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1612                        int shift, uint32_t next_eip)
1613 {
1614     StackAccess sa;
1615 
1616     sa.env = env;
1617     sa.ra = GETPC();
1618     sa.sp = env->regs[R_ESP];
1619     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1620     sa.ss_base = env->segs[R_SS].base;
1621     sa.mmu_index = x86_mmu_index_pl(env, 0);
1622 
1623     if (shift) {
1624         pushl(&sa, env->segs[R_CS].selector);
1625         pushl(&sa, next_eip);
1626     } else {
1627         pushw(&sa, env->segs[R_CS].selector);
1628         pushw(&sa, next_eip);
1629     }
1630 
1631     SET_ESP(sa.sp, sa.sp_mask);
1632     env->eip = new_eip;
1633     env->segs[R_CS].selector = new_cs;
1634     env->segs[R_CS].base = (new_cs << 4);
1635 }
1636 
1637 /* protected mode call */
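/*
 * Far CALL through the GDT/LDT.  Three cases: a direct call to a code
 * segment pushes CS:EIP on the current stack; a call to a TSS or task
 * gate performs a task switch; a call gate may additionally switch to
 * the inner-privilege stack taken from the TSS before pushing the old
 * SS:ESP, the copied parameters and the return CS:EIP.
 */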
1638 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1639                             int shift, target_ulong next_eip)
1640 {
1641     int new_stack, i;
1642     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1643     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1644     uint32_t val, limit, old_sp_mask;
1645     target_ulong old_ssp, offset;
1646     StackAccess sa;
1647 
1648     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1649     LOG_PCALL_STATE(env_cpu(env));
1650     if ((new_cs & 0xfffc) == 0) {
1651         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1652     }
1653     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1654         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1655     }
1656     cpl = env->hflags & HF_CPL_MASK;
1657     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1658 
1659     sa.env = env;
1660     sa.ra = GETPC();
1661 
1662     if (e2 & DESC_S_MASK) {
1663         /* "normal" far call, no stack switch possible */
1664         if (!(e2 & DESC_CS_MASK)) {
1665             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1666         }
1667         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1668         if (e2 & DESC_C_MASK) {
1669             /* conforming code segment */
1670             if (dpl > cpl) {
1671                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1672             }
1673         } else {
1674             /* non-conforming code segment */
1675             rpl = new_cs & 3;
1676             if (rpl > cpl) {
1677                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1678             }
1679             if (dpl != cpl) {
1680                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1681             }
1682         }
1683         if (!(e2 & DESC_P_MASK)) {
1684             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1685         }
1686 
1687         sa.mmu_index = x86_mmu_index_pl(env, cpl);
1688 #ifdef TARGET_X86_64
1689         /* XXX: check 16/32 bit cases in long mode */
1690         if (shift == 2) {
1691             /* 64 bit case */
1692             sa.sp = env->regs[R_ESP];
1693             sa.sp_mask = -1;
1694             sa.ss_base = 0;
1695             pushq(&sa, env->segs[R_CS].selector);
1696             pushq(&sa, next_eip);
1697             /* from this point, not restartable */
1698             env->regs[R_ESP] = sa.sp;
1699             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1700                                    get_seg_base(e1, e2),
1701                                    get_seg_limit(e1, e2), e2);
1702             env->eip = new_eip;
1703         } else
1704 #endif
1705         {
1706             sa.sp = env->regs[R_ESP];
1707             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1708             sa.ss_base = env->segs[R_SS].base;
1709             if (shift) {
1710                 pushl(&sa, env->segs[R_CS].selector);
1711                 pushl(&sa, next_eip);
1712             } else {
1713                 pushw(&sa, env->segs[R_CS].selector);
1714                 pushw(&sa, next_eip);
1715             }
1716 
1717             limit = get_seg_limit(e1, e2);
1718             if (new_eip > limit) {
1719                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1720             }
1721             /* from this point, not restartable */
1722             SET_ESP(sa.sp, sa.sp_mask);
1723             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1724                                    get_seg_base(e1, e2), limit, e2);
1725             env->eip = new_eip;
1726         }
1727     } else {
1728         /* check gate type */
1729         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1730         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1731         rpl = new_cs & 3;
1732 
1733 #ifdef TARGET_X86_64
1734         if (env->efer & MSR_EFER_LMA) {
1735             if (type != 12) {
1736                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1737             }
1738         }
1739 #endif
1740 
1741         switch (type) {
1742         case 1: /* available 286 TSS */
1743         case 9: /* available 386 TSS */
1744         case 5: /* task gate */
1745             if (dpl < cpl || dpl < rpl) {
1746                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1747             }
1748             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1749             return;
1750         case 4: /* 286 call gate */
1751         case 12: /* 386 call gate */
1752             break;
1753         default:
1754             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1755             break;
1756         }
1757         shift = type >> 3;
1758 
1759         if (dpl < cpl || dpl < rpl) {
1760             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1761         }
1762         /* check valid bit */
1763         if (!(e2 & DESC_P_MASK)) {
1764             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1765         }
1766         selector = e1 >> 16;
1767         param_count = e2 & 0x1f;
1768         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1769 #ifdef TARGET_X86_64
1770         if (env->efer & MSR_EFER_LMA) {
1771             /* load the upper 8 bytes of the 64-bit call gate */
1772             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1773                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1774                                        GETPC());
1775             }
1776             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1777             if (type != 0) {
1778                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1779                                        GETPC());
1780             }
1781             offset |= ((target_ulong)e1) << 32;
1782         }
1783 #endif
1784         if ((selector & 0xfffc) == 0) {
1785             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1786         }
1787 
1788         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1789             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1790         }
1791         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1792             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1793         }
1794         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1795         if (dpl > cpl) {
1796             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1797         }
1798 #ifdef TARGET_X86_64
1799         if (env->efer & MSR_EFER_LMA) {
1800             if (!(e2 & DESC_L_MASK)) {
1801                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1802             }
1803             if (e2 & DESC_B_MASK) {
1804                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1805             }
1806             shift++;
1807         }
1808 #endif
1809         if (!(e2 & DESC_P_MASK)) {
1810             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1811         }
1812 
1813         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1814             /* to inner privilege */
1815             sa.mmu_index = x86_mmu_index_pl(env, dpl);
1816 #ifdef TARGET_X86_64
1817             if (shift == 2) {
1818                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1819                 new_stack = 1;
1820                 sa.sp = get_rsp_from_tss(env, dpl);
1821                 sa.sp_mask = -1;
1822                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1823                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1824                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1825             } else
1826 #endif
1827             {
1828                 uint32_t sp32;
1829                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1830                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1831                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1832                           env->regs[R_ESP]);
1833                 if ((ss & 0xfffc) == 0) {
1834                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1835                 }
1836                 if ((ss & 3) != dpl) {
1837                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1838                 }
1839                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1840                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1841                 }
1842                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1843                 if (ss_dpl != dpl) {
1844                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1845                 }
1846                 if (!(ss_e2 & DESC_S_MASK) ||
1847                     (ss_e2 & DESC_CS_MASK) ||
1848                     !(ss_e2 & DESC_W_MASK)) {
1849                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1850                 }
1851                 if (!(ss_e2 & DESC_P_MASK)) {
1852                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1853                 }
1854 
1855                 sa.sp = sp32;
1856                 sa.sp_mask = get_sp_mask(ss_e2);
1857                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1858             }
1859 
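            /*
             * Call gate to a more privileged non-conforming segment: the
             * new stack pointer comes from the TSS (with a NULL SS in
             * 64-bit mode); push the old SS:ESP on the new stack and, for
             * 16/32-bit gates, copy param_count parameters from the old
             * stack.
             */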
1860             /* push_size = ((param_count * 2) + 8) << shift; */
1861             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1862             old_ssp = env->segs[R_SS].base;
1863 
1864 #ifdef TARGET_X86_64
1865             if (shift == 2) {
1866                 /* XXX: verify if new stack address is canonical */
1867                 pushq(&sa, env->segs[R_SS].selector);
1868                 pushq(&sa, env->regs[R_ESP]);
1869                 /* parameters aren't supported for 64-bit call gates */
1870             } else
1871 #endif
1872             if (shift == 1) {
1873                 pushl(&sa, env->segs[R_SS].selector);
1874                 pushl(&sa, env->regs[R_ESP]);
1875                 for (i = param_count - 1; i >= 0; i--) {
1876                     val = cpu_ldl_data_ra(env,
1877                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1878                                           GETPC());
1879                     pushl(&sa, val);
1880                 }
1881             } else {
1882                 pushw(&sa, env->segs[R_SS].selector);
1883                 pushw(&sa, env->regs[R_ESP]);
1884                 for (i = param_count - 1; i >= 0; i--) {
1885                     val = cpu_lduw_data_ra(env,
1886                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1887                                            GETPC());
1888                     pushw(&sa, val);
1889                 }
1890             }
1891             new_stack = 1;
1892         } else {
1893             /* to same privilege */
1894             sa.mmu_index = x86_mmu_index_pl(env, cpl);
1895             sa.sp = env->regs[R_ESP];
1896             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1897             sa.ss_base = env->segs[R_SS].base;
1898             /* push_size = (4 << shift); */
1899             new_stack = 0;
1900         }
1901 
1902 #ifdef TARGET_X86_64
1903         if (shift == 2) {
1904             pushq(&sa, env->segs[R_CS].selector);
1905             pushq(&sa, next_eip);
1906         } else
1907 #endif
1908         if (shift == 1) {
1909             pushl(&sa, env->segs[R_CS].selector);
1910             pushl(&sa, next_eip);
1911         } else {
1912             pushw(&sa, env->segs[R_CS].selector);
1913             pushw(&sa, next_eip);
1914         }
1915 
1916         /* from this point, not restartable */
1917 
1918         if (new_stack) {
1919 #ifdef TARGET_X86_64
1920             if (shift == 2) {
1921                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1922             } else
1923 #endif
1924             {
1925                 ss = (ss & ~3) | dpl;
1926                 cpu_x86_load_seg_cache(env, R_SS, ss,
1927                                        sa.ss_base,
1928                                        get_seg_limit(ss_e1, ss_e2),
1929                                        ss_e2);
1930             }
1931         }
1932 
1933         selector = (selector & ~3) | dpl;
1934         cpu_x86_load_seg_cache(env, R_CS, selector,
1935                        get_seg_base(e1, e2),
1936                        get_seg_limit(e1, e2),
1937                        e2);
1938         SET_ESP(sa.sp, sa.sp_mask);
1939         env->eip = offset;
1940     }
1941 }
1942 
1943 /* real and vm86 mode iret */
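/*
 * IRET in real or vm86 mode: pop IP, CS and FLAGS (16 or 32 bit wide).
 * In vm86 mode IOPL is not writable, so it is masked out of the flags
 * that get reloaded.
 */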
1944 void helper_iret_real(CPUX86State *env, int shift)
1945 {
1946     uint32_t new_cs, new_eip, new_eflags;
1947     int eflags_mask;
1948     StackAccess sa;
1949 
1950     sa.env = env;
1951     sa.ra = GETPC();
1952     sa.mmu_index = x86_mmu_index_pl(env, 0);
1953     sa.sp_mask = 0xffff; /* XXX: use SS segment size? */
1954     sa.sp = env->regs[R_ESP];
1955     sa.ss_base = env->segs[R_SS].base;
1956 
1957     if (shift == 1) {
1958         /* 32 bits */
1959         new_eip = popl(&sa);
1960         new_cs = popl(&sa) & 0xffff;
1961         new_eflags = popl(&sa);
1962     } else {
1963         /* 16 bits */
1964         new_eip = popw(&sa);
1965         new_cs = popw(&sa);
1966         new_eflags = popw(&sa);
1967     }
1968     SET_ESP(sa.sp, sa.sp_mask);
1969     env->segs[R_CS].selector = new_cs;
1970     env->segs[R_CS].base = (new_cs << 4);
1971     env->eip = new_eip;
1972     if (env->eflags & VM_MASK) {
1973         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1974             NT_MASK;
1975     } else {
1976         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1977             RF_MASK | NT_MASK;
1978     }
1979     if (shift == 0) {
1980         eflags_mask &= 0xffff;
1981     }
1982     cpu_load_eflags(env, new_eflags, eflags_mask);
1983     env->hflags2 &= ~HF2_NMI_MASK;
1984 }
1985 
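/*
 * On a return to an outer privilege level, data segment registers that
 * are not accessible at the new CPL must be invalidated: the selector
 * is cleared and the cached descriptor is marked not present so that a
 * later access faults.
 */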
1986 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1987 {
1988     int dpl;
1989     uint32_t e2;
1990 
1991     /* XXX: on x86_64, we do not want to nullify FS and GS because
1992        they may still contain a valid base. I would be interested to
1993        know how a real x86_64 CPU behaves */
1994     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1995         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1996         return;
1997     }
1998 
1999     e2 = env->segs[seg_reg].flags;
2000     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2001     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2002         /* data or non-conforming code segment */
2003         if (dpl < cpl) {
2004             cpu_x86_load_seg_cache(env, seg_reg, 0,
2005                                    env->segs[seg_reg].base,
2006                                    env->segs[seg_reg].limit,
2007                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
2008         }
2009     }
2010 }
2011 
2012 /* protected mode lret/iret */
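/*
 * Common code for RETF and IRET in protected mode.  Pops the return
 * CS:EIP (plus EFLAGS for IRET), checks the target code segment and,
 * when returning to an outer privilege level, also pops SS:ESP and
 * revalidates the data segment registers.  "shift" selects the
 * 16/32/64-bit operand size and "addend" is the extra byte count of
 * RETF imm16.
 */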
2013 static inline void helper_ret_protected(CPUX86State *env, int shift,
2014                                         int is_iret, int addend,
2015                                         uintptr_t retaddr)
2016 {
2017     uint32_t new_cs, new_eflags, new_ss;
2018     uint32_t new_es, new_ds, new_fs, new_gs;
2019     uint32_t e1, e2, ss_e1, ss_e2;
2020     int cpl, dpl, rpl, eflags_mask, iopl;
2021     target_ulong new_eip, new_esp;
2022     StackAccess sa;
2023 
2024     cpl = env->hflags & HF_CPL_MASK;
2025 
2026     sa.env = env;
2027     sa.ra = retaddr;
2028     sa.mmu_index = x86_mmu_index_pl(env, cpl);
2029 
2030 #ifdef TARGET_X86_64
2031     if (shift == 2) {
2032         sa.sp_mask = -1;
2033     } else
2034 #endif
2035     {
2036         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
2037     }
2038     sa.sp = env->regs[R_ESP];
2039     sa.ss_base = env->segs[R_SS].base;
2040     new_eflags = 0; /* avoid warning */
2041 #ifdef TARGET_X86_64
2042     if (shift == 2) {
2043         new_eip = popq(&sa);
2044         new_cs = popq(&sa) & 0xffff;
2045         if (is_iret) {
2046             new_eflags = popq(&sa);
2047         }
2048     } else
2049 #endif
2050     {
2051         if (shift == 1) {
2052             /* 32 bits */
2053             new_eip = popl(&sa);
2054             new_cs = popl(&sa) & 0xffff;
2055             if (is_iret) {
2056                 new_eflags = popl(&sa);
2057                 if (new_eflags & VM_MASK) {
2058                     goto return_to_vm86;
2059                 }
2060             }
2061         } else {
2062             /* 16 bits */
2063             new_eip = popw(&sa);
2064             new_cs = popw(&sa);
2065             if (is_iret) {
2066                 new_eflags = popw(&sa);
2067             }
2068         }
2069     }
2070     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2071               new_cs, new_eip, shift, addend);
2072     LOG_PCALL_STATE(env_cpu(env));
2073     if ((new_cs & 0xfffc) == 0) {
2074         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2075     }
2076     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2077         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2078     }
2079     if (!(e2 & DESC_S_MASK) ||
2080         !(e2 & DESC_CS_MASK)) {
2081         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2082     }
2083     rpl = new_cs & 3;
2084     if (rpl < cpl) {
2085         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2086     }
2087     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2088     if (e2 & DESC_C_MASK) {
2089         if (dpl > rpl) {
2090             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2091         }
2092     } else {
2093         if (dpl != rpl) {
2094             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2095         }
2096     }
2097     if (!(e2 & DESC_P_MASK)) {
2098         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2099     }
2100 
2101     sa.sp += addend;
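    /*
     * A 64-bit IRET always pops SS:RSP, even when the privilege level
     * does not change; otherwise the outer-level path below is only
     * taken when the RPL of the new CS is greater than the current CPL.
     */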
2102     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2103                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2104         /* return to same privilege level */
2105         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2106                        get_seg_base(e1, e2),
2107                        get_seg_limit(e1, e2),
2108                        e2);
2109     } else {
2110         /* return to different privilege level */
2111 #ifdef TARGET_X86_64
2112         if (shift == 2) {
2113             new_esp = popq(&sa);
2114             new_ss = popq(&sa) & 0xffff;
2115         } else
2116 #endif
2117         {
2118             if (shift == 1) {
2119                 /* 32 bits */
2120                 new_esp = popl(&sa);
2121                 new_ss = popl(&sa) & 0xffff;
2122             } else {
2123                 /* 16 bits */
2124                 new_esp = popw(&sa);
2125                 new_ss = popw(&sa);
2126             }
2127         }
2128         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2129                   new_ss, new_esp);
2130         if ((new_ss & 0xfffc) == 0) {
2131 #ifdef TARGET_X86_64
2132             /* NULL ss is allowed in long mode if the new CPL (= RPL) != 3 */
2133             /* XXX: test CS64? */
2134             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2135                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2136                                        0, 0xffffffff,
2137                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2138                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2139                                        DESC_W_MASK | DESC_A_MASK);
2140                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2141             } else
2142 #endif
2143             {
2144                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2145             }
2146         } else {
2147             if ((new_ss & 3) != rpl) {
2148                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2149             }
2150             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2151                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2152             }
2153             if (!(ss_e2 & DESC_S_MASK) ||
2154                 (ss_e2 & DESC_CS_MASK) ||
2155                 !(ss_e2 & DESC_W_MASK)) {
2156                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2157             }
2158             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2159             if (dpl != rpl) {
2160                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2161             }
2162             if (!(ss_e2 & DESC_P_MASK)) {
2163                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2164             }
2165             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2166                                    get_seg_base(ss_e1, ss_e2),
2167                                    get_seg_limit(ss_e1, ss_e2),
2168                                    ss_e2);
2169         }
2170 
2171         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2172                        get_seg_base(e1, e2),
2173                        get_seg_limit(e1, e2),
2174                        e2);
2175         sa.sp = new_esp;
2176 #ifdef TARGET_X86_64
2177         if (env->hflags & HF_CS64_MASK) {
2178             sa.sp_mask = -1;
2179         } else
2180 #endif
2181         {
2182             sa.sp_mask = get_sp_mask(ss_e2);
2183         }
2184 
2185         /* validate data segments */
2186         validate_seg(env, R_ES, rpl);
2187         validate_seg(env, R_DS, rpl);
2188         validate_seg(env, R_FS, rpl);
2189         validate_seg(env, R_GS, rpl);
2190 
2191         sa.sp += addend;
2192     }
2193     SET_ESP(sa.sp, sa.sp_mask);
2194     env->eip = new_eip;
2195     if (is_iret) {
2196         /* NOTE: 'cpl' is the _old_ CPL */
2197         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2198         if (cpl == 0) {
2199             eflags_mask |= IOPL_MASK;
2200         }
2201         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2202         if (cpl <= iopl) {
2203             eflags_mask |= IF_MASK;
2204         }
2205         if (shift == 0) {
2206             eflags_mask &= 0xffff;
2207         }
2208         cpu_load_eflags(env, new_eflags, eflags_mask);
2209     }
2210     return;
2211 
2212  return_to_vm86:
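    /*
     * 32-bit IRET frame for a return to virtual-8086 mode: EIP, CS and
     * EFLAGS have already been popped above; the frame continues with
     * ESP, SS, ES, DS, FS and GS.
     */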
2213     new_esp = popl(&sa);
2214     new_ss = popl(&sa);
2215     new_es = popl(&sa);
2216     new_ds = popl(&sa);
2217     new_fs = popl(&sa);
2218     new_gs = popl(&sa);
2219 
2220     /* modify processor state */
2221     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2222                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2223                     VIP_MASK);
2224     load_seg_vm(env, R_CS, new_cs & 0xffff);
2225     load_seg_vm(env, R_SS, new_ss & 0xffff);
2226     load_seg_vm(env, R_ES, new_es & 0xffff);
2227     load_seg_vm(env, R_DS, new_ds & 0xffff);
2228     load_seg_vm(env, R_FS, new_fs & 0xffff);
2229     load_seg_vm(env, R_GS, new_gs & 0xffff);
2230 
2231     env->eip = new_eip & 0xffff;
2232     env->regs[R_ESP] = new_esp;
2233 }
2234 
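/*
 * IRET in protected mode.  If NT is set, this is a task return through
 * the back link of the current TSS (not permitted in long mode, where
 * it raises #GP(0)); otherwise it is a normal inter- or intra-privilege
 * return.  NMI blocking is lifted in both cases.
 */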
2235 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2236 {
2237     int tss_selector, type;
2238     uint32_t e1, e2;
2239 
2240     /* specific case for TSS */
2241     if (env->eflags & NT_MASK) {
2242 #ifdef TARGET_X86_64
2243         if (env->hflags & HF_LMA_MASK) {
2244             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2245         }
2246 #endif
2247         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2248         if (tss_selector & 4) {
2249             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2250         }
2251         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2252             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2253         }
2254         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2255         /* NOTE: the 0x17 mask folds the S bit into the type, so a single
2256            compare checks for a system descriptor that is a busy TSS */
2256         if (type != 3) {
2257             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2258         }
2259         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2260     } else {
2261         helper_ret_protected(env, shift, 1, 0, GETPC());
2262     }
2263     env->hflags2 &= ~HF2_NMI_MASK;
2264 }
2265 
2266 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2267 {
2268     helper_ret_protected(env, shift, 0, addend, GETPC());
2269 }
2270 
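/*
 * SYSENTER: enter CPL 0 with flat CS/SS derived from IA32_SYSENTER_CS
 * (SS = CS + 8) and ESP/EIP taken from IA32_SYSENTER_ESP/EIP.  In long
 * mode the target code segment is 64-bit.
 */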
2271 void helper_sysenter(CPUX86State *env)
2272 {
2273     if (env->sysenter_cs == 0) {
2274         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2275     }
2276     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2277 
2278 #ifdef TARGET_X86_64
2279     if (env->hflags & HF_LMA_MASK) {
2280         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2281                                0, 0xffffffff,
2282                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2283                                DESC_S_MASK |
2284                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2285                                DESC_L_MASK);
2286     } else
2287 #endif
2288     {
2289         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2290                                0, 0xffffffff,
2291                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2292                                DESC_S_MASK |
2293                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2294     }
2295     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2296                            0, 0xffffffff,
2297                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2298                            DESC_S_MASK |
2299                            DESC_W_MASK | DESC_A_MASK);
2300     env->regs[R_ESP] = env->sysenter_esp;
2301     env->eip = env->sysenter_eip;
2302 }
2303 
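/*
 * SYSEXIT: return to CPL 3.  CS/SS are derived from IA32_SYSENTER_CS
 * (+16/+24 for a 32-bit return, +32/+40 for a 64-bit one), ESP comes
 * from ECX and EIP from EDX.  Faults with #GP(0) unless CPL is 0 and
 * the MSR is non-zero.
 */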
2304 void helper_sysexit(CPUX86State *env, int dflag)
2305 {
2306     int cpl;
2307 
2308     cpl = env->hflags & HF_CPL_MASK;
2309     if (env->sysenter_cs == 0 || cpl != 0) {
2310         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2311     }
2312 #ifdef TARGET_X86_64
2313     if (dflag == 2) {
2314         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2315                                3, 0, 0xffffffff,
2316                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2317                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2318                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2319                                DESC_L_MASK);
2320         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2321                                3, 0, 0xffffffff,
2322                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2323                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2324                                DESC_W_MASK | DESC_A_MASK);
2325     } else
2326 #endif
2327     {
2328         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2329                                3, 0, 0xffffffff,
2330                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2331                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2332                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2333         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2334                                3, 0, 0xffffffff,
2335                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2336                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2337                                DESC_W_MASK | DESC_A_MASK);
2338     }
2339     env->regs[R_ESP] = env->regs[R_ECX];
2340     env->eip = env->regs[R_EDX];
2341 }
2342 
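/*
 * LSL: return the segment limit of the selected descriptor and set ZF
 * (via CC_SRC) when the descriptor type is allowed and visible at the
 * current CPL/RPL; otherwise clear ZF and return 0.
 */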
2343 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2344 {
2345     unsigned int limit;
2346     uint32_t e1, e2, selector;
2347     int rpl, dpl, cpl, type;
2348 
2349     selector = selector1 & 0xffff;
2350     assert(CC_OP == CC_OP_EFLAGS);
2351     if ((selector & 0xfffc) == 0) {
2352         goto fail;
2353     }
2354     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2355         goto fail;
2356     }
2357     rpl = selector & 3;
2358     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2359     cpl = env->hflags & HF_CPL_MASK;
2360     if (e2 & DESC_S_MASK) {
2361         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2362             /* conforming */
2363         } else {
2364             if (dpl < cpl || dpl < rpl) {
2365                 goto fail;
2366             }
2367         }
2368     } else {
2369         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2370         switch (type) {
2371         case 1:
2372         case 2:
2373         case 3:
2374         case 9:
2375         case 11:
2376             break;
2377         default:
2378             goto fail;
2379         }
2380         if (dpl < cpl || dpl < rpl) {
2381         fail:
2382             CC_SRC &= ~CC_Z;
2383             return 0;
2384         }
2385     }
2386     limit = get_seg_limit(e1, e2);
2387     CC_SRC |= CC_Z;
2388     return limit;
2389 }
2390 
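/*
 * LAR: return the access rights bytes of the descriptor (e2 masked with
 * 0x00f0ff00) and set ZF on success, mirroring the checks done for LSL
 * but accepting a few more system descriptor types.
 */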
2391 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2392 {
2393     uint32_t e1, e2, selector;
2394     int rpl, dpl, cpl, type;
2395 
2396     selector = selector1 & 0xffff;
2397     assert(CC_OP == CC_OP_EFLAGS);
2398     if ((selector & 0xfffc) == 0) {
2399         goto fail;
2400     }
2401     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2402         goto fail;
2403     }
2404     rpl = selector & 3;
2405     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2406     cpl = env->hflags & HF_CPL_MASK;
2407     if (e2 & DESC_S_MASK) {
2408         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2409             /* conforming */
2410         } else {
2411             if (dpl < cpl || dpl < rpl) {
2412                 goto fail;
2413             }
2414         }
2415     } else {
2416         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2417         switch (type) {
2418         case 1:
2419         case 2:
2420         case 3:
2421         case 4:
2422         case 5:
2423         case 9:
2424         case 11:
2425         case 12:
2426             break;
2427         default:
2428             goto fail;
2429         }
2430         if (dpl < cpl || dpl < rpl) {
2431         fail:
2432             CC_SRC &= ~CC_Z;
2433             return 0;
2434         }
2435     }
2436     CC_SRC |= CC_Z;
2437     return e2 & 0x00f0ff00;
2438 }
2439 
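/*
 * VERR: set ZF if the segment is readable at the current CPL/RPL
 * (conforming code segments skip the privilege check).
 */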
2440 void helper_verr(CPUX86State *env, target_ulong selector1)
2441 {
2442     uint32_t e1, e2, eflags, selector;
2443     int rpl, dpl, cpl;
2444 
2445     selector = selector1 & 0xffff;
2446     eflags = cpu_cc_compute_all(env) | CC_Z;
2447     if ((selector & 0xfffc) == 0) {
2448         goto fail;
2449     }
2450     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2451         goto fail;
2452     }
2453     if (!(e2 & DESC_S_MASK)) {
2454         goto fail;
2455     }
2456     rpl = selector & 3;
2457     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2458     cpl = env->hflags & HF_CPL_MASK;
2459     if (e2 & DESC_CS_MASK) {
2460         if (!(e2 & DESC_R_MASK)) {
2461             goto fail;
2462         }
2463         if (!(e2 & DESC_C_MASK)) {
2464             if (dpl < cpl || dpl < rpl) {
2465                 goto fail;
2466             }
2467         }
2468     } else {
2469         if (dpl < cpl || dpl < rpl) {
2470         fail:
2471             eflags &= ~CC_Z;
2472         }
2473     }
2474     CC_SRC = eflags;
2475     CC_OP = CC_OP_EFLAGS;
2476 }
2477 
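/*
 * VERW: set ZF if the segment is a writable data segment accessible at
 * the current CPL/RPL.
 */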
2478 void helper_verw(CPUX86State *env, target_ulong selector1)
2479 {
2480     uint32_t e1, e2, eflags, selector;
2481     int rpl, dpl, cpl;
2482 
2483     selector = selector1 & 0xffff;
2484     eflags = cpu_cc_compute_all(env) | CC_Z;
2485     if ((selector & 0xfffc) == 0) {
2486         goto fail;
2487     }
2488     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2489         goto fail;
2490     }
2491     if (!(e2 & DESC_S_MASK)) {
2492         goto fail;
2493     }
2494     rpl = selector & 3;
2495     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2496     cpl = env->hflags & HF_CPL_MASK;
2497     if (e2 & DESC_CS_MASK) {
2498         goto fail;
2499     } else {
2500         if (dpl < cpl || dpl < rpl) {
2501             goto fail;
2502         }
2503         if (!(e2 & DESC_W_MASK)) {
2504         fail:
2505             eflags &= ~CC_Z;
2506         }
2507     }
2508     CC_SRC = eflags;
2509     CC_OP = CC_OP_EFLAGS;
2510 }
2511