/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
#include "access.h"

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif
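
/*
 * Note on the 64-bit SET_ESP above: a plain mask/merge would wrongly
 * preserve RSP bits 63:32 for a 32-bit stack, so the 0xffffffff case
 * zero-extends instead (32-bit stack updates clear the high half of RSP),
 * while the 0xffff case only replaces the low 16 bits.
 */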

/* XXX: use mmu_index to have proper DPL support */
typedef struct StackAccess
{
    CPUX86State *env;
    uintptr_t ra;
    target_ulong ss_base;
    target_ulong sp;
    target_ulong sp_mask;
    int mmu_index;
} StackAccess;

static void pushw(StackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static void pushl(StackAccess *sa, uint32_t val)
{
    sa->sp -= 4;
    cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static uint16_t popw(StackAccess *sa)
{
    uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
                                      sa->ss_base + (sa->sp & sa->sp_mask),
                                      sa->mmu_index, sa->ra);
    sa->sp += 2;
    return ret;
}

static uint32_t popl(StackAccess *sa)
{
    uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
                                     sa->ss_base + (sa->sp & sa->sp_mask),
                                     sa->mmu_index, sa->ra);
    sa->sp += 4;
    return ret;
}

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = PG_MODE_PG;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
{
    int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
    int mmu_index_base =
        !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
        (pl < 3 && (env->eflags & AC_MASK)
         ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);

    return mmu_index_base + mmu_index_32;
}

int cpu_mmu_index_kernel(CPUX86State *env)
{
    return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
}

/* return non-zero on error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector,
                               uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
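
/*
 * Note: e1/e2 above are the low and high 32-bit words of a legacy segment
 * descriptor: e1 holds limit[15:0] and base[15:0]; e2 holds base[23:16],
 * the type/S/DPL/P bits, limit[19:16], the AVL/L/D/G flags and base[31:24].
 * For example, the flat descriptor words e2=0x00cf9b00, e1=0x0000ffff
 * decode to base 0 and, with the G bit set, limit 0xffffffff.
 */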

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* return 0 if switching to a 16-bit TSS */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int mmu_index, index;
    target_ulong ptr;
    X86Access old, new;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, read and load the TSS segment */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* new TSS must be busy iff the source is an IRET instruction */
    if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }

    /* X86Access avoids memory exceptions during the task switch */
    mmu_index = cpu_mmu_index_kernel(env);
    access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
                       MMU_DATA_STORE, mmu_index, retaddr);

    if (source == SWITCH_TSS_CALL) {
        /* Probe for future write of parent task */
        probe_access(env, tss_base, 2, MMU_DATA_STORE,
                     mmu_index, retaddr);
    }
    /* While the true tss_limit may be larger, we don't access the iopb here. */
    access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
                       MMU_DATA_LOAD, mmu_index, retaddr);

    /* save the current state in the old TSS */
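    /*
     * The offsets used below follow the architectural TSS layouts:
     * 32-bit TSS: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28..0x44 EAX..EDI,
     *             0x48..0x5c ES/CS/SS/DS/FS/GS, 0x60 LDT, 0x64 T bit/iopb;
     * 16-bit TSS: 0x0e IP, 0x10 FLAGS, 0x12..0x20 AX..DI,
     *             0x22..0x28 ES/CS/SS/DS, 0x2a LDT.
     */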
    old_eflags = cpu_compute_eflags(env);
    if (old_type & 8) {
        /* 32 bit */
        access_stl(&old, env->tr.base + 0x20, next_eip);
        access_stl(&old, env->tr.base + 0x24, old_eflags);
        access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            access_stw(&old, env->tr.base + (0x48 + i * 4),
                       env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        access_stw(&old, env->tr.base + 0x0e, next_eip);
        access_stw(&old, env->tr.base + 0x10, old_eflags);
        access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            access_stw(&old, env->tr.base + (0x22 + i * 2),
                       env->segs[i].selector);
        }
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = access_ldl(&new, tss_base + 0x1c);
        new_eip = access_ldl(&new, tss_base + 0x20);
        new_eflags = access_ldl(&new, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
        }
        new_ldt = access_ldw(&new, tss_base + 0x60);
        new_trap = access_ldl(&new, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = access_ldw(&new, tss_base + 0x0e);
        new_eflags = access_ldw(&new, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
        }
        new_ldt = access_ldw(&new, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }

    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
        if (old_type & 8) {
            access_stl(&old, env->tr.base + 0x24, old_eflags);
        } else {
            access_stw(&old, env->tr.base + 0x10, old_eflags);
        }
    }

    if (source == SWITCH_TSS_CALL) {
        /*
         * Thanks to the probe_access above, we know the first two
         * bytes addressed by &new are writable too.
         */
        access_stw(&new, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */

    /* now if an exception occurs, it will occur in the next task context */

    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       a possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limit */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
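
/*
 * Note: when the SS descriptor flags have the L bit set (64-bit mode) the
 * function above returns 0, which SET_ESP treats as "write all of RSP";
 * B set selects a 32-bit ESP mask, otherwise a 16-bit SP mask is used.
 */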

static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else, including reserved exceptions, is a fault.  */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
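
/*
 * The vectors handled above are those that push an error code:
 * 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP), 14 (#PF) and 17 (#AC).
 */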

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, eflags;
    int vm86 = env->eflags & VM_MASK;
    StackAccess sa;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    sa.env = env;
    sa.ra = 0;

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code on the destination stack */
            cpl = env->hflags & HF_CPL_MASK;
            sa.mmu_index = x86_mmu_index_pl(env, cpl);
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                sa.sp_mask = 0xffffffff;
            } else {
                sa.sp_mask = 0xffff;
            }
            sa.sp = env->regs[R_ESP];
            sa.ss_base = env->segs[R_SS].base;
            if (shift) {
                pushl(&sa, error_code);
            } else {
                pushw(&sa, error_code);
            }
            SET_ESP(sa.sp, sa.sp_mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    sa.mmu_index = x86_mmu_index_pl(env, dpl);
    if (dpl < cpl) {
        /* to inner privilege */
        uint32_t esp;
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sa.sp = esp;
        sa.sp_mask = get_sp_mask(ss_e2);
        sa.ss_base = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
        sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
        sa.ss_base = env->segs[R_SS].base;
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF to 0, while Intel leaves
     * it as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                pushl(&sa, env->segs[R_GS].selector);
                pushl(&sa, env->segs[R_FS].selector);
                pushl(&sa, env->segs[R_DS].selector);
                pushl(&sa, env->segs[R_ES].selector);
            }
            pushl(&sa, env->segs[R_SS].selector);
            pushl(&sa, env->regs[R_ESP]);
        }
        pushl(&sa, eflags);
        pushl(&sa, env->segs[R_CS].selector);
        pushl(&sa, old_eip);
        if (has_error_code) {
            pushl(&sa, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                pushw(&sa, env->segs[R_GS].selector);
                pushw(&sa, env->segs[R_FS].selector);
                pushw(&sa, env->segs[R_DS].selector);
                pushw(&sa, env->segs[R_ES].selector);
            }
            pushw(&sa, env->segs[R_SS].selector);
            pushw(&sa, env->regs[R_ESP]);
        }
        pushw(&sa, eflags);
        pushw(&sa, env->segs[R_CS].selector);
        pushw(&sa, old_eip);
        if (has_error_code) {
            pushw(&sa, error_code);
        }
    }

    /* an interrupt gate clears the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
                               get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(sa.sp, sa.sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

static void pushq(StackAccess *sa, uint64_t val)
{
    sa->sp -= 8;
    cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
}

static uint64_t popq(StackAccess *sa)
{
    uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
    sa->sp += 8;
    return ret;
}

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}
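
/*
 * In the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and IST1..IST7
 * at offsets 36..84, which is why get_rsp_from_tss() maps a privilege level
 * or (ist + 3) to offset 8 * level + 4.
 */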

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, eflags;
    target_ulong old_eip, offset;
    bool set_rf;
    StackAccess sa;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = x86_mmu_index_pl(env, dpl);
    sa.sp_mask = -1;
    sa.ss_base = 0;
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
    }
    sa.sp &= ~0xfLL; /* align stack */

    /* See do_interrupt_protected.  */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    pushq(&sa, env->segs[R_SS].selector);
    pushq(&sa, env->regs[R_ESP]);
    pushq(&sa, eflags);
    pushq(&sa, env->segs[R_CS].selector);
    pushq(&sa, old_eip);
    if (has_error_code) {
        pushq(&sa, error_code);
    }

    /* an interrupt gate clears the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = sa.sp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
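    /*
     * STAR[63:48] is the SYSRET selector base: CS is loaded from this value
     * (+16 for a 64-bit return) and SS from this value + 8, both with RPL
     * forced to 3, matching the descriptor caches built below.
     */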
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int selector;
    uint32_t offset;
    uint32_t old_cs, old_eip;
    StackAccess sa;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);

    sa.env = env;
    sa.ra = 0;
    sa.sp = env->regs[R_ESP];
    sa.sp_mask = 0xffff;
    sa.ss_base = env->segs[R_SS].base;
    sa.mmu_index = x86_mmu_index_pl(env, 0);

    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    pushw(&sa, cpu_compute_eflags(env));
    pushw(&sa, old_cs);
    pushw(&sa, old_eip);

    /* update processor state */
    SET_ESP(sa.sp, sa.sp_mask);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

/* only works in protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
1468 
1469 /* protected mode jump */
1470 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1471                            target_ulong next_eip)
1472 {
1473     int gate_cs, type;
1474     uint32_t e1, e2, cpl, dpl, rpl, limit;
1475 
1476     if ((new_cs & 0xfffc) == 0) {
1477         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1478     }
1479     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1480         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1481     }
1482     cpl = env->hflags & HF_CPL_MASK;
1483     if (e2 & DESC_S_MASK) {
1484         if (!(e2 & DESC_CS_MASK)) {
1485             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1486         }
1487         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1488         if (e2 & DESC_C_MASK) {
1489             /* conforming code segment */
1490             if (dpl > cpl) {
1491                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1492             }
1493         } else {
1494             /* non conforming code segment */
1495             rpl = new_cs & 3;
1496             if (rpl > cpl) {
1497                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1498             }
1499             if (dpl != cpl) {
1500                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1501             }
1502         }
1503         if (!(e2 & DESC_P_MASK)) {
1504             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1505         }
1506         limit = get_seg_limit(e1, e2);
1507         if (new_eip > limit &&
1508             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1509             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1510         }
1511         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1512                        get_seg_base(e1, e2), limit, e2);
1513         env->eip = new_eip;
1514     } else {
1515         /* jump to call or task gate */
1516         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1517         rpl = new_cs & 3;
1518         cpl = env->hflags & HF_CPL_MASK;
1519         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1520 
1521 #ifdef TARGET_X86_64
1522         if (env->efer & MSR_EFER_LMA) {
1523             if (type != 12) {
1524                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1525             }
1526         }
1527 #endif
1528         switch (type) {
1529         case 1: /* 286 TSS */
1530         case 9: /* 386 TSS */
1531         case 5: /* task gate */
1532             if (dpl < cpl || dpl < rpl) {
1533                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1534             }
1535             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1536             break;
1537         case 4: /* 286 call gate */
1538         case 12: /* 386 call gate */
1539             if ((dpl < cpl) || (dpl < rpl)) {
1540                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1541             }
1542             if (!(e2 & DESC_P_MASK)) {
1543                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1544             }
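            /*
             * Call gate layout: target selector in e1[31:16], target offset
             * in e1[15:0], extended by e2[31:16] for 386 gates.
             */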
1545             gate_cs = e1 >> 16;
1546             new_eip = (e1 & 0xffff);
1547             if (type == 12) {
1548                 new_eip |= (e2 & 0xffff0000);
1549             }
1550 
1551 #ifdef TARGET_X86_64
1552             if (env->efer & MSR_EFER_LMA) {
1553                 /* load the upper 8 bytes of the 64-bit call gate */
1554                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1555                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1556                                            GETPC());
1557                 }
1558                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
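                /*
                 * The upper half of a 16-byte descriptor must have a zero
                 * type field so it cannot be mistaken for a legacy descriptor.
                 */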
1559                 if (type != 0) {
1560                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1561                                            GETPC());
1562                 }
1563                 new_eip |= ((target_ulong)e1) << 32;
1564             }
1565 #endif
1566 
1567             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1568                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1569             }
1570             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1571             /* must be code segment */
1572             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1573                  (DESC_S_MASK | DESC_CS_MASK))) {
1574                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1575             }
1576             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1577                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1578                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1579             }
1580 #ifdef TARGET_X86_64
1581             if (env->efer & MSR_EFER_LMA) {
1582                 if (!(e2 & DESC_L_MASK)) {
1583                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1584                 }
1585                 if (e2 & DESC_B_MASK) {
1586                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1587                 }
1588             }
1589 #endif
1590             if (!(e2 & DESC_P_MASK)) {
1591                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1592             }
1593             limit = get_seg_limit(e1, e2);
1594             if (new_eip > limit &&
1595                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1596                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1597             }
1598             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1599                                    get_seg_base(e1, e2), limit, e2);
1600             env->eip = new_eip;
1601             break;
1602         default:
1603             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1604             break;
1605         }
1606     }
1607 }
1608 
1609 /* real mode call */
1610 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1611                        int shift, uint32_t next_eip)
1612 {
1613     StackAccess sa;
1614 
1615     sa.env = env;
1616     sa.ra = GETPC();
1617     sa.sp = env->regs[R_ESP];
1618     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1619     sa.ss_base = env->segs[R_SS].base;
1620     sa.mmu_index = x86_mmu_index_pl(env, 0);
1621 
1622     if (shift) {
1623         pushl(&sa, env->segs[R_CS].selector);
1624         pushl(&sa, next_eip);
1625     } else {
1626         pushw(&sa, env->segs[R_CS].selector);
1627         pushw(&sa, next_eip);
1628     }
1629 
1630     SET_ESP(sa.sp, sa.sp_mask);
1631     env->eip = new_eip;
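    /* in real mode the descriptor cache base is simply selector << 4;
       limit and flags are left unchanged */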
1632     env->segs[R_CS].selector = new_cs;
1633     env->segs[R_CS].base = (new_cs << 4);
1634 }
1635 
1636 /* protected mode call */
1637 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1638                             int shift, target_ulong next_eip)
1639 {
1640     int new_stack, i;
1641     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1642     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1643     uint32_t val, limit, old_sp_mask;
1644     target_ulong old_ssp, offset;
1645     StackAccess sa;
1646 
1647     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1648     LOG_PCALL_STATE(env_cpu(env));
1649     if ((new_cs & 0xfffc) == 0) {
1650         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1651     }
1652     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1653         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1654     }
1655     cpl = env->hflags & HF_CPL_MASK;
1656     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1657 
1658     sa.env = env;
1659     sa.ra = GETPC();
1660 
1661     if (e2 & DESC_S_MASK) {
1662         /* "normal" far call, no stack switch possible */
1663         if (!(e2 & DESC_CS_MASK)) {
1664             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1665         }
1666         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1667         if (e2 & DESC_C_MASK) {
1668             /* conforming code segment */
1669             if (dpl > cpl) {
1670                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1671             }
1672         } else {
1673             /* non conforming code segment */
1674             rpl = new_cs & 3;
1675             if (rpl > cpl) {
1676                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1677             }
1678             if (dpl != cpl) {
1679                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1680             }
1681         }
1682         if (!(e2 & DESC_P_MASK)) {
1683             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1684         }
1685 
1686         sa.mmu_index = x86_mmu_index_pl(env, cpl);
1687 #ifdef TARGET_X86_64
1688         /* XXX: check 16/32 bit cases in long mode */
1689         if (shift == 2) {
1690             /* 64 bit case */
1691             sa.sp = env->regs[R_ESP];
1692             sa.sp_mask = -1;
1693             sa.ss_base = 0;
1694             pushq(&sa, env->segs[R_CS].selector);
1695             pushq(&sa, next_eip);
1696             /* from this point, not restartable */
1697             env->regs[R_ESP] = sa.sp;
1698             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1699                                    get_seg_base(e1, e2),
1700                                    get_seg_limit(e1, e2), e2);
1701             env->eip = new_eip;
1702         } else
1703 #endif
1704         {
1705             sa.sp = env->regs[R_ESP];
1706             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1707             sa.ss_base = env->segs[R_SS].base;
1708             if (shift) {
1709                 pushl(&sa, env->segs[R_CS].selector);
1710                 pushl(&sa, next_eip);
1711             } else {
1712                 pushw(&sa, env->segs[R_CS].selector);
1713                 pushw(&sa, next_eip);
1714             }
1715 
1716             limit = get_seg_limit(e1, e2);
1717             if (new_eip > limit) {
1718                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1719             }
1720             /* from this point, not restartable */
1721             SET_ESP(sa.sp, sa.sp_mask);
1722             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1723                                    get_seg_base(e1, e2), limit, e2);
1724             env->eip = new_eip;
1725         }
1726     } else {
1727         /* check gate type */
1728         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1729         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1730         rpl = new_cs & 3;
1731 
1732 #ifdef TARGET_X86_64
1733         if (env->efer & MSR_EFER_LMA) {
1734             if (type != 12) {
1735                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1736             }
1737         }
1738 #endif
1739 
1740         switch (type) {
1741         case 1: /* available 286 TSS */
1742         case 9: /* available 386 TSS */
1743         case 5: /* task gate */
1744             if (dpl < cpl || dpl < rpl) {
1745                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1746             }
1747             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1748             return;
1749         case 4: /* 286 call gate */
1750         case 12: /* 386 call gate */
1751             break;
1752         default:
1753             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1754             break;
1755         }
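        /* type 4 >> 3 == 0 (16-bit 286 gate), type 12 >> 3 == 1 (32-bit 386 gate) */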
1756         shift = type >> 3;
1757 
1758         if (dpl < cpl || dpl < rpl) {
1759             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1760         }
1761         /* check valid bit */
1762         if (!(e2 & DESC_P_MASK)) {
1763             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1764         }
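        /* the low 5 bits of the gate give the number of stack parameters to
           copy on a privilege change: words for 286 gates, dwords for 386 gates */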
1765         selector = e1 >> 16;
1766         param_count = e2 & 0x1f;
1767         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1768 #ifdef TARGET_X86_64
1769         if (env->efer & MSR_EFER_LMA) {
1770             /* load the upper 8 bytes of the 64-bit call gate */
1771             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1772                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1773                                        GETPC());
1774             }
1775             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1776             if (type != 0) {
1777                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1778                                        GETPC());
1779             }
1780             offset |= ((target_ulong)e1) << 32;
1781         }
1782 #endif
1783         if ((selector & 0xfffc) == 0) {
1784             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1785         }
1786 
1787         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1788             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1789         }
1790         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1791             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1792         }
1793         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1794         if (dpl > cpl) {
1795             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1796         }
1797 #ifdef TARGET_X86_64
1798         if (env->efer & MSR_EFER_LMA) {
1799             if (!(e2 & DESC_L_MASK)) {
1800                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1801             }
1802             if (e2 & DESC_B_MASK) {
1803                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1804             }
1805             shift++;
1806         }
1807 #endif
1808         if (!(e2 & DESC_P_MASK)) {
1809             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1810         }
1811 
1812         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1813             /* to inner privilege */
1814             sa.mmu_index = x86_mmu_index_pl(env, dpl);
1815 #ifdef TARGET_X86_64
1816             if (shift == 2) {
1817                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1818                 new_stack = 1;
1819                 sa.sp = get_rsp_from_tss(env, dpl);
1820                 sa.sp_mask = -1;
1821                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1822                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1823                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1824             } else
1825 #endif
1826             {
1827                 uint32_t sp32;
1828                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1829                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1830                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1831                           env->regs[R_ESP]);
1832                 if ((ss & 0xfffc) == 0) {
1833                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1834                 }
1835                 if ((ss & 3) != dpl) {
1836                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1837                 }
1838                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1839                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1840                 }
1841                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1842                 if (ss_dpl != dpl) {
1843                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1844                 }
1845                 if (!(ss_e2 & DESC_S_MASK) ||
1846                     (ss_e2 & DESC_CS_MASK) ||
1847                     !(ss_e2 & DESC_W_MASK)) {
1848                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1849                 }
1850                 if (!(ss_e2 & DESC_P_MASK)) {
1851                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1852                 }
1853 
1854                 sa.sp = sp32;
1855                 sa.sp_mask = get_sp_mask(ss_e2);
1856                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1857             }
1858 
1859             /* push_size = ((param_count * 2) + 8) << shift; */
1860             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1861             old_ssp = env->segs[R_SS].base;
1862 
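            /*
             * On a call-gate transfer to inner privilege the old SS:ESP is
             * pushed on the new stack and param_count parameters are copied
             * from the caller's stack; 64-bit call gates carry no parameter
             * count.
             */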
1863 #ifdef TARGET_X86_64
1864             if (shift == 2) {
1865                 /* XXX: verify if new stack address is canonical */
1866                 pushq(&sa, env->segs[R_SS].selector);
1867                 pushq(&sa, env->regs[R_ESP]);
1868                 /* parameters aren't supported for 64-bit call gates */
1869             } else
1870 #endif
1871             if (shift == 1) {
1872                 pushl(&sa, env->segs[R_SS].selector);
1873                 pushl(&sa, env->regs[R_ESP]);
1874                 for (i = param_count - 1; i >= 0; i--) {
1875                     val = cpu_ldl_data_ra(env,
1876                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1877                                           GETPC());
1878                     pushl(&sa, val);
1879                 }
1880             } else {
1881                 pushw(&sa, env->segs[R_SS].selector);
1882                 pushw(&sa, env->regs[R_ESP]);
1883                 for (i = param_count - 1; i >= 0; i--) {
1884                     val = cpu_lduw_data_ra(env,
1885                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1886                                            GETPC());
1887                     pushw(&sa, val);
1888                 }
1889             }
1890             new_stack = 1;
1891         } else {
1892             /* to same privilege */
1893             sa.mmu_index = x86_mmu_index_pl(env, cpl);
1894             sa.sp = env->regs[R_ESP];
1895             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1896             sa.ss_base = env->segs[R_SS].base;
1897             /* push_size = (4 << shift); */
1898             new_stack = 0;
1899         }
1900 
1901 #ifdef TARGET_X86_64
1902         if (shift == 2) {
1903             pushq(&sa, env->segs[R_CS].selector);
1904             pushq(&sa, next_eip);
1905         } else
1906 #endif
1907         if (shift == 1) {
1908             pushl(&sa, env->segs[R_CS].selector);
1909             pushl(&sa, next_eip);
1910         } else {
1911             pushw(&sa, env->segs[R_CS].selector);
1912             pushw(&sa, next_eip);
1913         }
1914 
1915         /* from this point, not restartable */
1916 
1917         if (new_stack) {
1918 #ifdef TARGET_X86_64
1919             if (shift == 2) {
1920                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1921             } else
1922 #endif
1923             {
1924                 ss = (ss & ~3) | dpl;
1925                 cpu_x86_load_seg_cache(env, R_SS, ss,
1926                                        sa.ss_base,
1927                                        get_seg_limit(ss_e1, ss_e2),
1928                                        ss_e2);
1929             }
1930         }
1931 
1932         selector = (selector & ~3) | dpl;
1933         cpu_x86_load_seg_cache(env, R_CS, selector,
1934                        get_seg_base(e1, e2),
1935                        get_seg_limit(e1, e2),
1936                        e2);
1937         SET_ESP(sa.sp, sa.sp_mask);
1938         env->eip = offset;
1939     }
1940 }
1941 
1942 /* real and vm86 mode iret */
1943 void helper_iret_real(CPUX86State *env, int shift)
1944 {
1945     uint32_t new_cs, new_eip, new_eflags;
1946     int eflags_mask;
1947     StackAccess sa;
1948 
1949     sa.env = env;
1950     sa.ra = GETPC();
1951     sa.mmu_index = x86_mmu_index_pl(env, 0);
1952     sa.sp_mask = 0xffff; /* XXX: use SS segment size? */
1953     sa.sp = env->regs[R_ESP];
1954     sa.ss_base = env->segs[R_SS].base;
1955 
1956     if (shift == 1) {
1957         /* 32 bits */
1958         new_eip = popl(&sa);
1959         new_cs = popl(&sa) & 0xffff;
1960         new_eflags = popl(&sa);
1961     } else {
1962         /* 16 bits */
1963         new_eip = popw(&sa);
1964         new_cs = popw(&sa);
1965         new_eflags = popw(&sa);
1966     }
1967     SET_ESP(sa.sp, sa.sp_mask);
1968     env->segs[R_CS].selector = new_cs;
1969     env->segs[R_CS].base = (new_cs << 4);
1970     env->eip = new_eip;
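    /* IOPL is writable by a real mode IRET but is preserved in VM86 mode */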
1971     if (env->eflags & VM_MASK) {
1972         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1973             NT_MASK;
1974     } else {
1975         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1976             RF_MASK | NT_MASK;
1977     }
1978     if (shift == 0) {
1979         eflags_mask &= 0xffff;
1980     }
1981     cpu_load_eflags(env, new_eflags, eflags_mask);
1982     env->hflags2 &= ~HF2_NMI_MASK;
1983 }
1984 
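/*
 * On a return to an outer privilege level, a data (or non-conforming code)
 * segment register whose DPL is lower than the new CPL must not remain
 * usable: load a null selector and clear the cached present bit.
 */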
1985 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1986 {
1987     int dpl;
1988     uint32_t e2;
1989 
1990     /* XXX: on x86_64, we do not want to nullify FS and GS because
1991        they may still contain a valid base; it is unclear how a real
1992        x86_64 CPU behaves here. */
1993     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1994         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1995         return;
1996     }
1997 
1998     e2 = env->segs[seg_reg].flags;
1999     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2000     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2001         /* data or non conforming code segment */
2002         if (dpl < cpl) {
2003             cpu_x86_load_seg_cache(env, seg_reg, 0,
2004                                    env->segs[seg_reg].base,
2005                                    env->segs[seg_reg].limit,
2006                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
2007         }
2008     }
2009 }
2010 
2011 /* protected mode iret */
2012 static inline void helper_ret_protected(CPUX86State *env, int shift,
2013                                         int is_iret, int addend,
2014                                         uintptr_t retaddr)
2015 {
2016     uint32_t new_cs, new_eflags, new_ss;
2017     uint32_t new_es, new_ds, new_fs, new_gs;
2018     uint32_t e1, e2, ss_e1, ss_e2;
2019     int cpl, dpl, rpl, eflags_mask, iopl;
2020     target_ulong new_eip, new_esp;
2021     StackAccess sa;
2022 
2023     cpl = env->hflags & HF_CPL_MASK;
2024 
2025     sa.env = env;
2026     sa.ra = retaddr;
2027     sa.mmu_index = x86_mmu_index_pl(env, cpl);
2028 
2029 #ifdef TARGET_X86_64
2030     if (shift == 2) {
2031         sa.sp_mask = -1;
2032     } else
2033 #endif
2034     {
2035         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
2036     }
2037     sa.sp = env->regs[R_ESP];
2038     sa.ss_base = env->segs[R_SS].base;
2039     new_eflags = 0; /* avoid warning */
2040 #ifdef TARGET_X86_64
2041     if (shift == 2) {
2042         new_eip = popq(&sa);
2043         new_cs = popq(&sa) & 0xffff;
2044         if (is_iret) {
2045             new_eflags = popq(&sa);
2046         }
2047     } else
2048 #endif
2049     {
2050         if (shift == 1) {
2051             /* 32 bits */
2052             new_eip = popl(&sa);
2053             new_cs = popl(&sa) & 0xffff;
2054             if (is_iret) {
2055                 new_eflags = popl(&sa);
2056                 if (new_eflags & VM_MASK) {
2057                     goto return_to_vm86;
2058                 }
2059             }
2060         } else {
2061             /* 16 bits */
2062             new_eip = popw(&sa);
2063             new_cs = popw(&sa);
2064             if (is_iret) {
2065                 new_eflags = popw(&sa);
2066             }
2067         }
2068     }
2069     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2070               new_cs, new_eip, shift, addend);
2071     LOG_PCALL_STATE(env_cpu(env));
2072     if ((new_cs & 0xfffc) == 0) {
2073         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2074     }
2075     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2076         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2077     }
2078     if (!(e2 & DESC_S_MASK) ||
2079         !(e2 & DESC_CS_MASK)) {
2080         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2081     }
2082     rpl = new_cs & 3;
2083     if (rpl < cpl) {
2084         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2085     }
2086     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2087     if (e2 & DESC_C_MASK) {
2088         if (dpl > rpl) {
2089             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2090         }
2091     } else {
2092         if (dpl != rpl) {
2093             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2094         }
2095     }
2096     if (!(e2 & DESC_P_MASK)) {
2097         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2098     }
2099 
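    /*
     * 'addend' is the immediate operand of a far RET imm16, releasing
     * parameters from the stack; it is zero for IRET.  In 64-bit mode IRET
     * always pops SS:RSP, so the same-privilege fast path below is not taken
     * for a 64-bit IRET.
     */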
2100     sa.sp += addend;
2101     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2102                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2103         /* return to same privilege level */
2104         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2105                        get_seg_base(e1, e2),
2106                        get_seg_limit(e1, e2),
2107                        e2);
2108     } else {
2109         /* return to different privilege level */
2110 #ifdef TARGET_X86_64
2111         if (shift == 2) {
2112             new_esp = popq(&sa);
2113             new_ss = popq(&sa) & 0xffff;
2114         } else
2115 #endif
2116         {
2117             if (shift == 1) {
2118                 /* 32 bits */
2119                 new_esp = popl(&sa);
2120                 new_ss = popl(&sa) & 0xffff;
2121             } else {
2122                 /* 16 bits */
2123                 new_esp = popw(&sa);
2124                 new_ss = popw(&sa);
2125             }
2126         }
2127         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2128                   new_ss, new_esp);
2129         if ((new_ss & 0xfffc) == 0) {
2130 #ifdef TARGET_X86_64
2131             /* NULL ss is allowed in long mode if the new CPL != 3 */
2132             /* XXX: test CS64? */
2133             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2134                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2135                                        0, 0xffffffff,
2136                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2137                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2138                                        DESC_W_MASK | DESC_A_MASK);
2139                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2140             } else
2141 #endif
2142             {
2143                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2144             }
2145         } else {
2146             if ((new_ss & 3) != rpl) {
2147                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2148             }
2149             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2150                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2151             }
2152             if (!(ss_e2 & DESC_S_MASK) ||
2153                 (ss_e2 & DESC_CS_MASK) ||
2154                 !(ss_e2 & DESC_W_MASK)) {
2155                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2156             }
2157             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2158             if (dpl != rpl) {
2159                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2160             }
2161             if (!(ss_e2 & DESC_P_MASK)) {
2162                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2163             }
2164             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2165                                    get_seg_base(ss_e1, ss_e2),
2166                                    get_seg_limit(ss_e1, ss_e2),
2167                                    ss_e2);
2168         }
2169 
2170         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2171                        get_seg_base(e1, e2),
2172                        get_seg_limit(e1, e2),
2173                        e2);
2174         sa.sp = new_esp;
2175 #ifdef TARGET_X86_64
2176         if (env->hflags & HF_CS64_MASK) {
2177             sa.sp_mask = -1;
2178         } else
2179 #endif
2180         {
2181             sa.sp_mask = get_sp_mask(ss_e2);
2182         }
2183 
2184         /* validate data segments */
2185         validate_seg(env, R_ES, rpl);
2186         validate_seg(env, R_DS, rpl);
2187         validate_seg(env, R_FS, rpl);
2188         validate_seg(env, R_GS, rpl);
2189 
2190         sa.sp += addend;
2191     }
2192     SET_ESP(sa.sp, sa.sp_mask);
2193     env->eip = new_eip;
2194     if (is_iret) {
2195         /* NOTE: 'cpl' is the _old_ CPL */
2196         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2197         if (cpl == 0) {
2198             eflags_mask |= IOPL_MASK;
2199         }
2200         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2201         if (cpl <= iopl) {
2202             eflags_mask |= IF_MASK;
2203         }
2204         if (shift == 0) {
2205             eflags_mask &= 0xffff;
2206         }
2207         cpu_load_eflags(env, new_eflags, eflags_mask);
2208     }
2209     return;
2210 
2211  return_to_vm86:
2212     new_esp = popl(&sa);
2213     new_ss = popl(&sa);
2214     new_es = popl(&sa);
2215     new_ds = popl(&sa);
2216     new_fs = popl(&sa);
2217     new_gs = popl(&sa);
2218 
2219     /* modify processor state */
2220     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2221                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2222                     VIP_MASK);
2223     load_seg_vm(env, R_CS, new_cs & 0xffff);
2224     load_seg_vm(env, R_SS, new_ss & 0xffff);
2225     load_seg_vm(env, R_ES, new_es & 0xffff);
2226     load_seg_vm(env, R_DS, new_ds & 0xffff);
2227     load_seg_vm(env, R_FS, new_fs & 0xffff);
2228     load_seg_vm(env, R_GS, new_gs & 0xffff);
2229 
2230     env->eip = new_eip & 0xffff;
2231     env->regs[R_ESP] = new_esp;
2232 }
2233 
2234 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2235 {
2236     int tss_selector, type;
2237     uint32_t e1, e2;
2238 
2239     /* special case: nested-task return through the back link in the current TSS */
2240     if (env->eflags & NT_MASK) {
2241 #ifdef TARGET_X86_64
2242         if (env->hflags & HF_LMA_MASK) {
2243             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2244         }
2245 #endif
2246         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2247         if (tss_selector & 4) {
2248             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2249         }
2250         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2251             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2252         }
2253         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2254         /* NOTE: a single test checks both the system-segment bit and the
2255                  busy-TSS type (286 or 386) */
2255         if (type != 3) {
2256             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2257         }
2258         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2259     } else {
2260         helper_ret_protected(env, shift, 1, 0, GETPC());
2261     }
2262     env->hflags2 &= ~HF2_NMI_MASK;
2263 }
2264 
2265 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2266 {
2267     helper_ret_protected(env, shift, 0, addend, GETPC());
2268 }
2269 
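/*
 * SYSENTER: enter CPL 0 with a flat CS taken from IA32_SYSENTER_CS and
 * SS = CS + 8; VM, IF and RF are cleared and ESP/EIP come from the
 * SYSENTER_ESP/SYSENTER_EIP MSRs.
 */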
2270 void helper_sysenter(CPUX86State *env)
2271 {
2272     if (env->sysenter_cs == 0) {
2273         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2274     }
2275     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2276 
2277 #ifdef TARGET_X86_64
2278     if (env->hflags & HF_LMA_MASK) {
2279         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2280                                0, 0xffffffff,
2281                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2282                                DESC_S_MASK |
2283                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2284                                DESC_L_MASK);
2285     } else
2286 #endif
2287     {
2288         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2289                                0, 0xffffffff,
2290                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2291                                DESC_S_MASK |
2292                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2293     }
2294     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2295                            0, 0xffffffff,
2296                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2297                            DESC_S_MASK |
2298                            DESC_W_MASK | DESC_A_MASK);
2299     env->regs[R_ESP] = env->sysenter_esp;
2300     env->eip = env->sysenter_eip;
2301 }
2302 
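/*
 * SYSEXIT: return to CPL 3 with flat segments derived from IA32_SYSENTER_CS
 * (CS + 16 / SS + 24 for a 32-bit return, CS + 32 / SS + 40 for a 64-bit
 * return); EIP is taken from EDX and ESP from ECX.
 */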
2303 void helper_sysexit(CPUX86State *env, int dflag)
2304 {
2305     int cpl;
2306 
2307     cpl = env->hflags & HF_CPL_MASK;
2308     if (env->sysenter_cs == 0 || cpl != 0) {
2309         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2310     }
2311 #ifdef TARGET_X86_64
2312     if (dflag == 2) {
2313         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2314                                3, 0, 0xffffffff,
2315                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2316                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2317                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2318                                DESC_L_MASK);
2319         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2320                                3, 0, 0xffffffff,
2321                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2322                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2323                                DESC_W_MASK | DESC_A_MASK);
2324     } else
2325 #endif
2326     {
2327         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2328                                3, 0, 0xffffffff,
2329                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2330                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2331                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2332         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2333                                3, 0, 0xffffffff,
2334                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2335                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2336                                DESC_W_MASK | DESC_A_MASK);
2337     }
2338     env->regs[R_ESP] = env->regs[R_ECX];
2339     env->eip = env->regs[R_EDX];
2340 }
2341 
2342 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2343 {
2344     unsigned int limit;
2345     uint32_t e1, e2, selector;
2346     int rpl, dpl, cpl, type;
2347 
2348     selector = selector1 & 0xffff;
2349     assert(CC_OP == CC_OP_EFLAGS);
2350     if ((selector & 0xfffc) == 0) {
2351         goto fail;
2352     }
2353     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2354         goto fail;
2355     }
2356     rpl = selector & 3;
2357     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2358     cpl = env->hflags & HF_CPL_MASK;
2359     if (e2 & DESC_S_MASK) {
2360         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2361             /* conforming */
2362         } else {
2363             if (dpl < cpl || dpl < rpl) {
2364                 goto fail;
2365             }
2366         }
2367     } else {
2368         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
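        /* LSL only succeeds for system descriptors that have a limit:
           available/busy 286 and 386 TSS and LDT */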
2369         switch (type) {
2370         case 1:
2371         case 2:
2372         case 3:
2373         case 9:
2374         case 11:
2375             break;
2376         default:
2377             goto fail;
2378         }
2379         if (dpl < cpl || dpl < rpl) {
2380         fail:
2381             CC_SRC &= ~CC_Z;
2382             return 0;
2383         }
2384     }
2385     limit = get_seg_limit(e1, e2);
2386     CC_SRC |= CC_Z;
2387     return limit;
2388 }
2389 
2390 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2391 {
2392     uint32_t e1, e2, selector;
2393     int rpl, dpl, cpl, type;
2394 
2395     selector = selector1 & 0xffff;
2396     assert(CC_OP == CC_OP_EFLAGS);
2397     if ((selector & 0xfffc) == 0) {
2398         goto fail;
2399     }
2400     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2401         goto fail;
2402     }
2403     rpl = selector & 3;
2404     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2405     cpl = env->hflags & HF_CPL_MASK;
2406     if (e2 & DESC_S_MASK) {
2407         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2408             /* conforming */
2409         } else {
2410             if (dpl < cpl || dpl < rpl) {
2411                 goto fail;
2412             }
2413         }
2414     } else {
2415         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
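        /* LAR additionally accepts gate descriptors: 286 call gate (4),
           task gate (5) and 386 call gate (12) */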
2416         switch (type) {
2417         case 1:
2418         case 2:
2419         case 3:
2420         case 4:
2421         case 5:
2422         case 9:
2423         case 11:
2424         case 12:
2425             break;
2426         default:
2427             goto fail;
2428         }
2429         if (dpl < cpl || dpl < rpl) {
2430         fail:
2431             CC_SRC &= ~CC_Z;
2432             return 0;
2433         }
2434     }
2435     CC_SRC |= CC_Z;
2436     return e2 & 0x00f0ff00;
2437 }
2438 
2439 void helper_verr(CPUX86State *env, target_ulong selector1)
2440 {
2441     uint32_t e1, e2, eflags, selector;
2442     int rpl, dpl, cpl;
2443 
2444     selector = selector1 & 0xffff;
2445     eflags = cpu_cc_compute_all(env) | CC_Z;
2446     if ((selector & 0xfffc) == 0) {
2447         goto fail;
2448     }
2449     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2450         goto fail;
2451     }
2452     if (!(e2 & DESC_S_MASK)) {
2453         goto fail;
2454     }
2455     rpl = selector & 3;
2456     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2457     cpl = env->hflags & HF_CPL_MASK;
2458     if (e2 & DESC_CS_MASK) {
2459         if (!(e2 & DESC_R_MASK)) {
2460             goto fail;
2461         }
2462         if (!(e2 & DESC_C_MASK)) {
2463             if (dpl < cpl || dpl < rpl) {
2464                 goto fail;
2465             }
2466         }
2467     } else {
2468         if (dpl < cpl || dpl < rpl) {
2469         fail:
2470             eflags &= ~CC_Z;
2471         }
2472     }
2473     CC_SRC = eflags;
2474     CC_OP = CC_OP_EFLAGS;
2475 }
2476 
2477 void helper_verw(CPUX86State *env, target_ulong selector1)
2478 {
2479     uint32_t e1, e2, eflags, selector;
2480     int rpl, dpl, cpl;
2481 
2482     selector = selector1 & 0xffff;
2483     eflags = cpu_cc_compute_all(env) | CC_Z;
2484     if ((selector & 0xfffc) == 0) {
2485         goto fail;
2486     }
2487     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2488         goto fail;
2489     }
2490     if (!(e2 & DESC_S_MASK)) {
2491         goto fail;
2492     }
2493     rpl = selector & 3;
2494     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2495     cpl = env->hflags & HF_CPL_MASK;
2496     if (e2 & DESC_CS_MASK) {
2497         goto fail;
2498     } else {
2499         if (dpl < cpl || dpl < rpl) {
2500             goto fail;
2501         }
2502         if (!(e2 & DESC_W_MASK)) {
2503         fail:
2504             eflags &= ~CC_Z;
2505         }
2506     }
2507     CC_SRC = eflags;
2508     CC_OP = CC_OP_EFLAGS;
2509 }
2510