xref: /qemu/target/i386/tcg/seg_helper.c (revision c7c332831fa79c6d58ac53dab1c195041a4ba687)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "accel/tcg/cpu-ldst.h"
26 #include "accel/tcg/probe.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 #include "access.h"
31 #include "tcg-cpu.h"
32 
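/*
 * SET_ESP stores a new stack pointer while respecting the stack size:
 * depending on sp_mask only the low 16 bits, the low 32 bits or the
 * whole register is updated.
 */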
33 #ifdef TARGET_X86_64
34 #define SET_ESP(val, sp_mask)                                   \
35     do {                                                        \
36         if ((sp_mask) == 0xffff) {                              \
37             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
38                 ((val) & 0xffff);                               \
39         } else if ((sp_mask) == 0xffffffffLL) {                 \
40             env->regs[R_ESP] = (uint32_t)(val);                 \
41         } else {                                                \
42             env->regs[R_ESP] = (val);                           \
43         }                                                       \
44     } while (0)
45 #else
46 #define SET_ESP(val, sp_mask)                                   \
47     do {                                                        \
48         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
49             ((val) & (sp_mask));                                \
50     } while (0)
51 #endif
52 
53 /* XXX: use mmu_index to have proper DPL support */
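/*
 * StackAccess gathers the state needed by the push/pop helpers below:
 * SS base, stack pointer and mask, the MMU index used for the memory
 * accesses, and the return address used to unwind on faults.
 */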
54 typedef struct StackAccess
55 {
56     CPUX86State *env;
57     uintptr_t ra;
58     target_ulong ss_base;
59     target_ulong sp;
60     target_ulong sp_mask;
61     int mmu_index;
62 } StackAccess;
63 
64 static void pushw(StackAccess *sa, uint16_t val)
65 {
66     sa->sp -= 2;
67     cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
68                       val, sa->mmu_index, sa->ra);
69 }
70 
71 static void pushl(StackAccess *sa, uint32_t val)
72 {
73     sa->sp -= 4;
74     cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
75                       val, sa->mmu_index, sa->ra);
76 }
77 
78 static uint16_t popw(StackAccess *sa)
79 {
80     uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
81                                       sa->ss_base + (sa->sp & sa->sp_mask),
82                                       sa->mmu_index, sa->ra);
83     sa->sp += 2;
84     return ret;
85 }
86 
87 static uint32_t popl(StackAccess *sa)
88 {
89     uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
90                                      sa->ss_base + (sa->sp & sa->sp_mask),
91                                      sa->mmu_index, sa->ra);
92     sa->sp += 4;
93     return ret;
94 }
95 
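/*
 * Collect the paging configuration from CR0, CR4 and EFER into a
 * single PG_MODE_* bitmask.
 */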
96 int get_pg_mode(CPUX86State *env)
97 {
98     int pg_mode = PG_MODE_PG;
99     if (!(env->cr[0] & CR0_PG_MASK)) {
100         return 0;
101     }
102     if (env->cr[0] & CR0_WP_MASK) {
103         pg_mode |= PG_MODE_WP;
104     }
105     if (env->cr[4] & CR4_PAE_MASK) {
106         pg_mode |= PG_MODE_PAE;
107         if (env->efer & MSR_EFER_NXE) {
108             pg_mode |= PG_MODE_NXE;
109         }
110     }
111     if (env->cr[4] & CR4_PSE_MASK) {
112         pg_mode |= PG_MODE_PSE;
113     }
114     if (env->cr[4] & CR4_SMEP_MASK) {
115         pg_mode |= PG_MODE_SMEP;
116     }
117     if (env->hflags & HF_LMA_MASK) {
118         pg_mode |= PG_MODE_LMA;
119         if (env->cr[4] & CR4_PKE_MASK) {
120             pg_mode |= PG_MODE_PKE;
121         }
122         if (env->cr[4] & CR4_PKS_MASK) {
123             pg_mode |= PG_MODE_PKS;
124         }
125         if (env->cr[4] & CR4_LA57_MASK) {
126             pg_mode |= PG_MODE_LA57;
127         }
128     }
129     return pg_mode;
130 }
131 
132 static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
133 {
134     int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
135     int mmu_index_base =
136         !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
137         (pl < 3 && (env->eflags & AC_MASK)
138          ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
139 
140     return mmu_index_base + mmu_index_32;
141 }
142 
143 int cpu_mmu_index_kernel(CPUX86State *env)
144 {
145     return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
146 }
147 
148 /* return non-zero on error */
149 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
150                                uint32_t *e2_ptr, int selector,
151                                uintptr_t retaddr)
152 {
153     SegmentCache *dt;
154     int index;
155     target_ulong ptr;
156 
157     if (selector & 0x4) {
158         dt = &env->ldt;
159     } else {
160         dt = &env->gdt;
161     }
162     index = selector & ~7;
163     if ((index + 7) > dt->limit) {
164         return -1;
165     }
166     ptr = dt->base + index;
167     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
168     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
169     return 0;
170 }
171 
172 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
173                                uint32_t *e2_ptr, int selector)
174 {
175     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
176 }
177 
178 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
179 {
180     unsigned int limit;
181 
182     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
183     if (e2 & DESC_G_MASK) {
184         limit = (limit << 12) | 0xfff;
185     }
186     return limit;
187 }
188 
189 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
190 {
191     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
192 }
193 
194 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
195                                          uint32_t e2)
196 {
197     sc->base = get_seg_base(e1, e2);
198     sc->limit = get_seg_limit(e1, e2);
199     sc->flags = e2;
200 }
201 
202 /* init the segment cache in vm86 mode. */
203 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
204 {
205     selector &= 0xffff;
206 
207     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
208                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
209                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
210 }
211 
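/*
 * Read the inner-level stack pointer (SS:ESP) for privilege level
 * 'dpl' from the current 16- or 32-bit TSS.
 */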
212 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
213                                        uint32_t *esp_ptr, int dpl,
214                                        uintptr_t retaddr)
215 {
216     X86CPU *cpu = env_archcpu(env);
217     int type, index, shift;
218 
219 #if 0
220     {
221         int i;
222         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
223         for (i = 0; i < env->tr.limit; i++) {
224             printf("%02x ", env->tr.base[i]);
225             if ((i & 7) == 7) {
226                 printf("\n");
227             }
228         }
229         printf("\n");
230     }
231 #endif
232 
233     if (!(env->tr.flags & DESC_P_MASK)) {
234         cpu_abort(CPU(cpu), "invalid tss");
235     }
236     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
237     if ((type & 7) != 1) {
238         cpu_abort(CPU(cpu), "invalid tss type");
239     }
240     shift = type >> 3;
241     index = (dpl * 4 + 2) << shift;
242     if (index + (4 << shift) - 1 > env->tr.limit) {
243         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
244     }
245     if (shift == 0) {
246         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
247         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
248     } else {
249         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
250         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
251     }
252 }
253 
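/*
 * Load a segment register during a task switch, raising #TS (or #NP
 * for a not-present segment) instead of #GP when the checks fail.
 */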
254 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
255                          int cpl, uintptr_t retaddr)
256 {
257     uint32_t e1, e2;
258     int rpl, dpl;
259 
260     if ((selector & 0xfffc) != 0) {
261         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
262             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
263         }
264         if (!(e2 & DESC_S_MASK)) {
265             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
266         }
267         rpl = selector & 3;
268         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
269         if (seg_reg == R_CS) {
270             if (!(e2 & DESC_CS_MASK)) {
271                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
272             }
273             if (dpl != rpl) {
274                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
275             }
276         } else if (seg_reg == R_SS) {
277             /* SS must be writable data */
278             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
279                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
280             }
281             if (dpl != cpl || dpl != rpl) {
282                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
283             }
284         } else {
285             /* not readable code */
286             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
287                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
288             }
289             /* if data or non-conforming code, check the access rights */
290             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
291                 if (dpl < cpl || dpl < rpl) {
292                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
293                 }
294             }
295         }
296         if (!(e2 & DESC_P_MASK)) {
297             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
298         }
299         cpu_x86_load_seg_cache(env, seg_reg, selector,
300                                get_seg_base(e1, e2),
301                                get_seg_limit(e1, e2),
302                                e2);
303     } else {
304         if (seg_reg == R_SS || seg_reg == R_CS) {
305             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
306         }
307     }
308 }
309 
310 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
311                          uintptr_t retaddr)
312 {
313     target_ulong ptr = env->gdt.base + (tss_selector & ~7);
314     uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
315 
316     if (value) {
317         e2 |= DESC_TSS_BUSY_MASK;
318     } else {
319         e2 &= ~DESC_TSS_BUSY_MASK;
320     }
321 
322     cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
323 }
324 
325 #define SWITCH_TSS_JMP  0
326 #define SWITCH_TSS_IRET 1
327 #define SWITCH_TSS_CALL 2
328 
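/*
 * Perform a hardware task switch: save the current CPU state into the
 * outgoing TSS, then load registers, segment selectors and the LDT
 * from the TSS referenced by tss_selector.  'source' distinguishes
 * JMP, CALL and IRET, which differ in how the busy and NT bits are
 * handled.
 */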
329 static void switch_tss_ra(CPUX86State *env, int tss_selector,
330                           uint32_t e1, uint32_t e2, int source,
331                           uint32_t next_eip, bool has_error_code,
332                           uint32_t error_code, uintptr_t retaddr)
333 {
334     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
335     target_ulong tss_base;
336     uint32_t new_regs[8], new_segs[6];
337     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
338     uint32_t old_eflags, eflags_mask;
339     SegmentCache *dt;
340     int mmu_index, index;
341     target_ulong ptr;
342     X86Access old, new;
343 
344     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
345     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
346               source);
347 
348     /* if it is a task gate, read and load the referenced TSS segment */
349     if (type == 5) {
350         if (!(e2 & DESC_P_MASK)) {
351             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
352         }
353         tss_selector = e1 >> 16;
354         if (tss_selector & 4) {
355             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
356         }
357         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
358             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
359         }
360         if (e2 & DESC_S_MASK) {
361             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
362         }
363         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
364         if ((type & 7) != 1) {
365             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
366         }
367     }
368 
369     if (!(e2 & DESC_P_MASK)) {
370         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
371     }
372 
373     if (type & 8) {
374         tss_limit_max = 103;
375     } else {
376         tss_limit_max = 43;
377     }
378     tss_limit = get_seg_limit(e1, e2);
379     tss_base = get_seg_base(e1, e2);
380     if ((tss_selector & 4) != 0 ||
381         tss_limit < tss_limit_max) {
382         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
383     }
384     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
385     if (old_type & 8) {
386         old_tss_limit_max = 103;
387     } else {
388         old_tss_limit_max = 43;
389     }
390 
391     /* new TSS must be busy iff the source is an IRET instruction  */
392     if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
393         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
394     }
395 
396     /* X86Access avoids memory exceptions during the task switch */
397     mmu_index = cpu_mmu_index_kernel(env);
398     access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
399                        MMU_DATA_STORE, mmu_index, retaddr);
400 
401     if (source == SWITCH_TSS_CALL) {
402         /* Probe for future write of parent task */
403         probe_access(env, tss_base, 2, MMU_DATA_STORE,
404                      mmu_index, retaddr);
405     }
406     /* While the true tss_limit may be larger, we don't access the iopb here. */
407     access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
408                        MMU_DATA_LOAD, mmu_index, retaddr);
409 
410     /* save the current state in the old TSS */
411     old_eflags = cpu_compute_eflags(env);
412     if (old_type & 8) {
413         /* 32 bit */
414         access_stl(&old, env->tr.base + 0x20, next_eip);
415         access_stl(&old, env->tr.base + 0x24, old_eflags);
416         access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
417         access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
418         access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
419         access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
420         access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
421         access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
422         access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
423         access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
424         for (i = 0; i < 6; i++) {
425             access_stw(&old, env->tr.base + (0x48 + i * 4),
426                        env->segs[i].selector);
427         }
428     } else {
429         /* 16 bit */
430         access_stw(&old, env->tr.base + 0x0e, next_eip);
431         access_stw(&old, env->tr.base + 0x10, old_eflags);
432         access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
433         access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
434         access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
435         access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
436         access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
437         access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
438         access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
439         access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
440         for (i = 0; i < 4; i++) {
441             access_stw(&old, env->tr.base + (0x22 + i * 2),
442                        env->segs[i].selector);
443         }
444     }
445 
446     /* read all the registers from the new TSS */
447     if (type & 8) {
448         /* 32 bit */
449         new_cr3 = access_ldl(&new, tss_base + 0x1c);
450         new_eip = access_ldl(&new, tss_base + 0x20);
451         new_eflags = access_ldl(&new, tss_base + 0x24);
452         for (i = 0; i < 8; i++) {
453             new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
454         }
455         for (i = 0; i < 6; i++) {
456             new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
457         }
458         new_ldt = access_ldw(&new, tss_base + 0x60);
459         new_trap = access_ldl(&new, tss_base + 0x64);
460     } else {
461         /* 16 bit */
462         new_cr3 = 0;
463         new_eip = access_ldw(&new, tss_base + 0x0e);
464         new_eflags = access_ldw(&new, tss_base + 0x10);
465         for (i = 0; i < 8; i++) {
466             new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
467         }
468         for (i = 0; i < 4; i++) {
469             new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
470         }
471         new_ldt = access_ldw(&new, tss_base + 0x2a);
472         new_segs[R_FS] = 0;
473         new_segs[R_GS] = 0;
474         new_trap = 0;
475     }
476     /* XXX: avoid a compiler warning; see
477      * http://support.amd.com/us/Processor_TechDocs/24593.pdf
478      * chapters 12.2.5 and 13.2.4 on how to implement the TSS Trap bit */
479     (void)new_trap;
480 
481     /* clear busy bit (it is restartable) */
482     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
483         tss_set_busy(env, env->tr.selector, 0, retaddr);
484     }
485 
486     if (source == SWITCH_TSS_IRET) {
487         old_eflags &= ~NT_MASK;
488         if (old_type & 8) {
489             access_stl(&old, env->tr.base + 0x24, old_eflags);
490         } else {
491             access_stw(&old, env->tr.base + 0x10, old_eflags);
492         }
493     }
494 
495     if (source == SWITCH_TSS_CALL) {
496         /*
497          * Thanks to the probe_access above, we know the first two
498          * bytes addressed by &new are writable too.
499          */
500         access_stw(&new, tss_base, env->tr.selector);
501         new_eflags |= NT_MASK;
502     }
503 
504     /* set busy bit */
505     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
506         tss_set_busy(env, tss_selector, 1, retaddr);
507     }
508 
509     /* set the new CPU state */
510 
511     /* now if an exception occurs, it will occur in the next task context */
512 
513     env->cr[0] |= CR0_TS_MASK;
514     env->hflags |= HF_TS_MASK;
515     env->tr.selector = tss_selector;
516     env->tr.base = tss_base;
517     env->tr.limit = tss_limit;
518     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
519 
520     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
521         cpu_x86_update_cr3(env, new_cr3);
522     }
523 
524     /* load all registers without raising an exception, then reload them
525        with possible exceptions */
526     env->eip = new_eip;
527     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
528         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
529     if (type & 8) {
530         cpu_load_eflags(env, new_eflags, eflags_mask);
531         for (i = 0; i < 8; i++) {
532             env->regs[i] = new_regs[i];
533         }
534     } else {
535         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
536         for (i = 0; i < 8; i++) {
537             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
538         }
539     }
540     if (new_eflags & VM_MASK) {
541         for (i = 0; i < 6; i++) {
542             load_seg_vm(env, i, new_segs[i]);
543         }
544     } else {
545         /* first just selectors as the rest may trigger exceptions */
546         for (i = 0; i < 6; i++) {
547             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
548         }
549     }
550 
551     env->ldt.selector = new_ldt & ~4;
552     env->ldt.base = 0;
553     env->ldt.limit = 0;
554     env->ldt.flags = 0;
555 
556     /* load the LDT */
557     if (new_ldt & 4) {
558         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
559     }
560 
561     if ((new_ldt & 0xfffc) != 0) {
562         dt = &env->gdt;
563         index = new_ldt & ~7;
564         if ((index + 7) > dt->limit) {
565             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
566         }
567         ptr = dt->base + index;
568         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
569         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
570         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
571             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
572         }
573         if (!(e2 & DESC_P_MASK)) {
574             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
575         }
576         load_seg_cache_raw_dt(&env->ldt, e1, e2);
577     }
578 
579     /* load the segments */
580     if (!(new_eflags & VM_MASK)) {
581         int cpl = new_segs[R_CS] & 3;
582         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
583         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
584         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
585         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
586         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
587         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
588     }
589 
590     /* check that env->eip is within the CS segment limit */
591     if (new_eip > env->segs[R_CS].limit) {
592         /* XXX: different exception if CALL? */
593         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
594     }
595 
596 #ifndef CONFIG_USER_ONLY
597     /* reset local breakpoints */
598     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
599         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
600     }
601 #endif
602 
603     if (has_error_code) {
604         int cpl = env->hflags & HF_CPL_MASK;
605         StackAccess sa;
606 
607         /* push the error code */
608         sa.env = env;
609         sa.ra = retaddr;
610         sa.mmu_index = x86_mmu_index_pl(env, cpl);
611         sa.sp = env->regs[R_ESP];
612         if (env->segs[R_SS].flags & DESC_B_MASK) {
613             sa.sp_mask = 0xffffffff;
614         } else {
615             sa.sp_mask = 0xffff;
616         }
617         sa.ss_base = env->segs[R_SS].base;
618         if (type & 8) {
619             pushl(&sa, error_code);
620         } else {
621             pushw(&sa, error_code);
622         }
623         SET_ESP(sa.sp, sa.sp_mask);
624     }
625 }
626 
627 static void switch_tss(CPUX86State *env, int tss_selector,
628                        uint32_t e1, uint32_t e2, int source,
629                        uint32_t next_eip, bool has_error_code,
630                        int error_code)
631 {
632     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip,
633                   has_error_code, error_code, 0);
634 }
635 
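/*
 * Stack pointer mask for the stack segment described by e2:
 * 0 in 64-bit mode (no masking), otherwise 32 or 16 bits.
 */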
636 static inline unsigned int get_sp_mask(unsigned int e2)
637 {
638 #ifdef TARGET_X86_64
639     if (e2 & DESC_L_MASK) {
640         return 0;
641     } else
642 #endif
643     if (e2 & DESC_B_MASK) {
644         return 0xffffffff;
645     } else {
646         return 0xffff;
647     }
648 }
649 
650 static int exception_is_fault(int intno)
651 {
652     switch (intno) {
653         /*
654          * #DB can be both fault- and trap-like, but it never sets RF=1
655          * in the RFLAGS value pushed on the stack.
656          */
657     case EXCP01_DB:
658     case EXCP03_INT3:
659     case EXCP04_INTO:
660     case EXCP08_DBLE:
661     case EXCP12_MCHK:
662         return 0;
663     }
664     /* Everything else, including reserved exceptions, is a fault. */
665     return 1;
666 }
667 
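/*
 * Exceptions that push an error code: #DF, #TS, #NP, #SS, #GP, #PF
 * and #AC (vectors 8, 10-14 and 17).
 */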
668 int exception_has_error_code(int intno)
669 {
670     switch (intno) {
671     case 8:
672     case 10:
673     case 11:
674     case 12:
675     case 13:
676     case 14:
677     case 17:
678         return 1;
679     }
680     return 0;
681 }
682 
683 /* protected mode interrupt */
684 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
685                                    int error_code, unsigned int next_eip,
686                                    int is_hw)
687 {
688     SegmentCache *dt;
689     target_ulong ptr;
690     int type, dpl, selector, ss_dpl, cpl;
691     int has_error_code, new_stack, shift;
692     uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
693     uint32_t old_eip, eflags;
694     int vm86 = env->eflags & VM_MASK;
695     StackAccess sa;
696     bool set_rf;
697 
698     has_error_code = 0;
699     if (!is_int && !is_hw) {
700         has_error_code = exception_has_error_code(intno);
701     }
702     if (is_int) {
703         old_eip = next_eip;
704         set_rf = false;
705     } else {
706         old_eip = env->eip;
707         set_rf = exception_is_fault(intno);
708     }
709 
710     dt = &env->idt;
711     if (intno * 8 + 7 > dt->limit) {
712         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
713     }
714     ptr = dt->base + intno * 8;
715     e1 = cpu_ldl_kernel(env, ptr);
716     e2 = cpu_ldl_kernel(env, ptr + 4);
717     /* check gate type */
718     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
719     switch (type) {
720     case 5: /* task gate */
721     case 6: /* 286 interrupt gate */
722     case 7: /* 286 trap gate */
723     case 14: /* 386 interrupt gate */
724     case 15: /* 386 trap gate */
725         break;
726     default:
727         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
728         break;
729     }
730     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
731     cpl = env->hflags & HF_CPL_MASK;
732     /* check privilege if software int */
733     if (is_int && dpl < cpl) {
734         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
735     }
736 
737     sa.env = env;
738     sa.ra = 0;
739 
740     if (type == 5) {
741         /* task gate */
742         /* must do that check here to return the correct error code */
743         if (!(e2 & DESC_P_MASK)) {
744             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
745         }
746         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip,
747                    has_error_code, error_code);
748         return;
749     }
750 
751     /* Otherwise, trap or interrupt gate */
752 
753     /* check valid bit */
754     if (!(e2 & DESC_P_MASK)) {
755         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
756     }
757     selector = e1 >> 16;
758     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
759     if ((selector & 0xfffc) == 0) {
760         raise_exception_err(env, EXCP0D_GPF, 0);
761     }
762     if (load_segment(env, &e1, &e2, selector) != 0) {
763         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
764     }
765     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
766         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
767     }
768     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
769     if (dpl > cpl) {
770         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
771     }
772     if (!(e2 & DESC_P_MASK)) {
773         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
774     }
775     if (e2 & DESC_C_MASK) {
776         dpl = cpl;
777     }
778     sa.mmu_index = x86_mmu_index_pl(env, dpl);
779     if (dpl < cpl) {
780         /* to inner privilege */
781         uint32_t esp;
782         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
783         if ((ss & 0xfffc) == 0) {
784             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
785         }
786         if ((ss & 3) != dpl) {
787             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
788         }
789         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
790             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
791         }
792         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
793         if (ss_dpl != dpl) {
794             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
795         }
796         if (!(ss_e2 & DESC_S_MASK) ||
797             (ss_e2 & DESC_CS_MASK) ||
798             !(ss_e2 & DESC_W_MASK)) {
799             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
800         }
801         if (!(ss_e2 & DESC_P_MASK)) {
802             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
803         }
804         new_stack = 1;
805         sa.sp = esp;
806         sa.sp_mask = get_sp_mask(ss_e2);
807         sa.ss_base = get_seg_base(ss_e1, ss_e2);
808     } else  {
809         /* to same privilege */
810         if (vm86) {
811             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
812         }
813         new_stack = 0;
814         sa.sp = env->regs[R_ESP];
815         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
816         sa.ss_base = env->segs[R_SS].base;
817     }
818 
819     shift = type >> 3;
820 
821 #if 0
822     /* XXX: check that enough room is available */
823     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
824     if (vm86) {
825         push_size += 8;
826     }
827     push_size <<= shift;
828 #endif
829     eflags = cpu_compute_eflags(env);
830     /*
831      * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
832      * as is.  AMD behavior could be implemented in check_hw_breakpoints().
833      */
834     if (set_rf) {
835         eflags |= RF_MASK;
836     }
837 
838     if (shift == 1) {
839         if (new_stack) {
840             if (vm86) {
841                 pushl(&sa, env->segs[R_GS].selector);
842                 pushl(&sa, env->segs[R_FS].selector);
843                 pushl(&sa, env->segs[R_DS].selector);
844                 pushl(&sa, env->segs[R_ES].selector);
845             }
846             pushl(&sa, env->segs[R_SS].selector);
847             pushl(&sa, env->regs[R_ESP]);
848         }
849         pushl(&sa, eflags);
850         pushl(&sa, env->segs[R_CS].selector);
851         pushl(&sa, old_eip);
852         if (has_error_code) {
853             pushl(&sa, error_code);
854         }
855     } else {
856         if (new_stack) {
857             if (vm86) {
858                 pushw(&sa, env->segs[R_GS].selector);
859                 pushw(&sa, env->segs[R_FS].selector);
860                 pushw(&sa, env->segs[R_DS].selector);
861                 pushw(&sa, env->segs[R_ES].selector);
862             }
863             pushw(&sa, env->segs[R_SS].selector);
864             pushw(&sa, env->regs[R_ESP]);
865         }
866         pushw(&sa, eflags);
867         pushw(&sa, env->segs[R_CS].selector);
868         pushw(&sa, old_eip);
869         if (has_error_code) {
870             pushw(&sa, error_code);
871         }
872     }
873 
874     /* an interrupt gate clears the IF flag */
875     if ((type & 1) == 0) {
876         env->eflags &= ~IF_MASK;
877     }
878     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
879 
880     if (new_stack) {
881         if (vm86) {
882             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
883             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
884             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
885             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
886         }
887         ss = (ss & ~3) | dpl;
888         cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
889                                get_seg_limit(ss_e1, ss_e2), ss_e2);
890     }
891     SET_ESP(sa.sp, sa.sp_mask);
892 
893     selector = (selector & ~3) | dpl;
894     cpu_x86_load_seg_cache(env, R_CS, selector,
895                    get_seg_base(e1, e2),
896                    get_seg_limit(e1, e2),
897                    e2);
898     env->eip = offset;
899 }
900 
901 #ifdef TARGET_X86_64
902 
903 static void pushq(StackAccess *sa, uint64_t val)
904 {
905     sa->sp -= 8;
906     cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
907 }
908 
909 static uint64_t popq(StackAccess *sa)
910 {
911     uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
912     sa->sp += 8;
913     return ret;
914 }
915 
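/*
 * Read a stack pointer from the 64-bit TSS: levels 0-2 select RSP0-2,
 * levels 4-10 select IST1-7 (callers pass ist + 3 for IST entries).
 */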
916 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
917 {
918     X86CPU *cpu = env_archcpu(env);
919     int index, pg_mode;
920     target_ulong rsp;
921     int32_t sext;
922 
923 #if 0
924     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
925            env->tr.base, env->tr.limit);
926 #endif
927 
928     if (!(env->tr.flags & DESC_P_MASK)) {
929         cpu_abort(CPU(cpu), "invalid tss");
930     }
931     index = 8 * level + 4;
932     if ((index + 7) > env->tr.limit) {
933         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
934     }
935 
936     rsp = cpu_ldq_kernel(env, env->tr.base + index);
937 
938     /* test virtual address sign extension */
939     pg_mode = get_pg_mode(env);
940     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
941     if (sext != 0 && sext != -1) {
942         raise_exception_err(env, EXCP0C_STACK, 0);
943     }
944 
945     return rsp;
946 }
947 
948 /* 64 bit interrupt */
949 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
950                            int error_code, target_ulong next_eip, int is_hw)
951 {
952     SegmentCache *dt;
953     target_ulong ptr;
954     int type, dpl, selector, cpl, ist;
955     int has_error_code, new_stack;
956     uint32_t e1, e2, e3, eflags;
957     target_ulong old_eip, offset;
958     bool set_rf;
959     StackAccess sa;
960 
961     has_error_code = 0;
962     if (!is_int && !is_hw) {
963         has_error_code = exception_has_error_code(intno);
964     }
965     if (is_int) {
966         old_eip = next_eip;
967         set_rf = false;
968     } else {
969         old_eip = env->eip;
970         set_rf = exception_is_fault(intno);
971     }
972 
973     dt = &env->idt;
974     if (intno * 16 + 15 > dt->limit) {
975         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
976     }
977     ptr = dt->base + intno * 16;
978     e1 = cpu_ldl_kernel(env, ptr);
979     e2 = cpu_ldl_kernel(env, ptr + 4);
980     e3 = cpu_ldl_kernel(env, ptr + 8);
981     /* check gate type */
982     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
983     switch (type) {
984     case 14: /* 386 interrupt gate */
985     case 15: /* 386 trap gate */
986         break;
987     default:
988         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
989         break;
990     }
991     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
992     cpl = env->hflags & HF_CPL_MASK;
993     /* check privilege if software int */
994     if (is_int && dpl < cpl) {
995         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
996     }
997     /* check valid bit */
998     if (!(e2 & DESC_P_MASK)) {
999         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
1000     }
1001     selector = e1 >> 16;
1002     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1003     ist = e2 & 7;
1004     if ((selector & 0xfffc) == 0) {
1005         raise_exception_err(env, EXCP0D_GPF, 0);
1006     }
1007 
1008     if (load_segment(env, &e1, &e2, selector) != 0) {
1009         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1010     }
1011     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1012         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1013     }
1014     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1015     if (dpl > cpl) {
1016         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1017     }
1018     if (!(e2 & DESC_P_MASK)) {
1019         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1020     }
1021     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
1022         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1023     }
1024     if (e2 & DESC_C_MASK) {
1025         dpl = cpl;
1026     }
1027 
1028     sa.env = env;
1029     sa.ra = 0;
1030     sa.mmu_index = x86_mmu_index_pl(env, dpl);
1031     sa.sp_mask = -1;
1032     sa.ss_base = 0;
1033     if (dpl < cpl || ist != 0) {
1034         /* to inner privilege */
1035         new_stack = 1;
1036         sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
1037     } else {
1038         /* to same privilege */
1039         if (env->eflags & VM_MASK) {
1040             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1041         }
1042         new_stack = 0;
1043         sa.sp = env->regs[R_ESP];
1044     }
1045     sa.sp &= ~0xfLL; /* align stack */
1046 
1047     /* See do_interrupt_protected.  */
1048     eflags = cpu_compute_eflags(env);
1049     if (set_rf) {
1050         eflags |= RF_MASK;
1051     }
1052 
1053     pushq(&sa, env->segs[R_SS].selector);
1054     pushq(&sa, env->regs[R_ESP]);
1055     pushq(&sa, eflags);
1056     pushq(&sa, env->segs[R_CS].selector);
1057     pushq(&sa, old_eip);
1058     if (has_error_code) {
1059         pushq(&sa, error_code);
1060     }
1061 
1062     /* an interrupt gate clears the IF flag */
1063     if ((type & 1) == 0) {
1064         env->eflags &= ~IF_MASK;
1065     }
1066     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1067 
1068     if (new_stack) {
1069         uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
1070         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1071     }
1072     env->regs[R_ESP] = sa.sp;
1073 
1074     selector = (selector & ~3) | dpl;
1075     cpu_x86_load_seg_cache(env, R_CS, selector,
1076                    get_seg_base(e1, e2),
1077                    get_seg_limit(e1, e2),
1078                    e2);
1079     env->eip = offset;
1080 }
1081 #endif /* TARGET_X86_64 */
1082 
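/*
 * SYSRET: return to CPL 3 after a SYSCALL fast system call.  CS and SS
 * are derived from MSR_STAR[63:48]; in long mode RFLAGS is restored
 * from R11 and RIP from RCX.
 */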
1083 void helper_sysret(CPUX86State *env, int dflag)
1084 {
1085     int cpl, selector;
1086 
1087     if (!(env->efer & MSR_EFER_SCE)) {
1088         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1089     }
1090     cpl = env->hflags & HF_CPL_MASK;
1091     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1092         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1093     }
1094     selector = (env->star >> 48) & 0xffff;
1095 #ifdef TARGET_X86_64
1096     if (env->hflags & HF_LMA_MASK) {
1097         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1098                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1099                         NT_MASK);
1100         if (dflag == 2) {
1101             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1102                                    0, 0xffffffff,
1103                                    DESC_G_MASK | DESC_P_MASK |
1104                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1105                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1106                                    DESC_L_MASK);
1107             env->eip = env->regs[R_ECX];
1108         } else {
1109             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1110                                    0, 0xffffffff,
1111                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1112                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1113                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1114             env->eip = (uint32_t)env->regs[R_ECX];
1115         }
1116         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1117                                0, 0xffffffff,
1118                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1119                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1120                                DESC_W_MASK | DESC_A_MASK);
1121     } else
1122 #endif
1123     {
1124         env->eflags |= IF_MASK;
1125         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1126                                0, 0xffffffff,
1127                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1128                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1129                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1130         env->eip = (uint32_t)env->regs[R_ECX];
1131         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1132                                0, 0xffffffff,
1133                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1134                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1135                                DESC_W_MASK | DESC_A_MASK);
1136     }
1137 }
1138 
1139 /* real mode interrupt */
1140 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1141                               int error_code, unsigned int next_eip)
1142 {
1143     SegmentCache *dt;
1144     target_ulong ptr;
1145     int selector;
1146     uint32_t offset;
1147     uint32_t old_cs, old_eip;
1148     StackAccess sa;
1149 
1150     /* real mode (simpler!) */
1151     dt = &env->idt;
1152     if (intno * 4 + 3 > dt->limit) {
1153         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1154     }
1155     ptr = dt->base + intno * 4;
1156     offset = cpu_lduw_kernel(env, ptr);
1157     selector = cpu_lduw_kernel(env, ptr + 2);
1158 
1159     sa.env = env;
1160     sa.ra = 0;
1161     sa.sp = env->regs[R_ESP];
1162     sa.sp_mask = 0xffff;
1163     sa.ss_base = env->segs[R_SS].base;
1164     sa.mmu_index = x86_mmu_index_pl(env, 0);
1165 
1166     if (is_int) {
1167         old_eip = next_eip;
1168     } else {
1169         old_eip = env->eip;
1170     }
1171     old_cs = env->segs[R_CS].selector;
1172     /* XXX: use SS segment size? */
1173     pushw(&sa, cpu_compute_eflags(env));
1174     pushw(&sa, old_cs);
1175     pushw(&sa, old_eip);
1176 
1177     /* update processor state */
1178     SET_ESP(sa.sp, sa.sp_mask);
1179     env->eip = offset;
1180     env->segs[R_CS].selector = selector;
1181     env->segs[R_CS].base = (selector << 4);
1182     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1183 }
1184 
1185 /*
1186  * Begin execution of an interrupt. is_int is TRUE if coming from
1187  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1188  * instruction. It is only relevant if is_int is TRUE.
1189  */
1190 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1191                       int error_code, target_ulong next_eip, int is_hw)
1192 {
1193     CPUX86State *env = &cpu->env;
1194 
1195     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1196         if ((env->cr[0] & CR0_PE_MASK)) {
1197             static int count;
1198 
1199             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1200                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1201                      count, intno, error_code, is_int,
1202                      env->hflags & HF_CPL_MASK,
1203                      env->segs[R_CS].selector, env->eip,
1204                      (int)env->segs[R_CS].base + env->eip,
1205                      env->segs[R_SS].selector, env->regs[R_ESP]);
1206             if (intno == 0x0e) {
1207                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1208             } else {
1209                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1210             }
1211             qemu_log("\n");
1212             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1213 #if 0
1214             {
1215                 int i;
1216                 target_ulong ptr;
1217 
1218                 qemu_log("       code=");
1219                 ptr = env->segs[R_CS].base + env->eip;
1220                 for (i = 0; i < 16; i++) {
1221                     qemu_log(" %02x", ldub(ptr + i));
1222                 }
1223                 qemu_log("\n");
1224             }
1225 #endif
1226             count++;
1227         }
1228     }
1229     if (env->cr[0] & CR0_PE_MASK) {
1230 #if !defined(CONFIG_USER_ONLY)
1231         if (env->hflags & HF_GUEST_MASK) {
1232             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1233         }
1234 #endif
1235 #ifdef TARGET_X86_64
1236         if (env->hflags & HF_LMA_MASK) {
1237             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1238         } else
1239 #endif
1240         {
1241             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1242                                    is_hw);
1243         }
1244     } else {
1245 #if !defined(CONFIG_USER_ONLY)
1246         if (env->hflags & HF_GUEST_MASK) {
1247             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1248         }
1249 #endif
1250         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1251     }
1252 
1253 #if !defined(CONFIG_USER_ONLY)
1254     if (env->hflags & HF_GUEST_MASK) {
1255         CPUState *cs = CPU(cpu);
1256         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1257                                       offsetof(struct vmcb,
1258                                                control.event_inj));
1259 
1260         x86_stl_phys(cs,
1261                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1262                  event_inj & ~SVM_EVTINJ_VALID);
1263     }
1264 #endif
1265 }
1266 
1267 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1268 {
1269     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1270 }
1271 
1272 void helper_lldt(CPUX86State *env, int selector)
1273 {
1274     SegmentCache *dt;
1275     uint32_t e1, e2;
1276     int index, entry_limit;
1277     target_ulong ptr;
1278 
1279     selector &= 0xffff;
1280     if ((selector & 0xfffc) == 0) {
1281         /* XXX: NULL selector case: invalid LDT */
1282         env->ldt.base = 0;
1283         env->ldt.limit = 0;
1284     } else {
1285         if (selector & 0x4) {
1286             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1287         }
1288         dt = &env->gdt;
1289         index = selector & ~7;
1290 #ifdef TARGET_X86_64
1291         if (env->hflags & HF_LMA_MASK) {
1292             entry_limit = 15;
1293         } else
1294 #endif
1295         {
1296             entry_limit = 7;
1297         }
1298         if ((index + entry_limit) > dt->limit) {
1299             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1300         }
1301         ptr = dt->base + index;
1302         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1303         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1304         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1305             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1306         }
1307         if (!(e2 & DESC_P_MASK)) {
1308             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1309         }
1310 #ifdef TARGET_X86_64
1311         if (env->hflags & HF_LMA_MASK) {
1312             uint32_t e3;
1313 
1314             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1315             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1316             env->ldt.base |= (target_ulong)e3 << 32;
1317         } else
1318 #endif
1319         {
1320             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1321         }
1322     }
1323     env->ldt.selector = selector;
1324 }
1325 
1326 void helper_ltr(CPUX86State *env, int selector)
1327 {
1328     SegmentCache *dt;
1329     uint32_t e1, e2;
1330     int index, type, entry_limit;
1331     target_ulong ptr;
1332 
1333     selector &= 0xffff;
1334     if ((selector & 0xfffc) == 0) {
1335         /* NULL selector case: invalid TR */
1336         env->tr.base = 0;
1337         env->tr.limit = 0;
1338         env->tr.flags = 0;
1339     } else {
1340         if (selector & 0x4) {
1341             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1342         }
1343         dt = &env->gdt;
1344         index = selector & ~7;
1345 #ifdef TARGET_X86_64
1346         if (env->hflags & HF_LMA_MASK) {
1347             entry_limit = 15;
1348         } else
1349 #endif
1350         {
1351             entry_limit = 7;
1352         }
1353         if ((index + entry_limit) > dt->limit) {
1354             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1355         }
1356         ptr = dt->base + index;
1357         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1358         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1359         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1360         if ((e2 & DESC_S_MASK) ||
1361             (type != 1 && type != 9)) {
1362             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1363         }
1364         if (!(e2 & DESC_P_MASK)) {
1365             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1366         }
1367 #ifdef TARGET_X86_64
1368         if (env->hflags & HF_LMA_MASK) {
1369             uint32_t e3, e4;
1370 
1371             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1372             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1373             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1374                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1375             }
1376             load_seg_cache_raw_dt(&env->tr, e1, e2);
1377             env->tr.base |= (target_ulong)e3 << 32;
1378         } else
1379 #endif
1380         {
1381             load_seg_cache_raw_dt(&env->tr, e1, e2);
1382         }
1383         e2 |= DESC_TSS_BUSY_MASK;
1384         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1385     }
1386     env->tr.selector = selector;
1387 }
1388 
1389 /* only works in protected mode, not VM86. seg_reg must be != R_CS */
1390 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1391 {
1392     uint32_t e1, e2;
1393     int cpl, dpl, rpl;
1394     SegmentCache *dt;
1395     int index;
1396     target_ulong ptr;
1397 
1398     selector &= 0xffff;
1399     cpl = env->hflags & HF_CPL_MASK;
1400     if ((selector & 0xfffc) == 0) {
1401         /* null selector case */
1402         if (seg_reg == R_SS
1403 #ifdef TARGET_X86_64
1404             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1405 #endif
1406             ) {
1407             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1408         }
1409         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1410     } else {
1411 
1412         if (selector & 0x4) {
1413             dt = &env->ldt;
1414         } else {
1415             dt = &env->gdt;
1416         }
1417         index = selector & ~7;
1418         if ((index + 7) > dt->limit) {
1419             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1420         }
1421         ptr = dt->base + index;
1422         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1423         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1424 
1425         if (!(e2 & DESC_S_MASK)) {
1426             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1427         }
1428         rpl = selector & 3;
1429         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1430         if (seg_reg == R_SS) {
1431             /* must be writable segment */
1432             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1433                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1434             }
1435             if (rpl != cpl || dpl != cpl) {
1436                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1437             }
1438         } else {
1439             /* must be readable segment */
1440             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1441                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1442             }
1443 
1444             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1445                 /* if not conforming code, test rights */
1446                 if (dpl < cpl || dpl < rpl) {
1447                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1448                 }
1449             }
1450         }
1451 
1452         if (!(e2 & DESC_P_MASK)) {
1453             if (seg_reg == R_SS) {
1454                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1455             } else {
1456                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1457             }
1458         }
1459 
1460         /* set the access bit if not already set */
1461         if (!(e2 & DESC_A_MASK)) {
1462             e2 |= DESC_A_MASK;
1463             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1464         }
1465 
1466         cpu_x86_load_seg_cache(env, seg_reg, selector,
1467                        get_seg_base(e1, e2),
1468                        get_seg_limit(e1, e2),
1469                        e2);
1470 #if 0
1471         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1472                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1473 #endif
1474     }
1475 }
1476 
1477 /* protected mode jump */
1478 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1479                            target_ulong next_eip)
1480 {
1481     int gate_cs, type;
1482     uint32_t e1, e2, cpl, dpl, rpl, limit;
1483 
1484     if ((new_cs & 0xfffc) == 0) {
1485         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1486     }
1487     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1488         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1489     }
1490     cpl = env->hflags & HF_CPL_MASK;
1491     if (e2 & DESC_S_MASK) {
1492         if (!(e2 & DESC_CS_MASK)) {
1493             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1494         }
1495         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1496         if (e2 & DESC_C_MASK) {
1497             /* conforming code segment */
1498             if (dpl > cpl) {
1499                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1500             }
1501         } else {
1502             /* non-conforming code segment */
1503             rpl = new_cs & 3;
1504             if (rpl > cpl) {
1505                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1506             }
1507             if (dpl != cpl) {
1508                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1509             }
1510         }
1511         if (!(e2 & DESC_P_MASK)) {
1512             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1513         }
1514         limit = get_seg_limit(e1, e2);
1515         if (new_eip > limit &&
1516             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1517             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1518         }
1519         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1520                        get_seg_base(e1, e2), limit, e2);
1521         env->eip = new_eip;
1522     } else {
1523         /* jump to call or task gate */
1524         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1525         rpl = new_cs & 3;
1526         cpl = env->hflags & HF_CPL_MASK;
1527         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1528 
1529 #ifdef TARGET_X86_64
1530         if (env->efer & MSR_EFER_LMA) {
1531             if (type != 12) {
1532                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1533             }
1534         }
1535 #endif
1536         switch (type) {
1537         case 1: /* 286 TSS */
1538         case 9: /* 386 TSS */
1539         case 5: /* task gate */
1540             if (dpl < cpl || dpl < rpl) {
1541                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1542             }
1543             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip,
1544                           false, 0, GETPC());
1545             break;
1546         case 4: /* 286 call gate */
1547         case 12: /* 386 call gate */
1548             if ((dpl < cpl) || (dpl < rpl)) {
1549                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1550             }
1551             if (!(e2 & DESC_P_MASK)) {
1552                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1553             }
1554             gate_cs = e1 >> 16;
1555             new_eip = (e1 & 0xffff);
1556             if (type == 12) {
1557                 new_eip |= (e2 & 0xffff0000);
1558             }
1559 
1560 #ifdef TARGET_X86_64
1561             if (env->efer & MSR_EFER_LMA) {
1562                 /* load the upper 8 bytes of the 64-bit call gate */
1563                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1564                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1565                                            GETPC());
1566                 }
1567                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1568                 if (type != 0) {
1569                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1570                                            GETPC());
1571                 }
1572                 new_eip |= ((target_ulong)e1) << 32;
1573             }
1574 #endif
1575 
1576             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1577                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1578             }
1579             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1580             /* must be code segment */
1581             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1582                  (DESC_S_MASK | DESC_CS_MASK))) {
1583                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1584             }
1585             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1586                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1587                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1588             }
1589 #ifdef TARGET_X86_64
1590             if (env->efer & MSR_EFER_LMA) {
1591                 if (!(e2 & DESC_L_MASK)) {
1592                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1593                 }
1594                 if (e2 & DESC_B_MASK) {
1595                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1596                 }
1597             }
1598 #endif
1599             if (!(e2 & DESC_P_MASK)) {
1600                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1601             }
1602             limit = get_seg_limit(e1, e2);
1603             if (new_eip > limit &&
1604                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1605                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1606             }
1607             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1608                                    get_seg_base(e1, e2), limit, e2);
1609             env->eip = new_eip;
1610             break;
1611         default:
1612             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1613             break;
1614         }
1615     }
1616 }
1617 
1618 /* real mode call */
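/*
 * Far CALL in real mode: push the return CS:IP (32 or 16 bit according
 * to the operand size) and load CS directly with base = selector << 4,
 * without any descriptor or privilege checks.
 */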
1619 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1620                        int shift, uint32_t next_eip)
1621 {
1622     StackAccess sa;
1623 
1624     sa.env = env;
1625     sa.ra = GETPC();
1626     sa.sp = env->regs[R_ESP];
1627     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1628     sa.ss_base = env->segs[R_SS].base;
1629     sa.mmu_index = x86_mmu_index_pl(env, 0);
1630 
1631     if (shift) {
1632         pushl(&sa, env->segs[R_CS].selector);
1633         pushl(&sa, next_eip);
1634     } else {
1635         pushw(&sa, env->segs[R_CS].selector);
1636         pushw(&sa, next_eip);
1637     }
1638 
1639     SET_ESP(sa.sp, sa.sp_mask);
1640     env->eip = new_eip;
1641     env->segs[R_CS].selector = new_cs;
1642     env->segs[R_CS].base = (new_cs << 4);
1643 }
1644 
1645 /* protected mode call */
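/*
 * Far CALL in protected mode: a direct call to a code segment (no stack
 * switch), a task switch through a TSS or task gate, or a call through a
 * call gate, which may switch to an inner-privilege stack and copy
 * parameters from the caller's stack.
 */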
1646 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1647                             int shift, target_ulong next_eip)
1648 {
1649     int new_stack, i;
1650     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1651     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1652     uint32_t val, limit, old_sp_mask;
1653     target_ulong old_ssp, offset;
1654     StackAccess sa;
1655 
1656     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1657     LOG_PCALL_STATE(env_cpu(env));
1658     if ((new_cs & 0xfffc) == 0) {
1659         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1660     }
1661     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1662         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1663     }
1664     cpl = env->hflags & HF_CPL_MASK;
1665     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1666 
1667     sa.env = env;
1668     sa.ra = GETPC();
1669 
1670     if (e2 & DESC_S_MASK) {
1671         /* "normal" far call, no stack switch possible */
1672         if (!(e2 & DESC_CS_MASK)) {
1673             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1674         }
1675         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1676         if (e2 & DESC_C_MASK) {
1677             /* conforming code segment */
1678             if (dpl > cpl) {
1679                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1680             }
1681         } else {
1682             /* non conforming code segment */
1683             rpl = new_cs & 3;
1684             if (rpl > cpl) {
1685                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1686             }
1687             if (dpl != cpl) {
1688                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1689             }
1690         }
1691         if (!(e2 & DESC_P_MASK)) {
1692             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1693         }
1694 
1695         sa.mmu_index = x86_mmu_index_pl(env, cpl);
1696 #ifdef TARGET_X86_64
1697         /* XXX: check 16/32 bit cases in long mode */
1698         if (shift == 2) {
1699             /* 64 bit case */
1700             sa.sp = env->regs[R_ESP];
1701             sa.sp_mask = -1;
1702             sa.ss_base = 0;
1703             pushq(&sa, env->segs[R_CS].selector);
1704             pushq(&sa, next_eip);
1705             /* from this point, not restartable */
1706             env->regs[R_ESP] = sa.sp;
1707             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1708                                    get_seg_base(e1, e2),
1709                                    get_seg_limit(e1, e2), e2);
1710             env->eip = new_eip;
1711         } else
1712 #endif
1713         {
1714             sa.sp = env->regs[R_ESP];
1715             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1716             sa.ss_base = env->segs[R_SS].base;
1717             if (shift) {
1718                 pushl(&sa, env->segs[R_CS].selector);
1719                 pushl(&sa, next_eip);
1720             } else {
1721                 pushw(&sa, env->segs[R_CS].selector);
1722                 pushw(&sa, next_eip);
1723             }
1724 
1725             limit = get_seg_limit(e1, e2);
1726             if (new_eip > limit) {
1727                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1728             }
1729             /* from this point, not restartable */
1730             SET_ESP(sa.sp, sa.sp_mask);
1731             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1732                                    get_seg_base(e1, e2), limit, e2);
1733             env->eip = new_eip;
1734         }
1735     } else {
1736         /* check gate type */
1737         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1738         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1739         rpl = new_cs & 3;
1740 
1741 #ifdef TARGET_X86_64
1742         if (env->efer & MSR_EFER_LMA) {
1743             if (type != 12) {
1744                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1745             }
1746         }
1747 #endif
1748 
1749         switch (type) {
1750         case 1: /* available 286 TSS */
1751         case 9: /* available 386 TSS */
1752         case 5: /* task gate */
1753             if (dpl < cpl || dpl < rpl) {
1754                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1755             }
1756             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip,
1757                           false, 0, GETPC());
1758             return;
1759         case 4: /* 286 call gate */
1760         case 12: /* 386 call gate */
1761             break;
1762         default:
1763             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1764             break;
1765         }
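        /*
         * Derive the gate operand size from the descriptor type: 4 (286
         * call gate) yields 0 (16 bit) and 12 (386 call gate) yields 1
         * (32 bit); it is bumped to 2 below for 64-bit gates in long mode.
         */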
1766         shift = type >> 3;
1767 
1768         if (dpl < cpl || dpl < rpl) {
1769             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1770         }
1771         /* check valid bit */
1772         if (!(e2 & DESC_P_MASK)) {
1773             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1774         }
1775         selector = e1 >> 16;
1776         param_count = e2 & 0x1f;
1777         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1778 #ifdef TARGET_X86_64
1779         if (env->efer & MSR_EFER_LMA) {
1780             /* load the upper 8 bytes of the 64-bit call gate */
1781             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1782                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1783                                        GETPC());
1784             }
1785             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1786             if (type != 0) {
1787                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1788                                        GETPC());
1789             }
1790             offset |= ((target_ulong)e1) << 32;
1791         }
1792 #endif
1793         if ((selector & 0xfffc) == 0) {
1794             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1795         }
1796 
1797         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1798             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1799         }
1800         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1801             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1802         }
1803         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1804         if (dpl > cpl) {
1805             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1806         }
1807 #ifdef TARGET_X86_64
1808         if (env->efer & MSR_EFER_LMA) {
1809             if (!(e2 & DESC_L_MASK)) {
1810                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1811             }
1812             if (e2 & DESC_B_MASK) {
1813                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1814             }
1815             shift++;
1816         }
1817 #endif
1818         if (!(e2 & DESC_P_MASK)) {
1819             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1820         }
1821 
1822         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1823             /* to inner privilege */
1824             sa.mmu_index = x86_mmu_index_pl(env, dpl);
1825 #ifdef TARGET_X86_64
1826             if (shift == 2) {
1827                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1828                 new_stack = 1;
1829                 sa.sp = get_rsp_from_tss(env, dpl);
1830                 sa.sp_mask = -1;
1831                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1832                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1833                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1834             } else
1835 #endif
1836             {
1837                 uint32_t sp32;
1838                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1839                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1840                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1841                           env->regs[R_ESP]);
1842                 if ((ss & 0xfffc) == 0) {
1843                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1844                 }
1845                 if ((ss & 3) != dpl) {
1846                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1847                 }
1848                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1849                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1850                 }
1851                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1852                 if (ss_dpl != dpl) {
1853                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1854                 }
1855                 if (!(ss_e2 & DESC_S_MASK) ||
1856                     (ss_e2 & DESC_CS_MASK) ||
1857                     !(ss_e2 & DESC_W_MASK)) {
1858                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1859                 }
1860                 if (!(ss_e2 & DESC_P_MASK)) {
1861                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1862                 }
1863 
1864                 sa.sp = sp32;
1865                 sa.sp_mask = get_sp_mask(ss_e2);
1866                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1867             }
1868 
1869             /* push_size = ((param_count * 2) + 8) << shift; */
1870             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1871             old_ssp = env->segs[R_SS].base;
1872 
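            /*
             * Push the caller's SS:ESP on the new stack, then copy the
             * param_count stack parameters from the old stack (dwords for
             * a 386 gate, words for a 286 gate; 64-bit gates carry no
             * parameters).
             */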
1873 #ifdef TARGET_X86_64
1874             if (shift == 2) {
1875                 /* XXX: verify if new stack address is canonical */
1876                 pushq(&sa, env->segs[R_SS].selector);
1877                 pushq(&sa, env->regs[R_ESP]);
1878                 /* parameters aren't supported for 64-bit call gates */
1879             } else
1880 #endif
1881             if (shift == 1) {
1882                 pushl(&sa, env->segs[R_SS].selector);
1883                 pushl(&sa, env->regs[R_ESP]);
1884                 for (i = param_count - 1; i >= 0; i--) {
1885                     val = cpu_ldl_data_ra(env,
1886                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1887                                           GETPC());
1888                     pushl(&sa, val);
1889                 }
1890             } else {
1891                 pushw(&sa, env->segs[R_SS].selector);
1892                 pushw(&sa, env->regs[R_ESP]);
1893                 for (i = param_count - 1; i >= 0; i--) {
1894                     val = cpu_lduw_data_ra(env,
1895                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1896                                            GETPC());
1897                     pushw(&sa, val);
1898                 }
1899             }
1900             new_stack = 1;
1901         } else {
1902             /* to same privilege */
1903             sa.mmu_index = x86_mmu_index_pl(env, cpl);
1904             sa.sp = env->regs[R_ESP];
1905             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1906             sa.ss_base = env->segs[R_SS].base;
1907             /* push_size = (4 << shift); */
1908             new_stack = 0;
1909         }
1910 
1911 #ifdef TARGET_X86_64
1912         if (shift == 2) {
1913             pushq(&sa, env->segs[R_CS].selector);
1914             pushq(&sa, next_eip);
1915         } else
1916 #endif
1917         if (shift == 1) {
1918             pushl(&sa, env->segs[R_CS].selector);
1919             pushl(&sa, next_eip);
1920         } else {
1921             pushw(&sa, env->segs[R_CS].selector);
1922             pushw(&sa, next_eip);
1923         }
1924 
1925         /* from this point, not restartable */
1926 
1927         if (new_stack) {
1928 #ifdef TARGET_X86_64
1929             if (shift == 2) {
1930                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1931             } else
1932 #endif
1933             {
1934                 ss = (ss & ~3) | dpl;
1935                 cpu_x86_load_seg_cache(env, R_SS, ss,
1936                                        sa.ss_base,
1937                                        get_seg_limit(ss_e1, ss_e2),
1938                                        ss_e2);
1939             }
1940         }
1941 
1942         selector = (selector & ~3) | dpl;
1943         cpu_x86_load_seg_cache(env, R_CS, selector,
1944                        get_seg_base(e1, e2),
1945                        get_seg_limit(e1, e2),
1946                        e2);
1947         SET_ESP(sa.sp, sa.sp_mask);
1948         env->eip = offset;
1949     }
1950 }
1951 
1952 /* real and vm86 mode iret */
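/*
 * Pop IP/EIP, CS and FLAGS from the stack.  In vm86 mode IOPL is not
 * writable by IRET; with a 16-bit operand size only the low 16 bits of
 * EFLAGS are updated.
 */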
1953 void helper_iret_real(CPUX86State *env, int shift)
1954 {
1955     uint32_t new_cs, new_eip, new_eflags;
1956     int eflags_mask;
1957     StackAccess sa;
1958 
1959     sa.env = env;
1960     sa.ra = GETPC();
1961     sa.mmu_index = x86_mmu_index_pl(env, 0);
1962     sa.sp_mask = 0xffff; /* XXX: use SS segment size? */
1963     sa.sp = env->regs[R_ESP];
1964     sa.ss_base = env->segs[R_SS].base;
1965 
1966     if (shift == 1) {
1967         /* 32 bits */
1968         new_eip = popl(&sa);
1969         new_cs = popl(&sa) & 0xffff;
1970         new_eflags = popl(&sa);
1971     } else {
1972         /* 16 bits */
1973         new_eip = popw(&sa);
1974         new_cs = popw(&sa);
1975         new_eflags = popw(&sa);
1976     }
1977     SET_ESP(sa.sp, sa.sp_mask);
1978     env->segs[R_CS].selector = new_cs;
1979     env->segs[R_CS].base = (new_cs << 4);
1980     env->eip = new_eip;
1981     if (env->eflags & VM_MASK) {
1982         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1983             NT_MASK;
1984     } else {
1985         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1986             RF_MASK | NT_MASK;
1987     }
1988     if (shift == 0) {
1989         eflags_mask &= 0xffff;
1990     }
1991     cpu_load_eflags(env, new_eflags, eflags_mask);
1992     env->hflags2 &= ~HF2_NMI_MASK;
1993 }
1994 
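/*
 * On a return to an outer privilege level, data segments and
 * non-conforming code segments whose DPL is below the new CPL are made
 * unusable: the cached selector is cleared and the present bit dropped,
 * so any further use of the register faults.
 */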
1995 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1996 {
1997     int dpl;
1998     uint32_t e2;
1999 
2000     /* XXX: on x86_64, we do not want to nullify FS and GS because
2001        they may still contain a valid base. I would be interested to
2002        know how a real x86_64 CPU behaves */
2003     if ((seg_reg == R_FS || seg_reg == R_GS) &&
2004         (env->segs[seg_reg].selector & 0xfffc) == 0) {
2005         return;
2006     }
2007 
2008     e2 = env->segs[seg_reg].flags;
2009     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2010     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2011         /* data or non conforming code segment */
2012         if (dpl < cpl) {
2013             cpu_x86_load_seg_cache(env, seg_reg, 0,
2014                                    env->segs[seg_reg].base,
2015                                    env->segs[seg_reg].limit,
2016                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
2017         }
2018     }
2019 }
2020 
2021 /* protected mode return (far RET and IRET) */
2022 static inline void helper_ret_protected(CPUX86State *env, int shift,
2023                                         int is_iret, int addend,
2024                                         uintptr_t retaddr)
2025 {
2026     uint32_t new_cs, new_eflags, new_ss;
2027     uint32_t new_es, new_ds, new_fs, new_gs;
2028     uint32_t e1, e2, ss_e1, ss_e2;
2029     int cpl, dpl, rpl, eflags_mask, iopl;
2030     target_ulong new_eip, new_esp;
2031     StackAccess sa;
2032 
2033     cpl = env->hflags & HF_CPL_MASK;
2034 
2035     sa.env = env;
2036     sa.ra = retaddr;
2037     sa.mmu_index = x86_mmu_index_pl(env, cpl);
2038 
2039 #ifdef TARGET_X86_64
2040     if (shift == 2) {
2041         sa.sp_mask = -1;
2042     } else
2043 #endif
2044     {
2045         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
2046     }
2047     sa.sp = env->regs[R_ESP];
2048     sa.ss_base = env->segs[R_SS].base;
2049     new_eflags = 0; /* avoid warning */
2050 #ifdef TARGET_X86_64
2051     if (shift == 2) {
2052         new_eip = popq(&sa);
2053         new_cs = popq(&sa) & 0xffff;
2054         if (is_iret) {
2055             new_eflags = popq(&sa);
2056         }
2057     } else
2058 #endif
2059     {
2060         if (shift == 1) {
2061             /* 32 bits */
2062             new_eip = popl(&sa);
2063             new_cs = popl(&sa) & 0xffff;
2064             if (is_iret) {
2065                 new_eflags = popl(&sa);
2066                 if (new_eflags & VM_MASK) {
2067                     goto return_to_vm86;
2068                 }
2069             }
2070         } else {
2071             /* 16 bits */
2072             new_eip = popw(&sa);
2073             new_cs = popw(&sa);
2074             if (is_iret) {
2075                 new_eflags = popw(&sa);
2076             }
2077         }
2078     }
2079     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2080               new_cs, new_eip, shift, addend);
2081     LOG_PCALL_STATE(env_cpu(env));
2082     if ((new_cs & 0xfffc) == 0) {
2083         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2084     }
2085     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2086         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2087     }
2088     if (!(e2 & DESC_S_MASK) ||
2089         !(e2 & DESC_CS_MASK)) {
2090         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2091     }
2092     rpl = new_cs & 3;
2093     if (rpl < cpl) {
2094         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2095     }
2096     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2097     if (e2 & DESC_C_MASK) {
2098         if (dpl > rpl) {
2099             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2100         }
2101     } else {
2102         if (dpl != rpl) {
2103             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2104         }
2105     }
2106     if (!(e2 & DESC_P_MASK)) {
2107         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2108     }
2109 
2110     sa.sp += addend;
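    /*
     * In 64-bit mode IRET always pops SS:RSP, even when returning to the
     * same privilege level; a far RET only does so on a privilege level
     * change, hence the extra HF_CS64_MASK/is_iret test.
     */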
2111     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2112                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2113         /* return to same privilege level */
2114         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2115                        get_seg_base(e1, e2),
2116                        get_seg_limit(e1, e2),
2117                        e2);
2118     } else {
2119         /* return to different privilege level */
2120 #ifdef TARGET_X86_64
2121         if (shift == 2) {
2122             new_esp = popq(&sa);
2123             new_ss = popq(&sa) & 0xffff;
2124         } else
2125 #endif
2126         {
2127             if (shift == 1) {
2128                 /* 32 bits */
2129                 new_esp = popl(&sa);
2130                 new_ss = popl(&sa) & 0xffff;
2131             } else {
2132                 /* 16 bits */
2133                 new_esp = popw(&sa);
2134                 new_ss = popw(&sa);
2135             }
2136         }
2137         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2138                   new_ss, new_esp);
2139         if ((new_ss & 0xfffc) == 0) {
2140 #ifdef TARGET_X86_64
2141             /* NULL ss is allowed in long mode if cpl != 3 */
2142             /* XXX: test CS64? */
2143             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2144                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2145                                        0, 0xffffffff,
2146                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2147                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2148                                        DESC_W_MASK | DESC_A_MASK);
2149                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2150             } else
2151 #endif
2152             {
2153                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2154             }
2155         } else {
2156             if ((new_ss & 3) != rpl) {
2157                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2158             }
2159             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2160                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2161             }
2162             if (!(ss_e2 & DESC_S_MASK) ||
2163                 (ss_e2 & DESC_CS_MASK) ||
2164                 !(ss_e2 & DESC_W_MASK)) {
2165                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2166             }
2167             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2168             if (dpl != rpl) {
2169                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2170             }
2171             if (!(ss_e2 & DESC_P_MASK)) {
2172                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2173             }
2174             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2175                                    get_seg_base(ss_e1, ss_e2),
2176                                    get_seg_limit(ss_e1, ss_e2),
2177                                    ss_e2);
2178         }
2179 
2180         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2181                        get_seg_base(e1, e2),
2182                        get_seg_limit(e1, e2),
2183                        e2);
2184         sa.sp = new_esp;
2185 #ifdef TARGET_X86_64
2186         if (env->hflags & HF_CS64_MASK) {
2187             sa.sp_mask = -1;
2188         } else
2189 #endif
2190         {
2191             sa.sp_mask = get_sp_mask(ss_e2);
2192         }
2193 
2194         /* validate data segments */
2195         validate_seg(env, R_ES, rpl);
2196         validate_seg(env, R_DS, rpl);
2197         validate_seg(env, R_FS, rpl);
2198         validate_seg(env, R_GS, rpl);
2199 
2200         sa.sp += addend;
2201     }
2202     SET_ESP(sa.sp, sa.sp_mask);
2203     env->eip = new_eip;
2204     if (is_iret) {
2205         /* NOTE: 'cpl' is the _old_ CPL */
2206         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2207         if (cpl == 0) {
2208             eflags_mask |= IOPL_MASK;
2209         }
2210         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2211         if (cpl <= iopl) {
2212             eflags_mask |= IF_MASK;
2213         }
2214         if (shift == 0) {
2215             eflags_mask &= 0xffff;
2216         }
2217         cpu_load_eflags(env, new_eflags, eflags_mask);
2218     }
2219     return;
2220 
2221  return_to_vm86:
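    /*
     * The 32-bit IRET frame for a return to vm86 mode also contains
     * ESP, SS, ES, DS, FS and GS.
     */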
2222     new_esp = popl(&sa);
2223     new_ss = popl(&sa);
2224     new_es = popl(&sa);
2225     new_ds = popl(&sa);
2226     new_fs = popl(&sa);
2227     new_gs = popl(&sa);
2228 
2229     /* modify processor state */
2230     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2231                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2232                     VIP_MASK);
2233     load_seg_vm(env, R_CS, new_cs & 0xffff);
2234     load_seg_vm(env, R_SS, new_ss & 0xffff);
2235     load_seg_vm(env, R_ES, new_es & 0xffff);
2236     load_seg_vm(env, R_DS, new_ds & 0xffff);
2237     load_seg_vm(env, R_FS, new_fs & 0xffff);
2238     load_seg_vm(env, R_GS, new_gs & 0xffff);
2239 
2240     env->eip = new_eip & 0xffff;
2241     env->regs[R_ESP] = new_esp;
2242 }
2243 
2244 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2245 {
2246     int tss_selector, type;
2247     uint32_t e1, e2;
2248 
2249     /* specific case for TSS */
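    /*
     * With EFLAGS.NT set, IRET performs a task return: the back-link
     * selector is read from offset 0 of the current TSS.  This form is
     * not available in long mode and raises #GP there.
     */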
2250     if (env->eflags & NT_MASK) {
2251 #ifdef TARGET_X86_64
2252         if (env->hflags & HF_LMA_MASK) {
2253             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2254         }
2255 #endif
2256         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2257         if (tss_selector & 4) {
2258             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2259         }
2260         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2261             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2262         }
2263         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2264         /* NOTE: check both the S bit and the busy TSS type (286 or 386) */
2265         if (type != 3) {
2266             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2267         }
2268         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip,
2269                       false, 0, GETPC());
2270     } else {
2271         helper_ret_protected(env, shift, 1, 0, GETPC());
2272     }
2273     env->hflags2 &= ~HF2_NMI_MASK;
2274 }
2275 
2276 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2277 {
2278     helper_ret_protected(env, shift, 0, addend, GETPC());
2279 }
2280 
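/*
 * SYSENTER: fast transition to CPL 0.  CS and SS are loaded as flat
 * segments derived from IA32_SYSENTER_CS (SS selector = CS selector + 8),
 * ESP/EIP come from the SYSENTER_ESP/SYSENTER_EIP MSRs, and VM, IF and
 * RF are cleared.  A zero SYSENTER_CS raises #GP.
 */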
2281 void helper_sysenter(CPUX86State *env)
2282 {
2283     if (env->sysenter_cs == 0) {
2284         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2285     }
2286     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2287 
2288 #ifdef TARGET_X86_64
2289     if (env->hflags & HF_LMA_MASK) {
2290         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2291                                0, 0xffffffff,
2292                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2293                                DESC_S_MASK |
2294                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2295                                DESC_L_MASK);
2296     } else
2297 #endif
2298     {
2299         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2300                                0, 0xffffffff,
2301                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2302                                DESC_S_MASK |
2303                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2304     }
2305     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2306                            0, 0xffffffff,
2307                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2308                            DESC_S_MASK |
2309                            DESC_W_MASK | DESC_A_MASK);
2310     env->regs[R_ESP] = env->sysenter_esp;
2311     env->eip = env->sysenter_eip;
2312 }
2313 
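/*
 * SYSEXIT: return to CPL 3.  CS and SS selectors are derived from
 * IA32_SYSENTER_CS (+16/+24 for the 32-bit form, +32/+40 for the 64-bit
 * form), the stack pointer is taken from ECX/RCX and the instruction
 * pointer from EDX/RDX.  #GP if executed outside CPL 0 or with a zero
 * SYSENTER_CS.
 */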
2314 void helper_sysexit(CPUX86State *env, int dflag)
2315 {
2316     int cpl;
2317 
2318     cpl = env->hflags & HF_CPL_MASK;
2319     if (env->sysenter_cs == 0 || cpl != 0) {
2320         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2321     }
2322 #ifdef TARGET_X86_64
2323     if (dflag == 2) {
2324         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2325                                3, 0, 0xffffffff,
2326                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2327                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2328                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2329                                DESC_L_MASK);
2330         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2331                                3, 0, 0xffffffff,
2332                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2333                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2334                                DESC_W_MASK | DESC_A_MASK);
2335     } else
2336 #endif
2337     {
2338         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2339                                3, 0, 0xffffffff,
2340                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2341                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2342                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2343         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2344                                3, 0, 0xffffffff,
2345                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2346                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2347                                DESC_W_MASK | DESC_A_MASK);
2348     }
2349     env->regs[R_ESP] = env->regs[R_ECX];
2350     env->eip = env->regs[R_EDX];
2351 }
2352 
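/*
 * LSL: if the selector passes the privilege check and refers to a
 * descriptor type that has a limit (code/data segment, TSS or LDT), set
 * ZF and return the limit; otherwise clear ZF and return 0.
 */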
2353 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2354 {
2355     unsigned int limit;
2356     uint32_t e1, e2, selector;
2357     int rpl, dpl, cpl, type;
2358 
2359     selector = selector1 & 0xffff;
2360     assert(CC_OP == CC_OP_EFLAGS);
2361     if ((selector & 0xfffc) == 0) {
2362         goto fail;
2363     }
2364     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2365         goto fail;
2366     }
2367     rpl = selector & 3;
2368     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2369     cpl = env->hflags & HF_CPL_MASK;
2370     if (e2 & DESC_S_MASK) {
2371         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2372             /* conforming */
2373         } else {
2374             if (dpl < cpl || dpl < rpl) {
2375                 goto fail;
2376             }
2377         }
2378     } else {
2379         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2380         switch (type) {
2381         case 1:
2382         case 2:
2383         case 3:
2384         case 9:
2385         case 11:
2386             break;
2387         default:
2388             goto fail;
2389         }
2390         if (dpl < cpl || dpl < rpl) {
2391         fail:
2392             CC_SRC &= ~CC_Z;
2393             return 0;
2394         }
2395     }
2396     limit = get_seg_limit(e1, e2);
2397     CC_SRC |= CC_Z;
2398     return limit;
2399 }
2400 
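/*
 * LAR: like LSL, but returns the access rights bytes (masked with
 * 0x00f0ff00) and also accepts call gate and task gate descriptor types.
 */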
2401 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2402 {
2403     uint32_t e1, e2, selector;
2404     int rpl, dpl, cpl, type;
2405 
2406     selector = selector1 & 0xffff;
2407     assert(CC_OP == CC_OP_EFLAGS);
2408     if ((selector & 0xfffc) == 0) {
2409         goto fail;
2410     }
2411     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2412         goto fail;
2413     }
2414     rpl = selector & 3;
2415     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2416     cpl = env->hflags & HF_CPL_MASK;
2417     if (e2 & DESC_S_MASK) {
2418         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2419             /* conforming */
2420         } else {
2421             if (dpl < cpl || dpl < rpl) {
2422                 goto fail;
2423             }
2424         }
2425     } else {
2426         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2427         switch (type) {
2428         case 1:
2429         case 2:
2430         case 3:
2431         case 4:
2432         case 5:
2433         case 9:
2434         case 11:
2435         case 12:
2436             break;
2437         default:
2438             goto fail;
2439         }
2440         if (dpl < cpl || dpl < rpl) {
2441         fail:
2442             CC_SRC &= ~CC_Z;
2443             return 0;
2444         }
2445     }
2446     CC_SRC |= CC_Z;
2447     return e2 & 0x00f0ff00;
2448 }
2449 
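/*
 * VERR: set ZF if the segment is readable at the current CPL/RPL (code
 * segments must be readable; non-conforming segments are also subject to
 * the DPL check), otherwise clear ZF.
 */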
2450 void helper_verr(CPUX86State *env, target_ulong selector1)
2451 {
2452     uint32_t e1, e2, eflags, selector;
2453     int rpl, dpl, cpl;
2454 
2455     selector = selector1 & 0xffff;
2456     eflags = cpu_cc_compute_all(env) | CC_Z;
2457     if ((selector & 0xfffc) == 0) {
2458         goto fail;
2459     }
2460     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2461         goto fail;
2462     }
2463     if (!(e2 & DESC_S_MASK)) {
2464         goto fail;
2465     }
2466     rpl = selector & 3;
2467     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2468     cpl = env->hflags & HF_CPL_MASK;
2469     if (e2 & DESC_CS_MASK) {
2470         if (!(e2 & DESC_R_MASK)) {
2471             goto fail;
2472         }
2473         if (!(e2 & DESC_C_MASK)) {
2474             if (dpl < cpl || dpl < rpl) {
2475                 goto fail;
2476             }
2477         }
2478     } else {
2479         if (dpl < cpl || dpl < rpl) {
2480         fail:
2481             eflags &= ~CC_Z;
2482         }
2483     }
2484     CC_SRC = eflags;
2485     CC_OP = CC_OP_EFLAGS;
2486 }
2487 
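/*
 * VERW: set ZF if the segment is a writable data segment accessible at
 * the current CPL/RPL, otherwise clear ZF.
 */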
2488 void helper_verw(CPUX86State *env, target_ulong selector1)
2489 {
2490     uint32_t e1, e2, eflags, selector;
2491     int rpl, dpl, cpl;
2492 
2493     selector = selector1 & 0xffff;
2494     eflags = cpu_cc_compute_all(env) | CC_Z;
2495     if ((selector & 0xfffc) == 0) {
2496         goto fail;
2497     }
2498     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2499         goto fail;
2500     }
2501     if (!(e2 & DESC_S_MASK)) {
2502         goto fail;
2503     }
2504     rpl = selector & 3;
2505     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2506     cpl = env->hflags & HF_CPL_MASK;
2507     if (e2 & DESC_CS_MASK) {
2508         goto fail;
2509     } else {
2510         if (dpl < cpl || dpl < rpl) {
2511             goto fail;
2512         }
2513         if (!(e2 & DESC_W_MASK)) {
2514         fail:
2515             eflags &= ~CC_Z;
2516         }
2517     }
2518     CC_SRC = eflags;
2519     CC_OP = CC_OP_EFLAGS;
2520 }
2521