xref: /qemu/target/i386/tcg/excp_helper.c (revision 6578eb25a07c0056976f466baf640ef59ae45ab5)
/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "exec/helper-proto.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
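    /*
     * Note: this matches the architectural double-fault rules: vector 0
     * (#DE) and vectors 10-13 (#TS, #NP, #SS, #GP) form the "contributory"
     * class tested below, while vector 14 (#PF, EXCP0E_PAGE) is handled
     * as its own class further down.
     */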
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interrupt or exception; it is handled in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip_addend,
 * added to env->eip, gives the address of the instruction following the
 * int instruction. It is only relevant if is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
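
/*
 * Illustrative usage (not a call site from this file): a TCG helper that
 * detects a fault typically passes GETPC() so the guest state can be
 * unwound to the faulting instruction, e.g.
 *
 *     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
 *
 * where "selector" stands for whatever value the caller wants reported in
 * the error code.  The non-_ra variants pass retaddr == 0, typically for
 * callers whose CPU state is already up to date.
 */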

#if defined(CONFIG_USER_ONLY)
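/*
 * Note: in user-mode emulation there is no guest page table to walk; a
 * faulting access is reported directly as a #PF with the U/S bit set
 * (plus W/R for writes), and the return value 1 asks the caller to
 * deliver that page fault, as in the softmmu variant below.
 */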
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;
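    /*
     * Note: is_write1 encodes the access type (0 = data read, 1 = data
     * write, 2 = instruction fetch), so (1 << is_write1) lines up with
     * PAGE_READ, PAGE_WRITE and PAGE_EXEC in the protection checks below;
     * is_write keeps only the "this is a store" bit.
     */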

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
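            /*
             * Note: this is the canonical-address check.  With LA57,
             * bits 63:57 must all equal bit 56; otherwise bits 63:48
             * must all equal bit 47.  A non-canonical address raises
             * #GP rather than #PF.
             */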
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
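                    /*
                     * The _notdirty store writes the accessed bit back
                     * without marking the page dirty for QEMU's own
                     * dirty tracking, presumably to avoid needless
                     * invalidation when page tables share a page with
                     * translated code.
                     */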
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
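
    /*
     * Note: PG_NX_MASK was XORed into ptep at every level, turning the
     * "no execute" bit into an "execute allowed" bit so that ANDing the
     * levels together keeps the most restrictive setting; the XOR below
     * flips it back, so a set PG_NX_MASK in ptep again means
     * non-executable.
     */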
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

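    /*
     * Note: MMU_KSMAP_IDX appears to be the kernel-mode index used while
     * SMAP is enforced (EFLAGS.AC clear), so user pages get no access at
     * all there; the PG_NX_MASK/CR4.SMEP test below likewise withholds
     * PAGE_EXEC from the kernel for user-accessible pages.
     */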
    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
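        /*
         * Note: the protection key lives in PTE bits 62:59; PKRU holds
         * two bits per key, AD (access disable) at bit 2*pk and WD
         * (write disable) at bit 2*pk + 1, which is why pkru_wd below is
         * masked with 2 rather than shifted down to 1.
         */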
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even with large pages, we map only one 4KB page at a time into the
       TLB, to avoid filling it too quickly. */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
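
    /*
     * Note: the #PF error code is assembled below from the architectural
     * bits: P (bit 0, set when the fault was not caused by a non-present
     * page), W/R (bit 1), U/S (bit 2), RSVD (bit 3), I/D (bit 4, set for
     * instruction fetches when NX or SMEP is in effect) and PK (bit 5,
     * set above for protection-key violations).
     */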
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* CR2 is not modified when the #PF is intercepted by SVM; the
           faulting address is reported in EXITINFO2 instead. */
        x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif