xref: /qemu/target/i386/tcg/excp_helper.c (revision ed69e8314d403d1bfa8c0210f850ffe69bb89dbe)
/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed.  It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
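    /*
     * Vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) form the
     * "contributory" exception class; a contributory exception raised
     * while delivering another contributory exception (or while
     * delivering a #PF) escalates to a double fault.
     */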
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}


/*
 * Signal an interrupt or exception; it will be processed in the main
 * CPU loop.  is_int is TRUE if coming from the int instruction.
 * next_eip_addend is added to env->eip to form the return address,
 * i.e. the env->eip value AFTER the interrupt instruction; it is only
 * relevant if is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */
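
/*
 * Illustrative call sites: a TCG helper that detects a fault passes
 * GETPC() as the return address so the guest state can be unwound back
 * to the faulting instruction, e.g.
 *     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
 * The variants without _ra pass retaddr == 0 and perform no unwinding.
 */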

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}

#if !defined(CONFIG_USER_ONLY)
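/*
 * Walk the SVM nested (NPT) page tables to translate a guest-physical
 * address to a host-physical one, narrowing *prot to the permissions
 * granted by the nested tables.  A NULL prot marks the access as a
 * guest page-table walk itself, which is reported on fault as
 * SVM_NPTEXIT_GPT rather than SVM_NPTEXIT_GPA.
 */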
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

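    /* Without nested paging, guest-physical addresses are host-physical. */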
    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

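            /*
             * NPT has no 5-level format here; treat nCR3 as a synthetic
             * PML5 entry with full permissions that points at the PML4
             * table, so the 4-level walk below needs no special casing.
             */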
            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if host cr4 PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
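    /*
     * ptep accumulates NX inverted, so that AND-ing the entries of every
     * level yields the most restrictive permissions; flip it back before
     * testing the bit.
     */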
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}

/* return value:
 * 0 = nothing more to do
 * 1 = generate PF fault
 * is_write1 encodes the access like MMUAccessType: 0 = data load,
 * 1 = data store, 2 = instruction fetch.
 */
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension; a non-canonical
               address raises #GP rather than #PF */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
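    /*
     * Protection Keys: PKRU holds an access-disable and a write-disable
     * bit for each of the 16 keys; the key itself comes from bits 62-59
     * of the PTE and only applies to user pages.  WD is honoured for
     * user accesses, and for supervisor ones when CR0.WP is set.
     */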
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even with 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
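    /* The insn-fetch bit of the error code is defined only when NX
       (which requires PAE) or SMEP is enabled, hence the check below. */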
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif

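/*
 * TLB-fill hook: install a TLB entry for the access or deliver the
 * guest fault.  With probe == true the caller expects no exception on
 * failure; as the FIXME below notes, the nested-paging walk can still
 * exit the CPU loop early, so that case is only partially honoured.
 */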
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out.  */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}