/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check for nested exceptions and promote to a double or triple fault
 * if needed.  It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
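/*
 * Per the SDM's fault-class rules, vector 0 (#DE) and vectors 10-13
 * (#TS, #NP, #SS, #GP) are "contributory" exceptions: a contributory
 * exception raised while delivering another contributory one becomes a
 * double fault, as does a contributory exception or page fault raised
 * while delivering a page fault.  A further fault during double-fault
 * delivery is a triple fault, which resets the machine.
 */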
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interrupt.  It is processed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction.  next_eip is the
 * env->eip value AFTER the interrupt instruction.  It is only relevant
 * if is_int is TRUE.
 */
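/*
 * Note on retaddr: a non-zero value is the host return address within
 * translated code, which cpu_loop_exit_restore() uses to recover the
 * guest state of the faulting instruction before unwinding; zero means
 * the caller is outside translated code and no restore is needed.
 */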
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
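
/*
 * Illustrative use from a TCG helper (hypothetical call site): the _ra
 * variants take GETPC() so the guest state can be rolled back to the
 * faulting instruction, e.g.:
 *     raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
 */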

#if !defined(CONFIG_USER_ONLY)
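/*
 * Walk the nested (SVM NPT) page tables to translate a guest-physical
 * address into a host-physical one.  When nested paging is inactive the
 * address is returned unchanged.  On a nested fault this performs an
 * SVM_EXIT_NPF vmexit and does not return; a NULL prot marks the access
 * as a page-table access (SVM_NPTEXIT_GPT) rather than a guest-physical
 * one (SVM_NPTEXIT_GPA).
 */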
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if host cr4 PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
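            /*
             * Worked example (illustrative value): pde = 0x01402083 has
             * bit 13 set, so (pde & 0x1fe000) << (32 - 13) contributes
             * 0x100000000, giving a 4 MB frame base of 0x101400000 once
             * the low bits are masked off below.
             */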
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
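/*
 * is_write1 carries the original MMUAccessType: 0 = data load,
 * 1 = data store, 2 = instruction fetch (hence the is_write1 == 2
 * checks below); is_write keeps only the store bit.
 */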
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

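    /*
     * Paging is enabled.  NX is only decoded in the PAE and long-mode
     * formats; with EFER.NXE clear, a set NX bit is treated as a
     * reserved-bit fault instead.
     */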
    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

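            /*
             * With CR4.LA57 the walk starts at a level-5 table;
             * otherwise CR3 points directly at the PML4 and a fully
             * permissive PML5 entry is synthesized.
             */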
            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* Can the page be put in the TLB?  prot will tell us. */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

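    /*
     * MMU_KSMAP_IDX is a kernel-mode access with SMAP in force
     * (EFLAGS.AC clear), so user-accessible pages get no PAGE_READ.
     */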
    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
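    /*
     * Protection keys: each PTE carries a 4-bit key and PKRU holds two
     * bits per key, AD (access disable) and WD (write disable); WD only
     * restricts supervisor writes when CR0.WP is set, as checked below.
     */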
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even with 4 MB pages, we map only one 4 KB page in the TLB to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif

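/*
 * tlb_fill contract: return true once the translation has been entered
 * into the TLB, or raise the guest exception (noreturn) on failure.
 * Probing callers expect false instead of an exception, which this
 * implementation cannot yet honour (see the FIXME below).
 */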
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out.  */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}