/*
 *  x86 exception helpers - system code
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
#include "exec/tswap.h"
#include "tcg/helper-tcg.h"

typedef struct TranslateParams {
    target_ulong addr;
    target_ulong cr3;
    int pg_mode;
    int mmu_idx;
    int ptw_idx;
    MMUAccessType access_type;
} TranslateParams;

typedef struct TranslateResult {
    hwaddr paddr;
    int prot;
    int page_size;
} TranslateResult;

typedef enum TranslateFaultStage2 {
    S2_NONE,
    S2_GPA,
    S2_GPT,
} TranslateFaultStage2;

typedef struct TranslateFault {
    int exception_index;
    int error_code;
    target_ulong cr2;
    TranslateFaultStage2 stage2;
} TranslateFault;

typedef struct PTETranslate {
    CPUX86State *env;
    TranslateFault *err;
    int ptw_idx;
    void *haddr;
    hwaddr gaddr;
} PTETranslate;

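/*
 * Probe the page-table-walk address @addr through the softmmu TLB,
 * caching the host pointer for later PTE loads and stores.  On a
 * nested (stage-2) translation failure, record a guest-page-table
 * fault in *inout->err and return false.
 */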
static bool ptw_translate(PTETranslate *inout, hwaddr addr)
{
    int flags;

    inout->gaddr = addr;
    flags = probe_access_full_mmu(inout->env, addr, 0, MMU_DATA_STORE,
                                  inout->ptw_idx, &inout->haddr, NULL);

    if (unlikely(flags & TLB_INVALID_MASK)) {
        TranslateFault *err = inout->err;

        assert(inout->ptw_idx == MMU_NESTED_IDX);
        *err = (TranslateFault){
            .error_code = inout->env->error_code,
            .cr2 = addr,
            .stage2 = S2_GPT,
        };
        return false;
    }
    return true;
}

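/*
 * Load a 32-bit or 64-bit PTE: directly through the cached host
 * pointer when available, otherwise through the MMU (e.g. for MMIO,
 * or when the walk itself must be translated through stage 2).
 */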
static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
{
    if (likely(in->haddr)) {
        return ldl_p(in->haddr);
    }
    return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}

static inline uint64_t ptw_ldq(const PTETranslate *in, uint64_t ra)
{
    if (likely(in->haddr)) {
        return ldq_p(in->haddr);
    }
    return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}

/*
 * Note that we can use a 32-bit cmpxchg for all page table entries,
 * even 64-bit ones, because PG_PRESENT_MASK, PG_ACCESSED_MASK and
 * PG_DIRTY_MASK are all in the low 32 bits.
 */
static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new)
{
    uint32_t cmp;

    CPUState *cpu = env_cpu(in->env);
    /* We are in cpu_exec, and start_exclusive can't be called directly. */
    g_assert(cpu->running);
    cpu_exec_end(cpu);
    /* Does x86 really perform an RMW cycle on MMIO for a PTW? */
    start_exclusive();
    cmp = cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0);
    if (cmp == old) {
        cpu_stl_mmuidx_ra(in->env, in->gaddr, new, in->ptw_idx, 0);
    }
    end_exclusive();
    cpu_exec_start(cpu);
    return cmp == old;
}

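/*
 * Atomically OR @set into the PTE.  Returns false only if the PTE
 * changed under our feet (the cmpxchg failed), in which case the
 * caller must re-read the entry and restart.
 */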
static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set)
{
    if (set & ~old) {
        uint32_t new = old | set;
        if (likely(in->haddr)) {
            old = cpu_to_le32(old);
            new = cpu_to_le32(new);
            return qatomic_cmpxchg((uint32_t *)in->haddr, old, new) == old;
        }
        return ptw_setl_slow(in, old, new);
    }
    return true;
}

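/*
 * Walk the guest page tables described by @in and translate @in->addr.
 * On success, fill in *out (physical address, protection, page size)
 * and return true.  On failure, fill in *err and return false; the
 * walk itself uses in->ptw_idx, so a nested walk can fault at stage 2.
 */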
static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
                          TranslateResult *out, TranslateFault *err,
                          uint64_t ra)
{
    const target_ulong addr = in->addr;
    const int pg_mode = in->pg_mode;
    const bool is_user = is_mmu_index_user(in->mmu_idx);
    const MMUAccessType access_type = in->access_type;
    uint64_t ptep, pte, rsvd_mask;
    PTETranslate pte_trans = {
        .env = env,
        .err = err,
        .ptw_idx = in->ptw_idx,
    };
    hwaddr pte_addr, paddr;
    uint32_t pkr;
    int page_size;
    int error_code;
    int prot;

 restart_all:
    rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits);
    rsvd_mask &= PG_ADDRESS_MASK;
    if (!(pg_mode & PG_MODE_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

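    /*
     * Note: ptep accumulates the USER, RW and NX rights across all
     * levels.  NX is kept inverted while accumulating so that a plain
     * AND combines all three bits; it is flipped back in
     * do_check_protect.
     */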
    if (pg_mode & PG_MODE_PAE) {
#ifdef TARGET_X86_64
        if (pg_mode & PG_MODE_LMA) {
            if (pg_mode & PG_MODE_LA57) {
                /*
                 * Page table level 5
                 */
                pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3);
                if (!ptw_translate(&pte_trans, pte_addr)) {
                    return false;
                }
            restart_5:
                pte = ptw_ldq(&pte_trans, ra);
                if (!(pte & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pte & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                    goto restart_5;
                }
                ptep = pte ^ PG_NX_MASK;
            } else {
                pte = in->cr3;
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            /*
             * Page table level 4
             */
            pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3);
            if (!ptw_translate(&pte_trans, pte_addr)) {
                return false;
            }
        restart_4:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_4;
            }
            ptep &= pte ^ PG_NX_MASK;

            /*
             * Page table level 3
             */
            pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3);
            if (!ptw_translate(&pte_trans, pte_addr)) {
                return false;
            }
        restart_3_lma:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & rsvd_mask) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_3_lma;
            }
            ptep &= pte ^ PG_NX_MASK;
            if (pte & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                goto do_check_protect;
            }
        } else
#endif
        {
            /*
             * Page table level 3
             */
            pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18);
            if (!ptw_translate(&pte_trans, pte_addr)) {
                return false;
            }
            rsvd_mask |= PG_HI_USER_MASK;
        restart_3_nolma:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_3_nolma;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        /*
         * Page table level 2
         */
        pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3);
        if (!ptw_translate(&pte_trans, pte_addr)) {
            return false;
        }
    restart_2_pae:
        pte = ptw_ldq(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        if (pte & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep &= pte ^ PG_NX_MASK;
            goto do_check_protect;
        }
        if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
            goto restart_2_pae;
        }
        ptep &= pte ^ PG_NX_MASK;

        /*
         * Page table level 1
         */
        pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3);
        if (!ptw_translate(&pte_trans, pte_addr)) {
            return false;
        }
        pte = ptw_ldq(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else if (pg_mode & PG_MODE_PG) {
        /*
         * Page table level 2
         */
        pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc);
        if (!ptw_translate(&pte_trans, pte_addr)) {
            return false;
        }
    restart_2_nopae:
        pte = ptw_ldl(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pte | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pte & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) {
            page_size = 4096 * 1024;
            /*
             * Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = (uint32_t)pte | ((pte & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }
        if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
            goto restart_2_nopae;
        }

        /*
         * Page table level 1
         */
        pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc);
        if (!ptw_translate(&pte_trans, pte_addr)) {
            return false;
        }
        pte = ptw_ldl(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    } else {
        /*
         * No paging (real mode), let's tentatively resolve the address as 1:1
         * here, but conditionally still perform an NPT walk on it later.
         */
        page_size = 0x40000000;
        paddr = in->addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto stage2;
    }

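    /*
     * Common tail for all paged modes: check reserved bits, fold the
     * accumulated rights in ptep into prot (honouring SMAP/SMEP/WP and
     * protection keys), and update the accessed/dirty bits in the PTE.
     */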
do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* Can the page be put in the TLB?  prot will tell us. */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (is_user ||
         !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }

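    /*
     * Protection keys: user pages are governed by PKRU, supervisor
     * pages by PKRS (each only when the corresponding mode is
     * enabled).  The AD bit denies all data access to the key;
     * the WD bit denies writes, for user accesses or when CR0.WP
     * is set.
     */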
    if (ptep & PG_USER_MASK) {
        pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0;
    } else {
        pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0;
    }
    if (pkr) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkr_ad = (pkr >> pk * 2) & 1;
        uint32_t pkr_wd = (pkr >> pk * 2) & 2;
        uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkr_ad) {
            pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) {
            pkr_prot &= ~PAGE_WRITE;
        }
        if ((pkr_prot & (1 << access_type)) == 0) {
            goto do_fault_pk_protect;
        }
        prot &= pkr_prot;
    }

    if ((prot & (1 << access_type)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    {
        uint32_t set = PG_ACCESSED_MASK;
        if (access_type == MMU_DATA_STORE) {
            set |= PG_DIRTY_MASK;
        } else if (!(pte & PG_DIRTY_MASK)) {
            /*
             * Only set write access if already dirty...
             * otherwise wait for dirty access.
             */
            prot &= ~PAGE_WRITE;
        }
        if (!ptw_setl(&pte_trans, pte, set)) {
            /*
             * We can arrive here from any of 3 levels and 2 formats.
             * The only safe thing is to restart the entire lookup.
             */
            goto restart_all;
        }
    }

    /* merge offset within page */
    paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
 stage2:

    /*
     * Note that NPT is walked (for both paging structures and final guest
     * addresses) using the address with the A20 bit set.
     */
    if (in->ptw_idx == MMU_NESTED_IDX) {
        CPUTLBEntryFull *full;
        int flags, nested_page_size;

        flags = probe_access_full_mmu(env, paddr, 0, access_type,
                                      MMU_NESTED_IDX, &pte_trans.haddr, &full);
        if (unlikely(flags & TLB_INVALID_MASK)) {
            *err = (TranslateFault){
                .error_code = env->error_code,
                .cr2 = paddr,
                .stage2 = S2_GPA,
            };
            return false;
        }

        /* Merge stage1 & stage2 protection bits. */
        prot &= full->prot;

        /* Re-verify resulting protection. */
        if ((prot & (1 << access_type)) == 0) {
            goto do_fault_protect;
        }

        /* Merge stage1 & stage2 addresses to final physical address. */
        nested_page_size = 1 << full->lg_page_size;
        paddr = (full->phys_addr & ~(nested_page_size - 1))
              | (paddr & (nested_page_size - 1));

        /*
         * Use the larger of stage1 & stage2 page sizes, so that
         * invalidation works.
         */
        if (nested_page_size > page_size) {
            page_size = nested_page_size;
        }
    }

    out->paddr = paddr & x86_get_a20_mask(env);
    out->prot = prot;
    out->page_size = page_size;
    return true;

 do_fault_rsvd:
    error_code = PG_ERROR_RSVD_MASK;
    goto do_fault_cont;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
    goto do_fault_cont;
 do_fault_pk_protect:
    assert(access_type != MMU_INST_FETCH);
    error_code = PG_ERROR_PK_MASK | PG_ERROR_P_MASK;
    goto do_fault_cont;
 do_fault:
    error_code = 0;
 do_fault_cont:
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    switch (access_type) {
    case MMU_DATA_LOAD:
        break;
    case MMU_DATA_STORE:
        error_code |= PG_ERROR_W_MASK;
        break;
    case MMU_INST_FETCH:
        if (pg_mode & (PG_MODE_NXE | PG_MODE_SMEP)) {
            error_code |= PG_ERROR_I_D_MASK;
        }
        break;
    }
    *err = (TranslateFault){
        .exception_index = EXCP0E_PAGE,
        .error_code = error_code,
        .cr2 = addr,
    };
    return false;
}

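/*
 * Deliver an SVM nested page fault (#VMEXIT(NPF)).  exit_info_1 holds
 * the page-fault error code, tagged with whether the fault hit the
 * guest's page-table walk (GPT) or the final guest-physical access
 * (GPA); exit_info_2 holds the faulting guest-physical address.
 */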
static G_NORETURN void raise_stage2(CPUX86State *env, TranslateFault *err,
                                    uintptr_t retaddr)
{
    uint64_t exit_info_1 = err->error_code;

    switch (err->stage2) {
    case S2_GPT:
        exit_info_1 |= SVM_NPTEXIT_GPT;
        break;
    case S2_GPA:
        exit_info_1 |= SVM_NPTEXIT_GPA;
        break;
    default:
        g_assert_not_reached();
    }

    x86_stq_phys(env_cpu(env),
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 err->cr2);
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, retaddr);
}

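/*
 * Translate a virtual address for the given MMU index: no translation
 * for MMU_PHYS_IDX, a stage-2-only walk for MMU_NESTED_IDX, and a
 * full (optionally nested) walk otherwise.  Returns true and fills
 * *out on success, else fills *err and returns false.
 */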
static bool get_physical_address(CPUX86State *env, vaddr addr,
                                 MMUAccessType access_type, int mmu_idx,
                                 TranslateResult *out, TranslateFault *err,
                                 uint64_t ra)
{
    TranslateParams in;
    bool use_stage2 = env->hflags2 & HF2_NPT_MASK;

    in.addr = addr;
    in.access_type = access_type;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        break;

    case MMU_NESTED_IDX:
        if (likely(use_stage2)) {
            in.cr3 = env->nested_cr3;
            in.pg_mode = env->nested_pg_mode;
            in.mmu_idx =
                env->nested_pg_mode & PG_MODE_LMA ? MMU_USER64_IDX : MMU_USER32_IDX;
            in.ptw_idx = MMU_PHYS_IDX;

            if (!mmu_translate(env, &in, out, err, ra)) {
                err->stage2 = S2_GPA;
                return false;
            }
            return true;
        }
        break;

    default:
        if (is_mmu_index_32(mmu_idx)) {
            addr = (uint32_t)addr;
        }

        if (likely(env->cr[0] & CR0_PG_MASK || use_stage2)) {
            in.cr3 = env->cr[3];
            in.mmu_idx = mmu_idx;
            in.ptw_idx = use_stage2 ? MMU_NESTED_IDX : MMU_PHYS_IDX;
            in.pg_mode = get_pg_mode(env);

            if (in.pg_mode & PG_MODE_LMA) {
                /* test virtual address sign extension */
                int shift = in.pg_mode & PG_MODE_LA57 ? 56 : 47;
                int64_t sext = (int64_t)addr >> shift;
                if (sext != 0 && sext != -1) {
                    *err = (TranslateFault){
                        .exception_index = EXCP0D_GPF,
                        .cr2 = addr,
                    };
                    return false;
                }
            }
            return mmu_translate(env, &in, out, err, ra);
        }
        break;
    }

    /* No translation needed. */
    out->paddr = addr & x86_get_a20_mask(env);
    out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    out->page_size = TARGET_PAGE_SIZE;
    return true;
}

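/*
 * TCG TLB-fill hook: translate @addr and install the mapping in the
 * softmmu TLB.  On failure, either report back to a probing caller,
 * raise a stage-2 #VMEXIT, or deliver #PF (or #GP for a non-canonical
 * address) to the guest.
 */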
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    CPUX86State *env = cpu_env(cs);
    TranslateResult out;
    TranslateFault err;

    if (get_physical_address(env, addr, access_type, mmu_idx, &out, &err,
                             retaddr)) {
        /*
         * Even for 4 MB pages, we map only one 4 KB page in the TLB
         * to avoid filling it too quickly.
         */
        assert(out.prot & (1 << access_type));
        tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                                out.paddr & TARGET_PAGE_MASK,
                                cpu_get_mem_attrs(env),
                                out.prot, mmu_idx, out.page_size);
        return true;
    }

    if (probe) {
        /* This will be used if recursing for stage2 translation. */
        env->error_code = err.error_code;
        return false;
    }

    if (err.stage2 != S2_NONE) {
        raise_stage2(env, &err, retaddr);
    }

    if (env->intercept_exceptions & (1 << err.exception_index)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs, env->vm_vmcb +
                     offsetof(struct vmcb, control.exit_info_2),
                     err.cr2);
    } else {
        env->cr[2] = err.cr2;
    }
    raise_exception_err_ra(env, err.exception_index, err.error_code, retaddr);
}

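/*
 * Called by the TCG memory core for an access that violates the
 * required alignment; defer to the common i386 handler.
 */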
G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
}