1 #ifndef _X86_VM_H_
2 #define _X86_VM_H_
3
4 #include "processor.h"
5 #include "vmalloc.h"
6 #include "asm/page.h"
7 #include "asm/io.h"
8 #include "asm/bitops.h"
9
10 void setup_5level_page_table(void);
11
12 struct pte_search {
13 int level;
14 pteval_t *pte;
15 };
16
found_huge_pte(struct pte_search search)17 static inline bool found_huge_pte(struct pte_search search)
18 {
19 return (search.level == 2 || search.level == 3) &&
20 (*search.pte & PT_PRESENT_MASK) &&
21 (*search.pte & PT_PAGE_SIZE_MASK);
22 }
23
found_leaf_pte(struct pte_search search)24 static inline bool found_leaf_pte(struct pte_search search)
25 {
26 return search.level == 1 || found_huge_pte(search);
27 }
28
29 struct pte_search find_pte_level(pgd_t *cr3, void *virt,
30 int lowest_level);
31 pteval_t *get_pte(pgd_t *cr3, void *virt);
32 pteval_t *get_pte_level(pgd_t *cr3, void *virt, int pte_level);
33 pteval_t *install_pte(pgd_t *cr3,
34 int pte_level,
35 void *virt,
36 pteval_t pte,
37 pteval_t *pt_page);
38
39 pteval_t *install_large_page(pgd_t *cr3, phys_addr_t phys, void *virt);
40 void install_pages(pgd_t *cr3, phys_addr_t phys, size_t len, void *virt);
41 bool any_present_pages(pgd_t *cr3, void *virt, size_t len);
42 void set_pte_opt_mask(void);
43 void reset_pte_opt_mask(void);
44
45 enum x86_mmu_flags {
46 X86_MMU_MAP_USER = BIT(0),
47 X86_MMU_MAP_HUGE = BIT(1),
48 };
49 void __setup_mmu_range(pgd_t *cr3, phys_addr_t start, size_t len,
50 enum x86_mmu_flags mmu_flags);
51
/*
 * Return a virtual-address pointer to the root of the currently active
 * page table (the physical address held in CR3, converted via
 * phys_to_virt()).
 */
static inline void *current_page_table(void)
{
	return phys_to_virt(read_cr3());
}
56
/* Split the large page mapped by @ptep at @level into smaller pages. */
void split_large_page(unsigned long *ptep, int level);
/* Ensure @addr is mapped with a 4k page, splitting huge pages as needed. */
void force_4k_page(void *addr);
59
60 struct vm_vcpu_info {
61 u64 cr3;
62 u64 cr4;
63 u64 cr0;
64 };
65
66 typedef void (*pte_callback_t)(struct pte_search search, void *va);
67 void walk_pte(void *virt, size_t len, pte_callback_t callback);
68
69 #endif
70