xref: /kvm-unit-tests/lib/arm64/asm/pgtable.h (revision 2c96b77ec9d3b1fcec7525174e23a6240ee05949)
#ifndef _ASMARM64_PGTABLE_H_
#define _ASMARM64_PGTABLE_H_
/*
 * Adapted from arch/arm64/include/asm/pgtable.h
 *              include/asm-generic/pgtable-nopmd.h
 *              include/linux/mm.h
 *
 * Note: some Linux function APIs have been modified. Nothing crazy,
 *       but if a function took, for example, an mm_struct, then
 *       that was either removed or replaced.
 *
 * Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <alloc.h>
#include <alloc_page.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>

#include <linux/compiler.h>

/*
 * We can convert va <=> pa page table addresses with simple casts
 * because we always allocate their pages with alloc_page(), and
 * alloc_page() always returns identity mapped pages.
 */
#define pgtable_va(x)		((void *)(unsigned long)(x))
#define pgtable_pa(x)		((unsigned long)(x))

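/*
 * Entry predicates: *_none() is true for an empty (zero) descriptor,
 * *_valid() tests the descriptor's valid bit at each level.
 */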
#define pgd_none(pgd)		(!pgd_val(pgd))
#define pud_none(pud)		(!pud_val(pud))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pte_none(pte)		(!pte_val(pte))

#define pgd_valid(pgd)		(pgd_val(pgd) & PGD_VALID)
#define pud_valid(pud)		(pud_val(pud) & PUD_VALID)
#define pmd_valid(pmd)		(pmd_val(pmd) & PMD_SECT_VALID)
#define pte_valid(pte)		(pte_val(pte) & PTE_VALID)

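/* A huge PMD maps a whole block (section) directly rather than pointing to a PTE table. */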
#define pmd_huge(pmd)	\
	((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)

#define pgd_index(addr) \
	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(pgtable, addr) ((pgtable) + pgd_index(addr))

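/*
 * The top-level table may be smaller than a page, so it is allocated
 * page-aligned with memalign() and zeroed here, and released with
 * free() rather than the page allocator.
 */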
#define pgd_free(pgd) free(pgd)
static inline pgd_t *pgd_alloc(void)
{
	pgd_t *pgd = memalign(PAGE_SIZE, PTRS_PER_PGD * sizeof(pgd_t));
	memset(pgd, 0, PTRS_PER_PGD * sizeof(pgd_t));
	return pgd;
}

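/*
 * Given a table descriptor, mask out the next-level table's physical
 * address and convert it to a virtual address (identity mapped, see
 * pgtable_va() above).
 */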
static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
	return pgtable_va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return pgtable_va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return pgtable_va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

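/*
 * With more than two translation levels the PMD is a real table level:
 * pmd_alloc() allocates it on demand and links it into the PUD entry.
 * Otherwise the PMD is folded into the PUD (as in
 * include/asm-generic/pgtable-nopmd.h) and pmd_offset() just reuses the
 * PUD entry.
 */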
#if PGTABLE_LEVELS > 2
#define pmd_index(addr)							\
	(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(pud, addr)						\
	(pud_page_vaddr(*(pud)) + pmd_index(addr))
#define pmd_free(pmd)	free_page(pmd)
static inline pmd_t *pmd_alloc_one(void)
{
	assert(PTRS_PER_PMD * sizeof(pmd_t) == PAGE_SIZE);
	pmd_t *pmd = alloc_page();
	return pmd;
}
static inline pmd_t *pmd_alloc(pud_t *pud, unsigned long addr)
{
	if (pud_none(*pud)) {
		pud_t entry;
		pud_val(entry) = pgtable_pa(pmd_alloc_one()) | PMD_TYPE_TABLE;
		WRITE_ONCE(*pud, entry);
	}
	return pmd_offset(pud, addr);
}
#else
#define pmd_offset(pud, addr)	((pmd_t *)pud)
#define pmd_free(pmd)
#define pmd_alloc(pud, addr)	pmd_offset(pud, addr)
#endif

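/*
 * Likewise, the PUD is only a separate level with more than three
 * translation levels; otherwise it is folded into the PGD.
 */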
#if PGTABLE_LEVELS > 3
#define pud_index(addr)                                 \
	(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgd, addr)                           \
	(pgd_page_vaddr(*(pgd)) + pud_index(addr))
#define pud_free(pud) free_page(pud)
static inline pud_t *pud_alloc_one(void)
{
	assert(PTRS_PER_PUD * sizeof(pud_t) == PAGE_SIZE);
	pud_t *pud = alloc_page();
	return pud;
}
static inline pud_t *pud_alloc(pgd_t *pgd, unsigned long addr)
{
	if (pgd_none(*pgd)) {
		pgd_t entry;
		pgd_val(entry) = pgtable_pa(pud_alloc_one()) | PMD_TYPE_TABLE;
		WRITE_ONCE(*pgd, entry);
	}
	return pud_offset(pgd, addr);
}
#else
#define pud_offset(pgd, addr)	((pud_t *)pgd)
#define pud_free(pud)
#define pud_alloc(pgd, addr)	pud_offset(pgd, addr)
#endif

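/*
 * Last level: pte_alloc() allocates the PTE table on demand, links it
 * into the PMD entry, and returns the slot for addr, just like the
 * pmd/pud variants above do for their levels.
 */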
#define pte_index(addr) \
	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(pmd, addr) \
	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_free(pte) free_page(pte)
static inline pte_t *pte_alloc_one(void)
{
	assert(PTRS_PER_PTE * sizeof(pte_t) == PAGE_SIZE);
	pte_t *pte = alloc_page();
	return pte;
}
static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
{
	if (pmd_none(*pmd)) {
		pmd_t entry;
		pmd_val(entry) = pgtable_pa(pte_alloc_one()) | PMD_TYPE_TABLE;
		WRITE_ONCE(*pmd, entry);
	}
	return pte_offset(pmd, addr);
}

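/*
 * Usage sketch (illustration only, not part of the upstream header):
 * map one page by walking, and allocating as needed, every table level
 * for a virtual address. The function name, the "pgtable" root
 * parameter and the "prot" attribute bits are assumptions made for the
 * example; the real users of these helpers live in the arm MMU setup
 * code and also flush the TLB after updating a live table.
 */
static inline void example_install_page(pgd_t *pgtable, unsigned long vaddr,
					unsigned long paddr, pteval_t prot)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);	/* index the top level */
	pud_t *pud = pud_alloc(pgd, vaddr);		/* allocate/descend a level */
	pmd_t *pmd = pmd_alloc(pud, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);
	pte_t entry;

	/* Build the leaf descriptor: output address plus attributes. */
	pte_val(entry) = (paddr & PHYS_MASK & PAGE_MASK) | prot | PTE_VALID;
	WRITE_ONCE(*pte, entry);
}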
#endif /* _ASMARM64_PGTABLE_H_ */