xref: /kvm-unit-tests/lib/arm/asm/pgtable.h (revision 2c96b77ec9d3b1fcec7525174e23a6240ee05949)
1 #ifndef _ASMARM_PGTABLE_H_
2 #define _ASMARM_PGTABLE_H_
3 /*
4  * Adapted from arch/arm/include/asm/pgtable.h
5  *              arch/arm/include/asm/pgtable-3level.h
6  *              arch/arm/include/asm/pgalloc.h
7  *
8  * Note: some Linux function APIs have been modified. Nothing crazy,
9  *       but if a function took, for example, an mm_struct, then
10  *       that was either removed or replaced.
11  *
12  * Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.
15  */
16 #include <alloc_page.h>
17 
18 /*
19  * We can convert va <=> pa page table addresses with simple casts
20  * because we always allocate their pages with alloc_page(), and
21  * alloc_page() always returns identity mapped pages.
22  */
23 #include <linux/compiler.h>
24 
/* Convert a page table physical address to a usable pointer (identity map). */
#define pgtable_va(x)		((void *)(unsigned long)(x))
/* Convert a page table pointer back to the physical address to store in an entry. */
#define pgtable_pa(x)		((unsigned long)(x))
27 
/* An entry is "none" when it is completely clear (all bits zero). */
#define pgd_none(pgd)		(!pgd_val(pgd))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pte_none(pte)		(!pte_val(pte))

/* An entry is valid when its hardware VALID bit is set. */
#define pgd_valid(pgd)		(pgd_val(pgd) & PGD_VALID)
#define pmd_valid(pmd)		(pmd_val(pmd) & PMD_SECT_VALID)
#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID)

/*
 * True when the pmd is a section (block) mapping, i.e. it maps memory
 * directly rather than pointing to a pte table.
 */
#define pmd_huge(pmd)	\
	((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
38 
/* Index of the first-level (pgd) entry covering @addr. */
#define pgd_index(addr) \
	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
/* Pointer to the pgd entry for @addr within the table @pgtable. */
#define pgd_offset(pgtable, addr) ((pgtable) + pgd_index(addr))
42 
43 #define pgd_free(pgd) free(pgd)
44 static inline pgd_t *pgd_alloc(void)
45 {
46 	pgd_t *pgd = memalign(L1_CACHE_BYTES, PTRS_PER_PGD * sizeof(pgd_t));
47 	memset(pgd, 0, PTRS_PER_PGD * sizeof(pgd_t));
48 	return pgd;
49 }
50 
/*
 * Return the virtual address of the pmd table this pgd entry points to.
 * PHYS_MASK clips to the supported physical address range; casting
 * PAGE_MASK to s32 makes it negative so it sign-extends when widened,
 * clearing only the low page-offset bits of the wider descriptor value.
 */
static inline pmd_t *pgd_page_vaddr(pgd_t pgd)
{
	return pgtable_va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}
55 
/*
 * For compatibility with arm64 page tables: there is no separate pud
 * level here, so the pud aliases the pgd entry. pud_free is a no-op
 * because no pud table is ever allocated.
 */
#define pud_valid(pud)		pgd_valid(pud)
#define pud_offset(pgd, addr)	((pud_t *)pgd)
#define pud_free(pud)
#define pud_alloc(pgd, addr)	pud_offset(pgd, addr)
61 
/* Index of the second-level (pmd) entry covering @addr. */
#define pmd_index(addr) \
	(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
/* Pointer to the pmd entry for @addr, reached through the pgd entry @pgd. */
#define pmd_offset(pgd, addr) \
	(pgd_page_vaddr(*(pgd)) + pmd_index(addr))

/* pmd tables are page-sized allocations (see pmd_alloc_one). */
#define pmd_free(pmd) free_page(pmd)
68 static inline pmd_t *pmd_alloc_one(void)
69 {
70 	assert(PTRS_PER_PMD * sizeof(pmd_t) == PAGE_SIZE);
71 	pmd_t *pmd = alloc_page();
72 	return pmd;
73 }
74 static inline pmd_t *pmd_alloc(pgd_t *pgd, unsigned long addr)
75 {
76 	if (pgd_none(*pgd)) {
77 		pgd_t entry;
78 		pgd_val(entry) = pgtable_pa(pmd_alloc_one()) | PMD_TYPE_TABLE;
79 		WRITE_ONCE(*pgd, entry);
80 	}
81 	return pmd_offset(pgd, addr);
82 }
83 
/*
 * Return the virtual address of the pte table this pmd entry points to.
 * Same masking trick as pgd_page_vaddr(): the s32 cast sign-extends
 * PAGE_MASK so only the page-offset bits are cleared.
 */
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return pgtable_va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
88 
/* Index of the last-level (pte) entry covering @addr. */
#define pte_index(addr) \
	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/* Pointer to the pte entry for @addr, reached through the pmd entry @pmd. */
#define pte_offset(pmd, addr) \
	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

/* pte tables are page-sized allocations (see pte_alloc_one). */
#define pte_free(pte) free_page(pte)
95 static inline pte_t *pte_alloc_one(void)
96 {
97 	assert(PTRS_PER_PTE * sizeof(pte_t) == PAGE_SIZE);
98 	pte_t *pte = alloc_page();
99 	return pte;
100 }
101 static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
102 {
103 	if (pmd_none(*pmd)) {
104 		pmd_t entry;
105 		pmd_val(entry) = pgtable_pa(pte_alloc_one()) | PMD_TYPE_TABLE;
106 		WRITE_ONCE(*pmd, entry);
107 	}
108 	return pte_offset(pmd, addr);
109 }
110 
111 #endif /* _ASMARM_PGTABLE_H_ */
112