1 #ifndef _ASMARM_PGTABLE_H_
2 #define _ASMARM_PGTABLE_H_
3 /*
4 * Adapted from arch/arm/include/asm/pgtable.h
5 * arch/arm/include/asm/pgtable-3level.h
6 * arch/arm/include/asm/pgalloc.h
7 *
8 * Note: some Linux function APIs have been modified. Nothing crazy,
9 * but if a function took, for example, an mm_struct, then
10 * that was either removed or replaced.
11 *
12 * Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
13 *
14 * This work is licensed under the terms of the GNU GPL, version 2.
15 */
16 #include <alloc.h>
17 #include <alloc_page.h>
18 #include <asm/setup.h>
19
20 /*
21 * We can convert va <=> pa page table addresses with simple casts
22 * because we always allocate their pages with alloc_page(), and
23 * alloc_page() always returns identity mapped pages.
24 */
25 #include <linux/compiler.h>
26
/* Identity-mapped page-table pages: va <=> pa conversion is a plain cast. */
#define pgtable_va(x) ((void *)(unsigned long)(x))
#define pgtable_pa(x) ((unsigned long)(x))

/* True when the table entry has never been populated (all bits clear). */
#define pgd_none(pgd) (!pgd_val(pgd))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pte_none(pte) (!pte_val(pte))

/* True when the entry's hardware valid bit is set. */
#define pgd_valid(pgd) (pgd_val(pgd) & PGD_VALID)
#define pmd_valid(pmd) (pmd_val(pmd) & PMD_SECT_VALID)
#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID)

/* True when the pmd maps a section directly rather than a pte table. */
#define pmd_huge(pmd) \
	((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)

/*
 * pgd_index: index of addr's entry within a pgd table.
 * pgd_offset: pointer to that entry within the table at pgtable.
 */
#define pgd_index(addr) \
	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(pgtable, addr) ((pgtable) + pgd_index(addr))
45 #define pgd_free(pgd) free(pgd)
pgd_alloc(void)46 static inline pgd_t *pgd_alloc(void)
47 {
48 pgd_t *pgd = memalign(L1_CACHE_BYTES, PTRS_PER_PGD * sizeof(pgd_t));
49 memset(pgd, 0, PTRS_PER_PGD * sizeof(pgd_t));
50 return pgd;
51 }
52
/*
 * Return the (identity-mapped) virtual address of the pmd table that
 * this pgd entry points to. PAGE_MASK is cast to s32 so that the mask
 * sign-extends when combined with a wider entry value, clearing only
 * the low attribute bits; PHYS_MASK bounds the physical address.
 */
static inline pmd_t *pgd_page_vaddr(pgd_t pgd)
{
	return pgtable_va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}
57
/*
 * For compatibility with arm64 page tables: this arm configuration has
 * no separate pud level, so the pud helpers fold straight into the pgd.
 */
#define pud_valid(pud) pgd_valid(pud)
#define pud_offset(pgd, addr) ((pud_t *)pgd)
#define pud_free(pud)
#define pud_alloc(pgd, addr) pud_offset(pgd, addr)

/*
 * pmd_index: index of addr's entry within a pmd table.
 * pmd_offset: pointer to that entry within the table *pgd points to.
 */
#define pmd_index(addr) \
	(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(pgd, addr) \
	(pgd_page_vaddr(*(pgd)) + pmd_index(addr))
68
69 #define pmd_free(pmd) free_page(pmd)
pmd_alloc_one(void)70 static inline pmd_t *pmd_alloc_one(void)
71 {
72 assert(PTRS_PER_PMD * sizeof(pmd_t) == PAGE_SIZE);
73 pmd_t *pmd = alloc_page();
74 return pmd;
75 }
pmd_alloc(pgd_t * pgd,unsigned long addr)76 static inline pmd_t *pmd_alloc(pgd_t *pgd, unsigned long addr)
77 {
78 if (pgd_none(*pgd)) {
79 pgd_t entry;
80 pgd_val(entry) = pgtable_pa(pmd_alloc_one()) | PMD_TYPE_TABLE;
81 WRITE_ONCE(*pgd, entry);
82 }
83 return pmd_offset(pgd, addr);
84 }
85
/*
 * Return the (identity-mapped) virtual address of the pte table that
 * this pmd entry points to. As in pgd_page_vaddr(), the s32 cast lets
 * PAGE_MASK sign-extend against a wider entry value so only the low
 * attribute bits are cleared.
 */
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return pgtable_va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
90
/*
 * pte_index: index of addr's entry within a pte table.
 * pte_offset: pointer to that entry within the table *pmd points to.
 */
#define pte_index(addr) \
	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(pmd, addr) \
	(pmd_page_vaddr(*(pmd)) + pte_index(addr))
95
96 #define pte_free(pte) free_page(pte)
pte_alloc_one(void)97 static inline pte_t *pte_alloc_one(void)
98 {
99 assert(PTRS_PER_PTE * sizeof(pte_t) == PAGE_SIZE);
100 pte_t *pte = alloc_page();
101 return pte;
102 }
pte_alloc(pmd_t * pmd,unsigned long addr)103 static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
104 {
105 if (pmd_none(*pmd)) {
106 pmd_t entry;
107 pmd_val(entry) = pgtable_pa(pte_alloc_one()) | PMD_TYPE_TABLE;
108 WRITE_ONCE(*pmd, entry);
109 }
110 return pte_offset(pmd, addr);
111 }
112
113 #endif /* _ASMARM_PGTABLE_H_ */
114