xref: /kvm-unit-tests/lib/arm/asm/pgtable.h (revision f02b6363de4a34d6873f7041d2a8d7f35f177e16)
1 #ifndef _ASMARM_PGTABLE_H_
2 #define _ASMARM_PGTABLE_H_
3 /*
4  * Adapted from arch/arm/include/asm/pgtable.h
5  *              arch/arm/include/asm/pgtable-3level.h
6  *              arch/arm/include/asm/pgalloc.h
7  *
8  * Note: some Linux function APIs have been modified. Nothing crazy,
9  *       but if a function took, for example, an mm_struct, then
10  *       that was either removed or replaced.
11  *
12  * Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.
15  */
16 
/*
 * We can convert va <=> pa page table addresses with simple casts
 * because we always allocate their pages with alloc_page(), and
 * alloc_page() always returns identity mapped pages.
 */
/* Physical address stored in a descriptor -> pointer usable by this code. */
#define pgtable_va(x)		((void *)(unsigned long)(x))
/* Pointer to a table -> physical address to store in a descriptor. */
#define pgtable_pa(x)		((unsigned long)(x))
24 
/* An all-zero descriptor is invalid: it maps/points to nothing. */
#define pgd_none(pgd)		(!pgd_val(pgd))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pte_none(pte)		(!pte_val(pte))
28 
/* Index of the first-level (pgd) entry covering @addr. */
#define pgd_index(addr) \
	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
/* Pointer to the pgd entry covering @addr in the table at @pgtable. */
#define pgd_offset(pgtable, addr) ((pgtable) + pgd_index(addr))
32 
33 #define pgd_free(pgd) free(pgd)
34 static inline pgd_t *pgd_alloc(void)
35 {
36 	pgd_t *pgd = memalign(L1_CACHE_BYTES, PTRS_PER_PGD * sizeof(pgd_t));
37 	memset(pgd, 0, PTRS_PER_PGD * sizeof(pgd_t));
38 	return pgd;
39 }
40 
/*
 * Return the (identity-mapped) virtual address of the next-level table
 * referenced by the first-level entry @pgd.  The (s32)PAGE_MASK cast is
 * the Linux idiom: the mask is sign-extended when combined with the
 * wider descriptor value, so only the low attribute bits are cleared;
 * PHYS_MASK then drops bits above the supported physical range.
 */
static inline pmd_t *pgd_page_vaddr(pgd_t pgd)
{
	return pgtable_va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}
45 
/* Index of the second-level (pmd) entry covering @addr. */
#define pmd_index(addr) \
	(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
/* Pointer to the pmd entry covering @addr, given its pgd entry @pgd. */
#define pmd_offset(pgd, addr) \
	(pgd_page_vaddr(*(pgd)) + pmd_index(addr))

/* pmd tables are exactly one page (asserted in pmd_alloc_one()). */
#define pmd_free(pmd) free_page(pmd)
52 static inline pmd_t *pmd_alloc_one(void)
53 {
54 	assert(PTRS_PER_PMD * sizeof(pmd_t) == PAGE_SIZE);
55 	pmd_t *pmd = alloc_page();
56 	memset(pmd, 0, PTRS_PER_PMD * sizeof(pmd_t));
57 	return pmd;
58 }
59 static inline pmd_t *pmd_alloc(pgd_t *pgd, unsigned long addr)
60 {
61 	if (pgd_none(*pgd)) {
62 		pmd_t *pmd = pmd_alloc_one();
63 		pgd_val(*pgd) = pgtable_pa(pmd) | PMD_TYPE_TABLE;
64 	}
65 	return pmd_offset(pgd, addr);
66 }
67 
/*
 * Return the (identity-mapped) virtual address of the last-level table
 * referenced by the second-level entry @pmd.  See pgd_page_vaddr() for
 * the meaning of the (s32)PAGE_MASK cast.
 */
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return pgtable_va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
72 
/* Index of the last-level (pte) entry covering @addr. */
#define pte_index(addr) \
	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/* Pointer to the pte entry covering @addr, given its pmd entry @pmd. */
#define pte_offset(pmd, addr) \
	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

/* pte tables are exactly one page (asserted in pte_alloc_one()). */
#define pte_free(pte) free_page(pte)
79 static inline pte_t *pte_alloc_one(void)
80 {
81 	assert(PTRS_PER_PTE * sizeof(pte_t) == PAGE_SIZE);
82 	pte_t *pte = alloc_page();
83 	memset(pte, 0, PTRS_PER_PTE * sizeof(pte_t));
84 	return pte;
85 }
86 static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
87 {
88 	if (pmd_none(*pmd)) {
89 		pte_t *pte = pte_alloc_one();
90 		pmd_val(*pmd) = pgtable_pa(pte) | PMD_TYPE_TABLE;
91 	}
92 	return pte_offset(pmd, addr);
93 }
94 
95 #endif /* _ASMARM_PGTABLE_H_ */
96