/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASMPPC64_PGTABLE_H_
#define _ASMPPC64_PGTABLE_H_
/*
 * Copyright (C) 2024, IBM Inc, Nicholas Piggin <npiggin@gmail.com>
 *
 * Derived from Linux kernel MMU code.
 */
#include <alloc.h>
#include <alloc_page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>

#include <linux/compiler.h>

/*
 * We can convert va <=> pa page table addresses with simple casts
 * because we always allocate their pages with alloc_page(), and
 * alloc_page() always returns identity mapped pages.
 */
#define pgtable_va(x)		((void *)(unsigned long)(x))
#define pgtable_pa(x)		((unsigned long)(x))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pud_none(pud)		(!pud_val(pud))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pte_none(pte)		(!pte_val(pte))

/* Page table entries are stored big-endian, hence the cpu_to_be64(). */
#define pgd_valid(pgd)		(pgd_val(pgd) & cpu_to_be64(_PAGE_VALID))
#define pud_valid(pud)		(pud_val(pud) & cpu_to_be64(_PAGE_VALID))
#define pmd_valid(pmd)		(pmd_val(pmd) & cpu_to_be64(_PAGE_VALID))
#define pte_valid(pte)		(pte_val(pte) & cpu_to_be64(_PAGE_VALID))

#define pmd_huge(pmd)		false

/*
 * Return the identity-mapped virtual address of the next-level table
 * pointed to by a directory entry. The low 12 bits hold flag and size
 * fields rather than address bits, so mask them off.
 */
static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
	return pgtable_va(be64_to_cpu(pgd_val(pgd)) & PHYS_MASK & ~0xfffULL);
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return pgtable_va(be64_to_cpu(pud_val(pud)) & PHYS_MASK & ~0xfffULL);
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return pgtable_va(be64_to_cpu(pmd_val(pmd)) & PHYS_MASK & ~0xfffULL);
}

#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(pt, addr)	((pt) + pgd_index(addr))
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgd, addr)	(pgd_page_vaddr(*(pgd)) + pud_index(addr))
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(pud, addr)	(pud_page_vaddr(*(pud)) + pmd_index(addr))
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(pmd, addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pgd_free(pgd)		free(pgd)
static inline pgd_t *pgd_alloc_one(void)
{
	size_t sz = PTRS_PER_PGD * sizeof(pgd_t);
	pgd_t *pgd = memalign_pages(sz, sz);
	memset(pgd, 0, sz);
	return pgd;
}

/*
 * The low bits of a directory entry encode the size of the table it
 * points to as log2(size in bytes) - 3, so 12 - 3 below describes a
 * 4kB next-level table.
 */
#define pud_free(pud)		free(pud)
static inline pud_t *pud_alloc_one(void)
{
	size_t sz = PTRS_PER_PUD * sizeof(pud_t);
	pud_t *pud = memalign_pages(sz, sz);
	memset(pud, 0, sz);
	return pud;
}
static inline pud_t *pud_alloc(pgd_t *pgd, unsigned long addr)
{
	if (pgd_none(*pgd)) {
		pgd_t entry;
		pgd_val(entry) = cpu_to_be64(pgtable_pa(pud_alloc_one()) |
				_PAGE_VALID | (12 - 3) /* 4k pud page */);
		WRITE_ONCE(*pgd, entry);
	}
	return pud_offset(pgd, addr);
}

#define pmd_free(pmd)		free(pmd)
static inline pmd_t *pmd_alloc_one(void)
{
	size_t sz = PTRS_PER_PMD * sizeof(pmd_t);
	pmd_t *pmd = memalign_pages(sz, sz);
	memset(pmd, 0, sz);
	return pmd;
}
static inline pmd_t *pmd_alloc(pud_t *pud, unsigned long addr)
{
	if (pud_none(*pud)) {
		pud_t entry;
		pud_val(entry) = cpu_to_be64(pgtable_pa(pmd_alloc_one()) |
				_PAGE_VALID | (12 - 3) /* 4k pmd page */);
		WRITE_ONCE(*pud, entry);
	}
	return pmd_offset(pud, addr);
}

#define pte_free(pte)		free(pte)
static inline pte_t *pte_alloc_one(void)
{
	size_t sz = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte = memalign_pages(sz, sz);
	memset(pte, 0, sz);
	return pte;
}
static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
{
	if (pmd_none(*pmd)) {
		pmd_t entry;
		pmd_val(entry) = cpu_to_be64(pgtable_pa(pte_alloc_one()) |
				_PAGE_VALID |
				(21 - PAGE_SHIFT) /* 4k/256B pte page */);
		WRITE_ONCE(*pmd, entry);
	}
	return pte_offset(pmd, addr);
}
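
/*
 * Usage sketch (illustration only, not an API defined by this header):
 * how a caller might walk and populate all four levels to map a single
 * page. `pgtable`, `vaddr`, `paddr` and `prot` are hypothetical
 * caller-supplied values, and the leaf PTE encoding is an assumption
 * modelled on the directory entries above:
 *
 *	pgd_t *pgdp = pgd_offset(pgtable, vaddr);
 *	pud_t *pudp = pud_alloc(pgdp, vaddr);
 *	pmd_t *pmdp = pmd_alloc(pudp, vaddr);
 *	pte_t *ptep = pte_alloc(pmdp, vaddr);
 *	pte_t entry;
 *
 *	pte_val(entry) = cpu_to_be64((paddr & PHYS_MASK & PAGE_MASK) |
 *				     _PAGE_VALID | prot);
 *	WRITE_ONCE(*ptep, entry);
 */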

#endif /* _ASMPPC64_PGTABLE_H_ */