// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_ARM_LPAE
#define _pgd_alloc(mm)		kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL | __GFP_ZERO)
#define _pgd_free(mm, pgd)	kfree(pgd)
#else
#define _pgd_alloc(mm)		__pgd_alloc(mm, 2)
#define _pgd_free(mm, pgd)	__pgd_free(mm, pgd)
#endif

/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	p4d_t *new_p4d, *init_p4d;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = _pgd_alloc(mm);
	if (!new_pgd)
		goto no_pgd;

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_p4d)
		goto no_p4d;

	new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#ifdef CONFIG_KASAN
	/*
	 * Copy PMD table for KASAN shadow mappings.
	 */
	init_pgd = pgd_offset_k(TASK_SIZE);
	init_p4d = p4d_offset(init_pgd, TASK_SIZE);
	init_pud = pud_offset(init_p4d, TASK_SIZE);
	init_pmd = pmd_offset(init_pud, TASK_SIZE);
	new_pmd = pmd_offset(new_pud, TASK_SIZE);
	memcpy(new_pmd, init_pmd,
	       (pmd_index(MODULES_VADDR) - pmd_index(TASK_SIZE))
	       * sizeof(pmd_t));
	clean_dcache_area(new_pmd, PTRS_PER_PMD * sizeof(pmd_t));
#endif /* CONFIG_KASAN */
#endif /* CONFIG_ARM_LPAE */

	if (!vectors_high()) {
		/*
		 * On ARM, the first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_p4d = p4d_alloc(mm, new_pgd, 0);
		if (!new_p4d)
			goto no_p4d;

		new_pud = pud_alloc(mm, new_p4d, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain. This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

		init_p4d = p4d_offset(init_pgd, 0);
		init_pud = pud_offset(init_p4d, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	p4d_free(mm, new_p4d);
no_p4d:
	_pgd_free(mm, new_pgd);
no_pgd:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	p4d = p4d_offset(pgd, 0);
	if (p4d_none_or_clear_bad(p4d))
		goto no_p4d;

	pud = pud_offset(p4d, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	p4d_clear(p4d);
	pud_free(mm, pud);
no_p4d:
	pgd_clear(pgd);
	p4d_free(mm, p4d);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		p4d = p4d_offset(pgd, 0);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pud = pud_offset(p4d, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		p4d_clear(p4d);
		pud_free(mm, pud);
		mm_dec_nr_puds(mm);
		pgd_clear(pgd);
		p4d_free(mm, p4d);
	}
#endif
	_pgd_free(mm, pgd_base);
}
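
/*
 * A minimal usage sketch: pgd_alloc() and pgd_free() above are the
 * per-architecture hooks reached from the generic mm code when an address
 * space is created and torn down. In kernel/fork.c the calls are made
 * roughly as follows (the mm_alloc_pgd()/mm_free_pgd() wrappers live in
 * core kernel code and are shown here only for illustration):
 *
 *	mm->pgd = pgd_alloc(mm);	-- mm_alloc_pgd(), from mm_init()
 *	if (!mm->pgd)
 *		return -ENOMEM;
 *	...
 *	pgd_free(mm, mm->pgd);		-- mm_free_pgd(), from __mmdrop()
 */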