// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/string.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "pi.h"

extern const u8 __eh_frame_start[], __eh_frame_end[];

extern void idmap_cpu_replace_ttbr1(void *pgdir);

/* Map the physical range [start, end) at virtual address start + va_offset */
static void __init map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset,
			       void *start, void *end, pgprot_t prot,
			       bool may_use_cont, int root_level)
{
	map_range(pgd, ((u64)start + va_offset) & ~PAGE_OFFSET,
		  ((u64)end + va_offset) & ~PAGE_OFFSET, (u64)start,
		  prot, root_level, (pte_t *)pg_dir, may_use_cont, 0);
}

/* Unmap [start, end) by remapping it with empty permission attributes */
static void __init unmap_segment(pgd_t *pg_dir, u64 va_offset, void *start,
				 void *end, int root_level)
{
	map_segment(pg_dir, NULL, va_offset, start, end, __pgprot(0),
		    false, root_level);
}

static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
{
	bool enable_scs = IS_ENABLED(CONFIG_UNWIND_PATCH_PAC_INTO_SCS);
	bool twopass = IS_ENABLED(CONFIG_RELOCATABLE);
	u64 pgdp = (u64)init_pg_dir + PAGE_SIZE;
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;
	pgprot_t prot;

	/*
	 * External debuggers may need to write directly to the text mapping to
	 * install SW breakpoints. Allow this (only) when explicitly requested
	 * with rodata=off.
	 */
	if (arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF))
		text_prot = PAGE_KERNEL_EXEC;

	/*
	 * We only enable the shadow call stack dynamically if we are running
	 * on a system that does not implement PAC or BTI. PAC and SCS provide
	 * roughly the same level of protection, and BTI relies on the PACIASP
	 * instructions serving as landing pads, preventing us from patching
	 * those instructions into something else.
	 */
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && cpu_has_pac())
		enable_scs = false;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && cpu_has_bti()) {
		enable_scs = false;

		/*
		 * If we have a CPU that supports BTI and a kernel built for
		 * BTI, then mark the kernel executable text as guarded pages
		 * now so we don't have to rewrite the page tables later.
		 */
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
	}

	/* Map all code read-write on the first pass if needed */
	twopass |= enable_scs;
	prot = twopass ? data_prot : text_prot;
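
	/*
	 * Note: when twopass is set, .text and .init.text are first mapped
	 * writable so that relocate_kernel() and scs_patch() can patch the
	 * code in place; both regions are remapped with text_prot below once
	 * patching is complete.
	 */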

	map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot,
		    !twopass, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata,
		    __inittext_begin, data_prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __inittext_begin,
		    __inittext_end, prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __initdata_begin,
		    __initdata_end, data_prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, _data, _end, data_prot,
		    true, root_level);
	dsb(ishst);

	idmap_cpu_replace_ttbr1(init_pg_dir);

	if (twopass) {
		if (IS_ENABLED(CONFIG_RELOCATABLE))
			relocate_kernel(kaslr_offset);

		if (enable_scs) {
			scs_patch(__eh_frame_start + va_offset,
				  __eh_frame_end - __eh_frame_start);
			asm("ic ialluis");

			dynamic_scs_is_enabled = true;
		}

		/*
		 * Unmap the text region before remapping it, to avoid
		 * potential TLB conflicts when creating the contiguous
		 * descriptors.
		 */
		unmap_segment(init_pg_dir, va_offset, _stext, _etext,
			      root_level);
		dsb(ishst);
		isb();
		__tlbi(vmalle1);
		isb();

		/*
		 * Remap these segments with different permissions; no new
		 * page table allocations should be needed.
		 */
		map_segment(init_pg_dir, NULL, va_offset, _stext, _etext,
			    text_prot, true, root_level);
		map_segment(init_pg_dir, NULL, va_offset, __inittext_begin,
			    __inittext_end, text_prot, false, root_level);
	}

	/* Copy the root page table to its final location */
	memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE);
	dsb(ishst);
	idmap_cpu_replace_ttbr1(swapper_pg_dir);
}

/*
 * Set TCR.DS (and IPS to the supported PA range) and install a new TTBR0
 * value, with the MMU disabled across the update and the TLBs invalidated
 * before translation is re-enabled.
 */
static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
{
	u64 sctlr = read_sysreg(sctlr_el1);
	u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
	u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
					ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	tcr &= ~TCR_IPS_MASK;
	tcr |= parange << TCR_IPS_SHIFT;

	asm("	msr	sctlr_el1, %0	;"
	    "	isb			;"
	    "	msr	ttbr0_el1, %1	;"
	    "	msr	tcr_el1, %2	;"
	    "	isb			;"
	    "	tlbi	vmalle1		;"
	    "	dsb	nsh		;"
	    "	isb			;"
	    "	msr	sctlr_el1, %3	;"
	    "	isb			;"
	    ::	"r"(sctlr & ~SCTLR_ELx_M), "r"(ttbr), "r"(tcr), "r"(sctlr));
}

static void __init remap_idmap_for_lpa2(void)
{
	/* clear the bits that change meaning once LPA2 is turned on */
	ptdesc_t mask = PTE_SHARED;

	/*
	 * We have to clear bits [9:8] in all block or page descriptors in the
	 * initial ID map, as otherwise they will be (mis)interpreted as
	 * physical address bits once we flick the LPA2 switch (TCR.DS). Since
	 * we cannot manipulate live descriptors in that way without creating
	 * potential TLB conflicts, let's create another temporary ID map in
	 * an LPA2-compatible fashion, and update the initial ID map while
	 * running from that.
	 */
	create_init_idmap(init_pg_dir, mask);
	dsb(ishst);
	set_ttbr0_for_lpa2((u64)init_pg_dir);

	/*
	 * Recreate the initial ID map with the same granularity as before.
	 * Don't bother with the FDT; we no longer need it after this.
	 */
	memset(init_idmap_pg_dir, 0,
	       (u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);

	create_init_idmap(init_idmap_pg_dir, mask);
	dsb(ishst);

	/* switch back to the updated initial ID map */
	set_ttbr0_for_lpa2((u64)init_idmap_pg_dir);

	/* wipe the temporary ID map from memory */
	memset(init_pg_dir, 0, (u64)init_pg_end - (u64)init_pg_dir);
}

static void __init map_fdt(u64 fdt)
{
	static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE);
	u64 efdt = fdt + MAX_FDT_SIZE;
	u64 ptep = (u64)ptes;

	/*
	 * Map up to MAX_FDT_SIZE bytes, but avoid overlap with
	 * the kernel image.
	 */
	map_range(&ptep, fdt, (u64)_text > fdt ? min((u64)_text, efdt) : efdt,
		  fdt, PAGE_KERNEL, IDMAP_ROOT_LEVEL,
		  (pte_t *)init_idmap_pg_dir, false, 0);
	dsb(ishst);
}

/*
 * PI version of the Cavium Erratum 27456 detection; the erratum makes it
 * impossible to use non-global mappings.
 */
static bool __init ng_mappings_allowed(void)
{
	static const struct midr_range cavium_erratum_27456_cpus[] __initconst = {
		/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
		/* Cavium ThunderX, T81 pass 1.0 */
		MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
		{},
	};

	for (const struct midr_range *r = cavium_erratum_27456_cpus; r->model; r++) {
		if (midr_is_cpu_model_range(read_cpuid_id(), r->model,
					    r->rv_min, r->rv_max))
			return false;
	}

	return true;
}

/* Entered from the asm boot code, running from the initial ID map */
asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
{
	static char const chosen_str[] __initconst = "/chosen";
	u64 va_base, pa_base = (u64)&_text;
	u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN;
	int root_level = 4 - CONFIG_PGTABLE_LEVELS;
	int va_bits = VA_BITS;
	int chosen;

	map_fdt((u64)fdt);

	/* Clear BSS and the initial page tables */
	memset(__bss_start, 0, (u64)init_pg_end - (u64)__bss_start);

	/* Parse the command line for CPU feature overrides */
	chosen = fdt_path_offset(fdt, chosen_str);
	init_feature_override(boot_status, fdt, chosen);

	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && !cpu_has_lva()) {
		va_bits = VA_BITS_MIN;
	} else if (IS_ENABLED(CONFIG_ARM64_LPA2) && !cpu_has_lpa2()) {
		va_bits = VA_BITS_MIN;
		root_level++;
	}

	if (va_bits > VA_BITS_MIN)
		sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits));

	/*
	 * The virtual KASLR displacement modulo 2 MiB is decided by the
	 * physical placement of the image, as otherwise we might not be able
	 * to create the early kernel mapping using 2 MiB block descriptors. So
	 * take the low bits of the KASLR offset from the physical address, and
	 * fill in the high bits from the seed.
	 */
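	/*
	 * Worked example (illustrative values): with MIN_KIMG_ALIGN == SZ_2M
	 * and the image loaded at PA 0x40280000, kaslr_offset starts out as
	 * 0x40280000 % 0x200000 == 0x80000. A seed of 0x74ce0000 then only
	 * contributes its 2 MiB aligned high bits, 0x74ce0000 & ~0x1fffff ==
	 * 0x74c00000, giving a final kaslr_offset of 0x74c80000.
	 */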
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		u64 kaslr_seed = kaslr_early_init(fdt, chosen);

		if (kaslr_seed && kaslr_requires_kpti())
			arm64_use_ng_mappings = ng_mappings_allowed();

		kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
	}

	if (IS_ENABLED(CONFIG_ARM64_LPA2) && va_bits > VA_BITS_MIN)
		remap_idmap_for_lpa2();

	va_base = KIMAGE_VADDR + kaslr_offset;
	map_kernel(kaslr_offset, va_base - pa_base, root_level);
}