/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#define __ASSEMBLY__
#include <auxinfo.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/thread_info.h>
#include <asm/sysreg.h>

.macro zero_range, tmp1, tmp2
9998:   cmp     \tmp1, \tmp2
        b.eq    9997f
        stp     xzr, xzr, [\tmp1], #16
        b       9998b
9997:
.endm

.section .init

/*
 * Bootloader params are in x0-x3. See kernel doc
 * Documentation/arm64/booting.txt
 */
.globl start
start:
        /* get our base address */
        adrp    x4, start
        add     x4, x4, :lo12:start

        /*
         * Update all R_AARCH64_RELATIVE relocations using the table
         * of Elf64_Rela entries between reloc_start/end. The build
         * will not emit other relocation types.
         *
         * struct Elf64_Rela {
         *      uint64_t r_offset;
         *      uint64_t r_info;
         *      int64_t  r_addend;
         * }
         */
        adrp    x5, reloc_start
        add     x5, x5, :lo12:reloc_start
        adrp    x6, reloc_end
        add     x6, x6, :lo12:reloc_end
1:
        cmp     x5, x6
        b.hs    1f
        ldr     x7, [x5]                        // r_offset
        ldr     x8, [x5, #16]                   // r_addend
        add     x8, x8, x4                      // val = base + r_addend
        str     x8, [x4, x7]                    // base[r_offset] = val
        add     x5, x5, #24
        b       1b
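        /*
         * For reference, the relocation loop above corresponds to the
         * following C sketch. It is illustrative only and not part of
         * the build; it assumes the Elf64_Rela layout shown in the
         * comment above and that reloc_start/reloc_end bound the table:
         *
         *      for (Elf64_Rela *r = reloc_start; r < reloc_end; ++r)
         *              *(uint64_t *)(base + r->r_offset) = base + r->r_addend;
         *
         * Only R_AARCH64_RELATIVE entries exist, so r_info is never read.
         */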
1:
        /* zero BSS */
        adrp    x4, bss
        add     x4, x4, :lo12:bss
        adrp    x5, ebss
        add     x5, x5, :lo12:ebss
        zero_range x4, x5

        /* zero and set up stack */
        adrp    x5, stacktop
        add     x5, x5, :lo12:stacktop
        sub     x4, x5, #THREAD_SIZE
        zero_range x4, x5
        mov     x4, #1
        msr     spsel, x4
        adrp    x4, stackptr
        add     sp, x4, :lo12:stackptr

        /* enable FP/ASIMD */
        mov     x4, #(3 << 20)
        msr     cpacr_el1, x4

        /* set up exception handling */
        bl      exceptions_init

        /* complete setup */
        bl      setup                           // x0 is the addr of the dtb
        bl      get_mmu_off
        cbnz    x0, 1f
        bl      setup_vm

1:
        /* run the test */
        adrp    x0, __argc
        ldr     x0, [x0, :lo12:__argc]
        adrp    x1, __argv
        add     x1, x1, :lo12:__argv
        adrp    x2, __environ
        add     x2, x2, :lo12:__environ
        bl      main
        bl      exit
        b       halt

exceptions_init:
        adrp    x4, vector_table
        add     x4, x4, :lo12:vector_table
        msr     vbar_el1, x4
        isb
        ret

.text

.globl get_mmu_off
get_mmu_off:
        adrp    x0, auxinfo
        ldr     x0, [x0, :lo12:auxinfo + 8]
        and     x0, x0, #AUXINFO_MMU_OFF
        ret

.globl secondary_entry
secondary_entry:
        /* Enable FP/ASIMD */
        mov     x0, #(3 << 20)
        msr     cpacr_el1, x0

        /* set up exception handling */
        bl      exceptions_init

        /* enable the MMU unless requested off */
        bl      get_mmu_off
        cbnz    x0, 1f
        adrp    x0, mmu_idmap
        ldr     x0, [x0, :lo12:mmu_idmap]
        bl      asm_mmu_enable

1:
        /* set the stack */
        adrp    x0, secondary_data
        ldr     x0, [x0, :lo12:secondary_data]
        mov     sp, x0

        /* finish init in C code */
        bl      secondary_cinit

        /* x0 is now the entry function, run it */
        blr     x0
        b       do_idle

.globl halt
halt:
1:      wfi
        b       1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     x0 is the base address of the translation table
 *   Outputs: none
 *
 * Adapted from
 *   arch/arm64/kernel/head.S
 *   arch/arm64/mm/proc.S
 */

/*
 * Memory region attributes for LPAE:
 *
 *   n = AttrIndx[2:0]
 *                      n       MAIR
 *   DEVICE_nGnRnE      000     00000000
 *   DEVICE_nGnRE       001     00000100
 *   DEVICE_GRE         010     00001100
 *   NORMAL_NC          011     01000100
 *   NORMAL             100     11111111
 *   NORMAL_WT          101     10111011
 *   DEVICE_nGRE        110     00001000
 */
#define MAIR(attr, mt) ((attr) << ((mt) * 8))

#if PAGE_SIZE == SZ_64K
#define TCR_TG_FLAGS    TCR_TG0_64K | TCR_TG1_64K
#elif PAGE_SIZE == SZ_16K
#define TCR_TG_FLAGS    TCR_TG0_16K | TCR_TG1_16K
#elif PAGE_SIZE == SZ_4K
#define TCR_TG_FLAGS    TCR_TG0_4K | TCR_TG1_4K
#endif

.globl asm_mmu_enable
asm_mmu_enable:
        tlbi    vmalle1                         // invalidate I + D TLBs
        dsb     nsh

        /* TCR */
        ldr     x1, =TCR_TxSZ(VA_BITS) |                \
                     TCR_TG_FLAGS |                     \
                     TCR_IRGN_WBWA | TCR_ORGN_WBWA |    \
                     TCR_SHARED
        mrs     x2, id_aa64mmfr0_el1
        bfi     x1, x2, #32, #3
        msr     tcr_el1, x1

        /* MAIR */
        ldr     x1, =MAIR(0x00, MT_DEVICE_nGnRnE) |     \
                     MAIR(0x04, MT_DEVICE_nGnRE) |      \
                     MAIR(0x0c, MT_DEVICE_GRE) |        \
                     MAIR(0x44, MT_NORMAL_NC) |         \
                     MAIR(0xff, MT_NORMAL) |            \
                     MAIR(0xbb, MT_NORMAL_WT) |         \
                     MAIR(0x08, MT_DEVICE_nGRE)
        msr     mair_el1, x1

        /* TTBR0 */
        msr     ttbr0_el1, x0
        isb

        /* SCTLR */
        mrs     x1, sctlr_el1
        orr     x1, x1, SCTLR_EL1_C
        orr     x1, x1, SCTLR_EL1_I
        orr     x1, x1, SCTLR_EL1_M
        msr     sctlr_el1, x1
        isb

        ret

.globl asm_mmu_disable
asm_mmu_disable:
        mrs     x0, sctlr_el1
        bic     x0, x0, SCTLR_EL1_M
        msr     sctlr_el1, x0
        isb

        /* Clean + invalidate the entire memory */
        adrp    x0, __phys_offset
        ldr     x0, [x0, :lo12:__phys_offset]
        adrp    x1, __phys_end
        ldr     x1, [x1, :lo12:__phys_end]
        sub     x1, x1, x0
        dcache_by_line_op civac, sy, x0, x1, x2, x3

        ret

/*
 * Vectors
 * Adapted from arch/arm64/kernel/entry.S
 */
.macro vector_stub, name, vec
\name:
        stp      x0,  x1, [sp, #-S_FRAME_SIZE]!
        stp      x2,  x3, [sp,  #16]
        stp      x4,  x5, [sp,  #32]
        stp      x6,  x7, [sp,  #48]
        stp      x8,  x9, [sp,  #64]
        stp     x10, x11, [sp,  #80]
        stp     x12, x13, [sp,  #96]
        stp     x14, x15, [sp, #112]
        stp     x16, x17, [sp, #128]
        stp     x18, x19, [sp, #144]
        stp     x20, x21, [sp, #160]
        stp     x22, x23, [sp, #176]
        stp     x24, x25, [sp, #192]
        stp     x26, x27, [sp, #208]
        stp     x28, x29, [sp, #224]

        str     x30, [sp, #S_LR]

        .if \vec >= 8
        mrs     x1, sp_el0
        .else
        add     x1, sp, #S_FRAME_SIZE
        .endif
        str     x1, [sp, #S_SP]

        mrs     x1, elr_el1
        mrs     x2, spsr_el1
        stp     x1, x2, [sp, #S_PC]

        mov     x0, \vec
        mov     x1, sp
        mrs     x2, esr_el1
        bl      do_handle_exception

        ldp     x1, x2, [sp, #S_PC]
        msr     spsr_el1, x2
        msr     elr_el1, x1

        .if \vec >= 8
        ldr     x1, [sp, #S_SP]
        msr     sp_el0, x1
        .endif

        ldr     x30, [sp, #S_LR]

        ldp     x28, x29, [sp, #224]
        ldp     x26, x27, [sp, #208]
        ldp     x24, x25, [sp, #192]
        ldp     x22, x23, [sp, #176]
        ldp     x20, x21, [sp, #160]
        ldp     x18, x19, [sp, #144]
        ldp     x16, x17, [sp, #128]
        ldp     x14, x15, [sp, #112]
        ldp     x12, x13, [sp,  #96]
        ldp     x10, x11, [sp,  #80]
        ldp      x8,  x9, [sp,  #64]
        ldp      x6,  x7, [sp,  #48]
        ldp      x4,  x5, [sp,  #32]
        ldp      x2,  x3, [sp,  #16]
        ldp      x0,  x1, [sp], #S_FRAME_SIZE

        eret
.endm

vector_stub     el1t_sync,     0
vector_stub     el1t_irq,      1
vector_stub     el1t_fiq,      2
vector_stub     el1t_error,    3

vector_stub     el1h_sync,     4
vector_stub     el1h_irq,      5
vector_stub     el1h_fiq,      6
vector_stub     el1h_error,    7

vector_stub     el0_sync_64,   8
vector_stub     el0_irq_64,    9
vector_stub     el0_fiq_64,   10
vector_stub     el0_error_64, 11

vector_stub     el0_sync_32,  12
vector_stub     el0_irq_32,   13
vector_stub     el0_fiq_32,   14
vector_stub     el0_error_32, 15
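/*
 * Each stub above builds a struct pt_regs frame on the stack (the S_*
 * offsets come from asm-offsets.h) and calls out to C with x0 = vector
 * number, x1 = pointer to the frame, x2 = ESR_EL1. A minimal C-side
 * sketch consistent with that marshalling; the function name is the
 * one branched to above, but the parameter types are inferred from
 * the register setup, not quoted from the lib headers:
 *
 *      void do_handle_exception(unsigned long vec, struct pt_regs *regs,
 *                               unsigned int esr);
 *
 * Vectors 0-7 are taken from EL1 and 8-15 from EL0, which is why only
 * the vec >= 8 stubs save and restore sp_el0.
 */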
.section .text.ex

.macro ventry, label
.align 7
        b       \label
.endm

.align 11
vector_table:
        ventry  el1t_sync                       // Synchronous EL1t
        ventry  el1t_irq                        // IRQ EL1t
        ventry  el1t_fiq                        // FIQ EL1t
        ventry  el1t_error                      // Error EL1t

        ventry  el1h_sync                       // Synchronous EL1h
        ventry  el1h_irq                        // IRQ EL1h
        ventry  el1h_fiq                        // FIQ EL1h
        ventry  el1h_error                      // Error EL1h

        ventry  el0_sync_64                     // Synchronous 64-bit EL0
        ventry  el0_irq_64                      // IRQ 64-bit EL0
        ventry  el0_fiq_64                      // FIQ 64-bit EL0
        ventry  el0_error_64                    // Error 64-bit EL0

        ventry  el0_sync_32                     // Synchronous 32-bit EL0
        ventry  el0_irq_32                      // IRQ 32-bit EL0
        ventry  el0_fiq_32                      // FIQ 32-bit EL0
        ventry  el0_error_32                    // Error 32-bit EL0
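/*
 * Table geometry, for reference: .align 11 provides the 2KB alignment
 * VBAR_EL1 requires, and .align 7 pads each ventry to 2^7 = 128 bytes,
 * so the entry for vector n sits at vector_table + n * 0x80. For
 * example, a synchronous exception taken from EL1 with SPSel = 1
 * (vec 4, el1h_sync) vectors to vector_table + 0x200.
 */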