/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#define __ASSEMBLY__
#include <auxinfo.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>

.section .init

/*
 * Bootloader params are in x0-x3. See kernel doc
 * Documentation/arm64/booting.txt
 */
.globl start
start:
	/* get our base address */
	adrp	x4, start
	add	x4, x4, :lo12:start

	/*
	 * Update all R_AARCH64_RELATIVE relocations using the table
	 * of Elf64_Rela entries between reloc_start/end. The build
	 * will not emit other relocation types.
	 *
	 * struct Elf64_Rela {
	 *	uint64_t r_offset;
	 *	uint64_t r_info;
	 *	int64_t  r_addend;
	 * }
	 */
	adrp	x5, reloc_start
	add	x5, x5, :lo12:reloc_start
	adrp	x6, reloc_end
	add	x6, x6, :lo12:reloc_end
1:
	cmp	x5, x6
	b.hs	1f
	ldr	x7, [x5]			// r_offset
	ldr	x8, [x5, #16]			// r_addend
	add	x8, x8, x4			// val = base + r_addend
	str	x8, [x4, x7]			// base[r_offset] = val
	add	x5, x5, #24
	b	1b

1:
	/* set up stack */
	mov	x4, #1
	msr	spsel, x4
	isb
	adrp	x4, stackptr
	add	sp, x4, :lo12:stackptr

	/* enable FP/ASIMD */
	mov	x4, #(3 << 20)
	msr	cpacr_el1, x4

	/* set up exception handling */
	bl	exceptions_init

	/* complete setup */
	bl	setup				// x0 is the addr of the dtb
	bl	get_mmu_off
	cbnz	x0, 1f
	bl	setup_vm

1:
	/* run the test */
	adrp	x0, __argc
	ldr	x0, [x0, :lo12:__argc]
	adrp	x1, __argv
	add	x1, x1, :lo12:__argv
	adrp	x2, __environ
	add	x2, x2, :lo12:__environ
	bl	main
	bl	exit
	b	halt

exceptions_init:
	adrp	x4, vector_table
	add	x4, x4, :lo12:vector_table
	msr	vbar_el1, x4
	isb
	ret

.text

.globl get_mmu_off
get_mmu_off:
	adrp	x0, auxinfo
	ldr	x0, [x0, :lo12:auxinfo + 8]
	and	x0, x0, #AUXINFO_MMU_OFF
	ret

.globl secondary_entry
secondary_entry:
	/* Enable FP/ASIMD */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cbnz	x0, 1f
	adrp	x0, mmu_idmap
	ldr	x0, [x0, :lo12:mmu_idmap]
	bl	asm_mmu_enable

1:
	/* set the stack */
	adrp	x0, secondary_data
	ldr	x0, [x0, :lo12:secondary_data]
	mov	sp, x0

	/* finish init in C code */
	bl	secondary_cinit

	/* x0 is now the entry function, run it */
	blr	x0
	b	do_idle

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     x0 is the base address of the translation table
 *   Outputs: none
 *
 * Adapted from
 *   arch/arm64/kernel/head.S
 *   arch/arm64/mm/proc.S
 */

/*
 * Memory region attributes for LPAE:
 *
 *   n = AttrIndx[2:0]
 *                       n       MAIR
 *   DEVICE_nGnRnE       000     00000000
 *   DEVICE_nGnRE        001     00000100
 *   DEVICE_GRE          010     00001100
 *   NORMAL_NC           011     01000100
 *   NORMAL              100     11111111
 */
#define MAIR(attr, mt)	((attr) << ((mt) * 8))

.globl asm_mmu_enable
asm_mmu_enable:
	tlbi	vmalle1			// invalidate I + D TLBs
	dsb	nsh

	/* TCR */
	ldr	x1, =TCR_TxSZ(VA_BITS) |		\
		     TCR_TG0_64K | TCR_TG1_64K |	\
		     TCR_IRGN_WBWA | TCR_ORGN_WBWA |	\
		     TCR_SHARED
	mrs	x2, id_aa64mmfr0_el1
	bfi	x1, x2, #32, #3
	msr	tcr_el1, x1
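	/*
	 * The bfi above copies ID_AA64MMFR0_EL1.PARange into
	 * TCR_EL1.IPS (bits [34:32]), so the translation regime's
	 * physical address size matches what this CPU implements.
	 */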
	/* MAIR */
	ldr	x1, =MAIR(0x00, MT_DEVICE_nGnRnE) |	\
		     MAIR(0x04, MT_DEVICE_nGnRE) |	\
		     MAIR(0x0c, MT_DEVICE_GRE) |	\
		     MAIR(0x44, MT_NORMAL_NC) |		\
		     MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x1

	/* TTBR0 */
	msr	ttbr0_el1, x0
	isb

	/* SCTLR */
	mrs	x1, sctlr_el1
	orr	x1, x1, SCTLR_EL1_C
	orr	x1, x1, SCTLR_EL1_I
	orr	x1, x1, SCTLR_EL1_M
	msr	sctlr_el1, x1
	isb

	ret

.globl asm_mmu_disable
asm_mmu_disable:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_EL1_M
	msr	sctlr_el1, x0
	isb
	ret

/*
 * Vectors
 * Adapted from arch/arm64/kernel/entry.S
 */
.macro vector_stub, name, vec
\name:
	stp	x0, x1, [sp, #-S_FRAME_SIZE]!
	stp	x2, x3, [sp, #16]
	stp	x4, x5, [sp, #32]
	stp	x6, x7, [sp, #48]
	stp	x8, x9, [sp, #64]
	stp	x10, x11, [sp, #80]
	stp	x12, x13, [sp, #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1
	mrs	x2, spsr_el1
	stp	x1, x2, [sp, #S_PC]

	mov	x0, \vec
	mov	x1, sp
	mrs	x2, esr_el1
	bl	do_handle_exception

	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	.if \vec >= 8
	ldr	x1, [sp, #S_SP]
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp, #96]
	ldp	x10, x11, [sp, #80]
	ldp	x8, x9, [sp, #64]
	ldp	x6, x7, [sp, #48]
	ldp	x4, x5, [sp, #32]
	ldp	x2, x3, [sp, #16]
	ldp	x0, x1, [sp], #S_FRAME_SIZE

	eret
.endm

vector_stub	el1t_sync,     0
vector_stub	el1t_irq,      1
vector_stub	el1t_fiq,      2
vector_stub	el1t_error,    3

vector_stub	el1h_sync,     4
vector_stub	el1h_irq,      5
vector_stub	el1h_fiq,      6
vector_stub	el1h_error,    7

vector_stub	el0_sync_64,   8
vector_stub	el0_irq_64,    9
vector_stub	el0_fiq_64,   10
vector_stub	el0_error_64, 11

vector_stub	el0_sync_32,  12
vector_stub	el0_irq_32,   13
vector_stub	el0_fiq_32,   14
vector_stub	el0_error_32, 15

.section .text.ex

.macro ventry, label
.align 7
	b	\label
.endm

.align 11
vector_table:
	ventry	el1t_sync			// Synchronous EL1t
	ventry	el1t_irq			// IRQ EL1t
	ventry	el1t_fiq			// FIQ EL1t
	ventry	el1t_error			// Error EL1t

	ventry	el1h_sync			// Synchronous EL1h
	ventry	el1h_irq			// IRQ EL1h
	ventry	el1h_fiq			// FIQ EL1h
	ventry	el1h_error			// Error EL1h

	ventry	el0_sync_64			// Synchronous 64-bit EL0
	ventry	el0_irq_64			// IRQ 64-bit EL0
	ventry	el0_fiq_64			// FIQ 64-bit EL0
	ventry	el0_error_64			// Error 64-bit EL0

	ventry	el0_sync_32			// Synchronous 32-bit EL0
	ventry	el0_irq_32			// IRQ 32-bit EL0
	ventry	el0_fiq_32			// FIQ 32-bit EL0
	ventry	el0_error_32			// Error 32-bit EL0