/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>

.section .init

.globl start
start:
	/*
	 * Bootloader params are in x0-x3.
	 * The physical address of the dtb is in x0; x1-x3 are reserved.
	 * See the kernel doc Documentation/arm64/booting.txt
	 */
	mov	x4, #1
	msr	spsel, x4		/* SPSel = 1: use SP_ELx for the stack */
	isb
	adr	x4, stackptr
	mov	sp, x4
	stp	x0, x1, [sp, #-16]!

	/* Enable FP/ASIMD */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* complete setup */
	ldp	x0, x1, [sp], #16
	bl	setup

	/* run the test */
	adr	x0, __argc
	ldr	x0, [x0]
	adr	x1, __argv
	bl	main
	bl	exit
	b	halt

exceptions_init:
	adr	x0, vector_table
	msr	vbar_el1, x0
	isb
	ret

.text

.globl secondary_entry
secondary_entry:
	/* Enable FP/ASIMD */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* enable the MMU */
	adr	x0, mmu_idmap
	ldr	x0, [x0]
	bl	asm_mmu_enable

	/* set the stack */
	adr	x1, secondary_data
	ldr	x0, [x1]
	mov	sp, x0

	/* finish init in C code */
	bl	secondary_cinit

	/* x0 is now the entry function, run it */
	br	x0

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 * Inputs:
 *   x0 is the base address of the translation table
 * Outputs: none
 *
 * Adapted from
 *   arch/arm64/kernel/head.S
 *   arch/arm64/mm/proc.S
 */

/*
 * Memory region attributes for LPAE:
 *
 *   n = AttrIndx[2:0]
 *                      n       MAIR
 *   DEVICE_nGnRnE      000     00000000
 *   DEVICE_nGnRE       001     00000100
 *   DEVICE_GRE         010     00001100
 *   NORMAL_NC          011     01000100
 *   NORMAL             100     11111111
 */
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
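/*
 * For illustration: assuming the MT_* indices follow the n column in
 * the table above (MT_DEVICE_nGnRnE = 0 through MT_NORMAL = 4, as
 * defined in asm/pgtable-hwdef.h), the five MAIR() terms loaded below
 * combine into a single 64-bit value:
 *
 *   0x00 << 0 | 0x04 << 8 | 0x0c << 16 | 0x44 << 24 | 0xff << 32
 *     = 0x000000ff440c0400
 *
 * i.e. attribute index n selects byte n of MAIR_EL1.
 */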
.globl asm_mmu_enable
asm_mmu_enable:
	ic	iallu			// I+BTB cache invalidate
	tlbi	vmalle1is		// invalidate I + D TLBs
	dsb	ish

	/* TCR */
	ldr	x1, =TCR_TxSZ(VA_BITS) |		\
		     TCR_TG0_64K | TCR_TG1_64K |	\
		     TCR_IRGN_WBWA | TCR_ORGN_WBWA |	\
		     TCR_SHARED
	mrs	x2, id_aa64mmfr0_el1
	bfi	x1, x2, #32, #3		/* IPS = ID_AA64MMFR0_EL1.PARange */
	msr	tcr_el1, x1

	/* MAIR */
	ldr	x1, =MAIR(0x00, MT_DEVICE_nGnRnE) |	\
		     MAIR(0x04, MT_DEVICE_nGnRE) |	\
		     MAIR(0x0c, MT_DEVICE_GRE) |	\
		     MAIR(0x44, MT_NORMAL_NC) |		\
		     MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x1

	/* TTBR0 */
	msr	ttbr0_el1, x0
	isb

	/* SCTLR */
	mrs	x1, sctlr_el1
	orr	x1, x1, SCTLR_EL1_C
	orr	x1, x1, SCTLR_EL1_I
	orr	x1, x1, SCTLR_EL1_M
	msr	sctlr_el1, x1
	isb

	ret

/*
 * Vectors
 * Adapted from arch/arm64/kernel/entry.S
 */
.macro vector_stub, name, vec
\name:
	stp	 x0,  x1, [sp, #-S_FRAME_SIZE]!
	stp	 x2,  x3, [sp,  #16]
	stp	 x4,  x5, [sp,  #32]
	stp	 x6,  x7, [sp,  #48]
	stp	 x8,  x9, [sp,  #64]
	stp	x10, x11, [sp,  #80]
	stp	x12, x13, [sp,  #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1
	mrs	x2, spsr_el1
	stp	x1, x2, [sp, #S_PC]

	mov	x0, \vec
	mov	x1, sp
	mrs	x2, esr_el1
	bl	do_handle_exception

	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	.if \vec >= 8
	ldr	x1, [sp, #S_SP]
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp,  #96]
	ldp	x10, x11, [sp,  #80]
	ldp	 x8,  x9, [sp,  #64]
	ldp	 x6,  x7, [sp,  #48]
	ldp	 x4,  x5, [sp,  #32]
	ldp	 x2,  x3, [sp,  #16]
	ldp	 x0,  x1, [sp], #S_FRAME_SIZE

	eret
.endm

vector_stub	el1t_sync,     0
vector_stub	el1t_irq,      1
vector_stub	el1t_fiq,      2
vector_stub	el1t_error,    3

vector_stub	el1h_sync,     4
vector_stub	el1h_irq,      5
vector_stub	el1h_fiq,      6
vector_stub	el1h_error,    7

vector_stub	el0_sync_64,   8
vector_stub	el0_irq_64,    9
vector_stub	el0_fiq_64,   10
vector_stub	el0_error_64, 11

vector_stub	el0_sync_32,  12
vector_stub	el0_irq_32,   13
vector_stub	el0_fiq_32,   14
vector_stub	el0_error_32, 15

.section .text.ex

.macro ventry, label
.align 7
	b	\label
.endm

.align 11
vector_table:
	ventry	el1t_sync			// Synchronous EL1t
	ventry	el1t_irq			// IRQ EL1t
	ventry	el1t_fiq			// FIQ EL1t
	ventry	el1t_error			// Error EL1t

	ventry	el1h_sync			// Synchronous EL1h
	ventry	el1h_irq			// IRQ EL1h
	ventry	el1h_fiq			// FIQ EL1h
	ventry	el1h_error			// Error EL1h

	ventry	el0_sync_64			// Synchronous 64-bit EL0
	ventry	el0_irq_64			// IRQ 64-bit EL0
	ventry	el0_fiq_64			// FIQ 64-bit EL0
	ventry	el0_error_64			// Error 64-bit EL0

	ventry	el0_sync_32			// Synchronous 32-bit EL0
	ventry	el0_irq_32			// IRQ 32-bit EL0
	ventry	el0_fiq_32			// FIQ 32-bit EL0
	ventry	el0_error_32			// Error 32-bit EL0
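/*
 * Table geometry: the .align 11 above places vector_table on a
 * 2^11 = 2048-byte boundary, as VBAR_EL1 requires, and the .align 7
 * in ventry pads each entry to 2^7 = 128 bytes, so the 16 entries
 * fill the 2KB table exactly (16 * 128 = 2048).
 */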