/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>

.section .init

.globl start
start:
	/*
	 * bootloader params are in x0-x3
	 * The physical address of the dtb is in x0, x1-x3 are reserved
	 * See the kernel doc Documentation/arm64/booting.txt
	 */
	mov	x4, #1
	msr	spsel, x4		// use SP_ELx as the stack pointer
	isb
	adr	x4, stackptr
	mov	sp, x4
	stp	x0, x1, [sp, #-16]!	// preserve the dtb pointer for setup

	/* Enable FP/ASIMD */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* complete setup */
	ldp	x0, x1, [sp], #16
	bl	setup

	/* run the test */
	adr	x0, __argc
	ldr	x0, [x0]
	adr	x1, __argv
	bl	main
	bl	exit
	b	halt

exceptions_init:
	adr	x0, vector_table
	msr	vbar_el1, x0
	isb
	ret

.text

.globl secondary_entry
secondary_entry:
	/* Enable FP/ASIMD */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* enable the MMU */
	adr	x0, mmu_idmap
	ldr	x0, [x0]
	bl	asm_mmu_enable

	/* set the stack */
	adr	x1, secondary_data
	ldr	x0, [x1]
	mov	sp, x0

	/* finish init in C code */
	bl	secondary_cinit

	/* x0 is now the entry function, run it */
	br	x0

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     x0 is the base address of the translation table
 *   Outputs: none
 *
 * Adapted from
 *   arch/arm64/kernel/head.S
 *   arch/arm64/mm/proc.S
 */

/*
 * Memory region attributes for LPAE:
 *
 *   n = AttrIndx[2:0]
 *                      n       MAIR
 *   DEVICE_nGnRnE      000     00000000
 *   DEVICE_nGnRE       001     00000100
 *   DEVICE_GRE         010     00001100
 *   NORMAL_NC          011     01000100
 *   NORMAL             100     11111111
 */
#define MAIR(attr, mt)	((attr) << ((mt) * 8))

.globl asm_mmu_enable
asm_mmu_enable:
	ic	iallu			// I+BTB cache invalidate
	tlbi	vmalle1is		// invalidate I + D TLBs
	dsb	ish

	/* TCR */
	ldr	x1, =TCR_TxSZ(VA_BITS) |		\
		    TCR_TG0_64K | TCR_TG1_64K |		\
		    TCR_IRGN_WBWA | TCR_ORGN_WBWA |	\
		    TCR_SHARED
	mrs	x2, id_aa64mmfr0_el1
	bfi	x1, x2, #32, #3		// TCR.IPS = ID_AA64MMFR0_EL1.PARange
	msr	tcr_el1, x1

	/* MAIR */
	ldr	x1, =MAIR(0x00, MT_DEVICE_nGnRnE) |	\
		    MAIR(0x04, MT_DEVICE_nGnRE) |	\
		    MAIR(0x0c, MT_DEVICE_GRE) |		\
		    MAIR(0x44, MT_NORMAL_NC) |		\
		    MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x1

	/* TTBR0 */
	msr	ttbr0_el1, x0
	isb

	/* SCTLR */
	mrs	x1, sctlr_el1
	orr	x1, x1, SCTLR_EL1_C
	orr	x1, x1, SCTLR_EL1_I
	orr	x1, x1, SCTLR_EL1_M
	msr	sctlr_el1, x1
	isb

	ret

.globl asm_mmu_disable
asm_mmu_disable:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_EL1_M
	msr	sctlr_el1, x0
	isb
	ret

/*
 * Vectors
 * Adapted from arch/arm64/kernel/entry.S
 */
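/*
 * Each vector_stub below saves x0-x30, the interrupted stack pointer,
 * elr_el1 and spsr_el1 in a pt_regs-style frame on the stack (the S_*
 * offsets are assumed to come from the generated asm/asm-offsets.h
 * included above), calls do_handle_exception(vector, regs, esr), then
 * unwinds the frame and returns with eret. Vectors taken from EL0
 * (\vec >= 8) save and restore sp_el0 rather than the EL1 stack pointer.
 */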
.macro vector_stub, name, vec
\name:
	stp	 x0,  x1, [sp, #-S_FRAME_SIZE]!
	stp	 x2,  x3, [sp,  #16]
	stp	 x4,  x5, [sp,  #32]
	stp	 x6,  x7, [sp,  #48]
	stp	 x8,  x9, [sp,  #64]
	stp	x10, x11, [sp,  #80]
	stp	x12, x13, [sp,  #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1
	mrs	x2, spsr_el1
	stp	x1, x2, [sp, #S_PC]

	mov	x0, \vec
	mov	x1, sp
	mrs	x2, esr_el1
	bl	do_handle_exception

	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	.if \vec >= 8
	ldr	x1, [sp, #S_SP]
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp,  #96]
	ldp	x10, x11, [sp,  #80]
	ldp	 x8,  x9, [sp,  #64]
	ldp	 x6,  x7, [sp,  #48]
	ldp	 x4,  x5, [sp,  #32]
	ldp	 x2,  x3, [sp,  #16]
	ldp	 x0,  x1, [sp], #S_FRAME_SIZE

	eret
.endm

vector_stub	el1t_sync,     0
vector_stub	el1t_irq,      1
vector_stub	el1t_fiq,      2
vector_stub	el1t_error,    3

vector_stub	el1h_sync,     4
vector_stub	el1h_irq,      5
vector_stub	el1h_fiq,      6
vector_stub	el1h_error,    7

vector_stub	el0_sync_64,   8
vector_stub	el0_irq_64,    9
vector_stub	el0_fiq_64,   10
vector_stub	el0_error_64, 11

vector_stub	el0_sync_32,  12
vector_stub	el0_irq_32,   13
vector_stub	el0_fiq_32,   14
vector_stub	el0_error_32, 15

.section .text.ex

.macro ventry, label
.align 7
	b	\label
.endm

.align 11
vector_table:
	ventry	el1t_sync			// Synchronous EL1t
	ventry	el1t_irq			// IRQ EL1t
	ventry	el1t_fiq			// FIQ EL1t
	ventry	el1t_error			// Error EL1t

	ventry	el1h_sync			// Synchronous EL1h
	ventry	el1h_irq			// IRQ EL1h
	ventry	el1h_fiq			// FIQ EL1h
	ventry	el1h_error			// Error EL1h

	ventry	el0_sync_64			// Synchronous 64-bit EL0
	ventry	el0_irq_64			// IRQ 64-bit EL0
	ventry	el0_fiq_64			// FIQ 64-bit EL0
	ventry	el0_error_64			// Error 64-bit EL0

	ventry	el0_sync_32			// Synchronous 32-bit EL0
	ventry	el0_irq_32			// IRQ 32-bit EL0
	ventry	el0_fiq_32			// FIQ 32-bit EL0
	ventry	el0_error_32			// Error 32-bit EL0
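/*
 * Layout note: .align 7 places each ventry on a 128-byte boundary and
 * .align 11 puts vector_table on a 2KB boundary, matching the entry
 * spacing and base alignment the architecture requires for VBAR_EL1.
 */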