/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>

.section .init

.globl start
start:
	/*
	 * Bootloader params are in x0-x3. The physical address of the
	 * dtb is in x0, x1-x3 are reserved. See the kernel doc
	 * Documentation/arm64/booting.txt.
	 */

	/* Select SP_ELx as the stack pointer register */
	mov	x4, #1
	msr	spsel, x4
	isb

	adr	x4, stackptr
	mov	sp, x4
	stp	x0, x1, [sp, #-16]!	/* preserve the dtb address for setup */

	/* Enable FP/ASIMD */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* complete setup */
	ldp	x0, x1, [sp], #16
	bl	setup

	/* run the test */
	adr	x0, __argc
	ldr	x0, [x0]
	adr	x1, __argv
	adr	x2, __environ
	bl	main
	bl	exit
	b	halt

exceptions_init:
	adr	x0, vector_table
	msr	vbar_el1, x0
	isb
	ret

.text

.globl secondary_entry
secondary_entry:
	/* Enable FP/ASIMD */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* enable the MMU */
	adr	x0, mmu_idmap
	ldr	x0, [x0]
	bl	asm_mmu_enable

	/* set the stack */
	adr	x1, secondary_data
	ldr	x0, [x1]
	mov	sp, x0

	/* finish init in C code */
	bl	secondary_cinit

	/* x0 is now the entry function, run it */
	br	x0

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 * Inputs:
 *   x0 is the base address of the translation table
 * Outputs: none
 *
 * Adapted from
 *   arch/arm64/kernel/head.S
 *   arch/arm64/mm/proc.S
 */

/*
 * Memory region attributes for LPAE:
 *
 *   n = AttrIndx[2:0]
 *                      n       MAIR
 *   DEVICE_nGnRnE      000     00000000
 *   DEVICE_nGnRE       001     00000100
 *   DEVICE_GRE         010     00001100
 *   NORMAL_NC          011     01000100
 *   NORMAL             100     11111111
 */
/* Shift attribute 'attr' into the MAIR byte for attribute index 'mt' */
#define MAIR(attr, mt) ((attr) << ((mt) * 8))

.globl asm_mmu_enable
asm_mmu_enable:
	ic	iallu			// I+BTB cache invalidate
	tlbi	vmalle1is		// invalidate I + D TLBs
	dsb	ish

	/* TCR */
	ldr	x1, =TCR_TxSZ(VA_BITS) |		\
		    TCR_TG0_64K | TCR_TG1_64K |		\
		    TCR_IRGN_WBWA | TCR_ORGN_WBWA |	\
		    TCR_SHARED
	mrs	x2, id_aa64mmfr0_el1
	/* set TCR_EL1.IPS (bits [34:32]) from ID_AA64MMFR0_EL1.PARange */
	bfi	x1, x2, #32, #3
	msr	tcr_el1, x1

	/* MAIR */
	ldr	x1, =MAIR(0x00, MT_DEVICE_nGnRnE) |	\
		    MAIR(0x04, MT_DEVICE_nGnRE) |	\
		    MAIR(0x0c, MT_DEVICE_GRE) |		\
		    MAIR(0x44, MT_NORMAL_NC) |		\
		    MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x1

	/* TTBR0 */
	msr	ttbr0_el1, x0
	isb

	/* SCTLR */
	mrs	x1, sctlr_el1
	orr	x1, x1, SCTLR_EL1_C
	orr	x1, x1, SCTLR_EL1_I
	orr	x1, x1, SCTLR_EL1_M
	msr	sctlr_el1, x1
	isb

	ret

.globl asm_mmu_disable
asm_mmu_disable:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_EL1_M
	msr	sctlr_el1, x0
	isb
	ret

/*
 * Vectors
 * Adapted from arch/arm64/kernel/entry.S
 */
.macro vector_stub, name, vec
\name:
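	/*
	 * Frame layout sketch (assuming the S_* constants generated by
	 * asm-offsets.h describe a kernel-style struct pt_regs): push
	 * x0-x29 in pairs, then store lr at S_LR, the pre-exception sp
	 * at S_SP, and elr_el1/spsr_el1 at S_PC, so that sp itself can
	 * be handed to the C handler as a pointer to the saved frame.
	 */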
	stp	x0, x1, [sp, #-S_FRAME_SIZE]!
	stp	x2, x3, [sp, #16]
	stp	x4, x5, [sp, #32]
	stp	x6, x7, [sp, #48]
	stp	x8, x9, [sp, #64]
	stp	x10, x11, [sp, #80]
	stp	x12, x13, [sp, #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	/* Vectors 8-15 come from EL0, where the stack was SP_EL0 */
	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1
	mrs	x2, spsr_el1
	stp	x1, x2, [sp, #S_PC]

	mov	x0, \vec
	mov	x1, sp
	mrs	x2, esr_el1
	bl	do_handle_exception

	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	.if \vec >= 8
	ldr	x1, [sp, #S_SP]
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp, #96]
	ldp	x10, x11, [sp, #80]
	ldp	x8, x9, [sp, #64]
	ldp	x6, x7, [sp, #48]
	ldp	x4, x5, [sp, #32]
	ldp	x2, x3, [sp, #16]
	ldp	x0, x1, [sp], #S_FRAME_SIZE

	eret
.endm

vector_stub	el1t_sync,	0
vector_stub	el1t_irq,	1
vector_stub	el1t_fiq,	2
vector_stub	el1t_error,	3

vector_stub	el1h_sync,	4
vector_stub	el1h_irq,	5
vector_stub	el1h_fiq,	6
vector_stub	el1h_error,	7

vector_stub	el0_sync_64,	8
vector_stub	el0_irq_64,	9
vector_stub	el0_fiq_64,	10
vector_stub	el0_error_64,	11

vector_stub	el0_sync_32,	12
vector_stub	el0_irq_32,	13
vector_stub	el0_fiq_32,	14
vector_stub	el0_error_32,	15

.section .text.ex

.macro ventry, label
.align 7
	b	\label
.endm

.align 11
vector_table:
	ventry	el1t_sync		// Synchronous EL1t
	ventry	el1t_irq		// IRQ EL1t
	ventry	el1t_fiq		// FIQ EL1t
	ventry	el1t_error		// Error EL1t

	ventry	el1h_sync		// Synchronous EL1h
	ventry	el1h_irq		// IRQ EL1h
	ventry	el1h_fiq		// FIQ EL1h
	ventry	el1h_error		// Error EL1h

	ventry	el0_sync_64		// Synchronous 64-bit EL0
	ventry	el0_irq_64		// IRQ 64-bit EL0
	ventry	el0_fiq_64		// FIQ 64-bit EL0
	ventry	el0_error_64		// Error 64-bit EL0

	ventry	el0_sync_32		// Synchronous 32-bit EL0
	ventry	el0_irq_32		// IRQ 32-bit EL0
	ventry	el0_fiq_32		// FIQ 32-bit EL0
	ventry	el0_error_32		// Error 32-bit EL0
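/*
 * Layout note: VBAR_EL1 requires a 2KB-aligned table (.align 11) and
 * each of the 16 entries above occupies 128 bytes (.align 7). The four
 * groups cover exceptions taken from EL1 with SP_EL0 (EL1t), from EL1
 * with SP_ELx (EL1h), from 64-bit EL0 and from 32-bit EL0.
 */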