/*
 * Boot entry point and assembler functions for armv7 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <auxinfo.h>
#include <asm/assembler.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * Initial stack pointer offset within a thread's stack region: the top
 * 8 * S_FRAME_SIZE bytes are reserved for the per-mode exception stacks
 * (see exceptions_init), and the result is rounded down to keep the
 * stack 8-byte aligned as required by AAPCS.
 */
#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)

/*
 * zero_range - zero memory from \tmp1 (inclusive) to \tmp2 (exclusive)
 * in 8-byte strides using strd. \tmp3 and \tmp4 are scratch registers
 * that hold the zero pair. \tmp1 is advanced past the range.
 * NOTE(review): strd stores a doubleword; the range is presumably
 * expected to be 8-byte aligned and a multiple of 8 bytes — confirm
 * against the linker script symbols passed in by callers.
 */
.macro zero_range, tmp1, tmp2, tmp3, tmp4
	mov	\tmp3, #0
	mov	\tmp4, #0
9998:	cmp	\tmp1, \tmp2
	beq	9997f
	strd	\tmp3, \tmp4, [\tmp1], #8
	b	9998b
9997:
.endm

.arm

.section .init

/*
 * start - primary CPU entry point from the bootloader.
 * Zeroes BSS and cpu0's stack, carves exception stacks out of the top
 * of the stack region, saves the dtb pointer, then hands off to the C
 * setup()/main() path. Does not return; falls through to exit/halt.
 */
.globl start
start:
	/* zero BSS (bss..ebss from the linker script) */
	ldr	r4, =bss
	ldr	r5, =ebss
	zero_range r4, r5, r6, r7

	/* zero cpu0's stack (one THREAD_SIZE region below stacktop) */
	ldr	r5, =stacktop
	sub	r4, r5, #THREAD_SIZE
	zero_range r4, r5, r6, r7

	/*
	 * set stack, making room at top of stack for cpu0's
	 * exception stacks. Must start with stackptr, not
	 * stacktop, so the thread size masking (shifts) work.
	 */
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT	@ round sp down to the base of the
	lsl	sp, #THREAD_SHIFT	@ THREAD_SIZE-aligned stack region
	add	sp, #THREAD_START_SP	@ leave the top for exception stacks

	/*
	 * save sp before pushing anything on the stack
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * bootloader params are in r0-r2
	 * See the kernel doc Documentation/arm/Booting
	 *   r0 = 0
	 *   r1 = machine type number
	 *   r2 = physical address of the dtb
	 *
	 * As we have no need for r0's nor r1's value, then
	 * put the dtb in r0. This allows setup to be consistent
	 * with arm64.
	 */
	mov	r0, r2
	push	{r0-r1}			@ preserve dtb pointer across init calls

	/* set up vector table, mode stacks, and enable the VFP */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init
	bl	enable_vfp

	/* complete setup */
	pop	{r0-r1}
	mov	r3, #0
	ldr	r2, =stacktop		@ r2,r3 is the base of free memory
	bl	setup			@ r0 is the addr of the dtb

	/* run the test */
	ldr	r0, =__argc
	ldr	r0, [r0]		@ r0 = argc (by value)
	ldr	r1, =__argv		@ r1 = argv (address of the array)
	ldr	r2, =__environ		@ r2 = environ
	bl	main
	bl	exit			@ exit with main's return code in r0
	b	halt			@ should not be reached

.text

/*
 * psci_invoke_hvc / psci_invoke_smc
 *
 * PSCI calls via the hypervisor-call or secure-monitor-call conduit.
 * The PSCI argument registers are already in place per the SMC calling
 * convention, so each function is just the trapping instruction.
 *
 * Inputs:
 *   r0 -- function_id
 *   r1 -- arg0
 *   r2 -- arg1
 *   r3 -- arg2
 *
 * Outputs:
 *   r0 -- return code
 */
.globl psci_invoke_hvc
psci_invoke_hvc:
	hvc	#0
	mov	pc, lr

.globl psci_invoke_smc
psci_invoke_smc:
	smc	#0
	mov	pc, lr

/*
 * enable_vfp - grant full CP10/CP11 access in CPACR, then set FPEXC.EN
 * to switch on Advanced SIMD / VFP. Clobbers r0.
 */
enable_vfp:
	/* Enable full access to CP10 and CP11: */
	mov	r0, #(3 << 22 | 3 << 20)
	mcr	p15, 0, r0, c1, c0, 2	@ write CPACR
	isb				@ ensure CPACR write is visible before vmsr
	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
	mov	r0, #(1 << 30)
	vmsr	fpexc, r0
	mov	pc, lr

/*
 * get_mmu_off - return (in r0) the AUXINFO_MMU_OFF bit from the
 * auxinfo flags word (second word of struct auxinfo). Non-zero means
 * the test asked to run with the MMU disabled. Clobbers r0 only.
 */
get_mmu_off:
	ldr	r0, =auxinfo
	ldr	r0, [r0, #4]		@ auxinfo.flags
	and	r0, #AUXINFO_MMU_OFF
	mov	pc, lr

/*
 * secondary_entry - entry point for secondary CPUs.
 * Optionally enables the MMU with the shared idmap tables, sets up this
 * CPU's stack/vectors/VFP, finishes initialization in C, then runs the
 * entry function handed back (in r0) by secondary_cinit.
 */
.global secondary_entry
secondary_entry:
	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f			@ skip MMU enable when requested off
	mov	r1, #0			@ high word of the table base is 0
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]		@ r0 = idmap pgtable base (low word)
	bl	asm_mmu_enable

1:
	/*
	 * Set the stack, and set up vector table
	 * and exception stacks. Exception stacks
	 * space starts at stack top and grows up.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]		@ stack top published by the waking CPU
	mov	sp, r0
	bl	exceptions_init		@ r0 = exception stacks base (stack top)
	bl	enable_vfp

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	blx	r0
	b	do_idle			@ park the CPU when the function returns

/* halt - park the CPU in a low-power wfi loop forever. */
.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 * Inputs:
 *   (r0 - lo, r1 - hi) is the base address of the translation table
 * Outputs: none
 *
 * Programs the LPAE (TTBCR.EAE) long-descriptor MMU configuration and
 * turns on the MMU plus the I/D caches in SCTLR. Clobbers r2.
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
.globl asm_mmu_enable
asm_mmu_enable:
	/* TLBIALL - invalidate all stale translations first */
	mcr	p15, 0, r2, c8, c7, 0
	dsb	nsh

	/* TTBCR - long-descriptor format, inner-shareable WBWA walks */
	ldr	r2, =(TTBCR_EAE | \
		      TTBCR_SH0_SHARED | \
		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
	mcr	p15, 0, r2, c2, c0, 2
	isb

	/* MAIR - memory attribute indirection (MAIR0/MAIR1 in LPAE mode) */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1

	/* TTBR0 - 64-bit write of the table base (r1:r0) */
	mcrr	p15, 0, r0, r1, c2
	isb

	/* SCTLR - enable MMU, caches */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C
	orr	r2, #CR_I
	orr	r2, #CR_M
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov	pc, lr

/*
 * asm_mmu_disable - clear SCTLR.M, then clean+invalidate the data
 * cache over all of free memory ([__phys_offset, __phys_end)) so
 * subsequent uncached accesses see current data. Clobbers r0-r3.
 */
.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M
	mcr	p15, 0, r0, c1, c0, 0
	isb

	ldr	r0, =__phys_offset
	ldr	r0, [r0]		@ r0 = start of memory
	ldr	r1, =__phys_end
	ldr	r1, [r1]
	sub	r1, r1, r0		@ r1 = size of the region
	dcache_by_line_op dccimvac, sy, r0, r1, r2, r3

	mov	pc, lr

/*
 * Vectors
 */

/*
 * set_mode_stack - advance \stack by one S_FRAME_SIZE frame, switch to
 * \mode (with IRQ/FIQ masked), and install \stack as that mode's banked
 * sp. Leaves the CPU in \mode; the caller restores its own mode.
 */
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAME_SIZE
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb
	mov	sp, \stack
.endm

/*
 * exceptions_init
 *
 * Input r0 is the stack top, which is the exception stacks base
 *
 * Points VBAR at vector_table (and clears SCTLR.V so VBAR is used
 * rather than the hivecs address), then gives each exception mode its
 * own S_FRAME_SIZE stack frame above r0. Clobbers r0, r2.
 */
exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR

	mrs	r2, cpsr		@ remember the current (svc) mode

	/*
	 * The first frame is reserved for svc mode
	 */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr

/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 *
 * \correction is subtracted from lr_<exception> to turn it into the
 * address of the instruction that took (or was preempted by) the
 * exception, per the per-vector lr adjustment rules in the ARM ARM.
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/* Branch to handler in SVC mode */
	mov	r0, #\vec		@ r0 = vector number
	mov	r1, sp			@ r1 = this mode's saved-state frame
	ldr	lr, =vector_common
	movs	pc, lr			@ exception return: jump + mode switch
.endm

vector_stub	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4

/*
 * vector_svc - svc exceptions are taken in svc mode itself, so there is
 * no banked frame; instead the reserved first frame at the top of this
 * thread's stack region (see exceptions_init/THREAD_START_SP) is located
 * by masking sp, and used just like a stub's frame.
 */
.align 5
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	push	{ r1 }
	lsr	r1, sp, #THREAD_SHIFT	@ r1 = base of this thread's
	lsl	r1, #THREAD_SHIFT	@      stack region
	add	r1, #THREAD_START_SP	@ r1 = the reserved svc frame
	str	r0, [r1, #S_R0]
	pop	{ r0 }			@ r0 = original r1
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr

/*
 * vector_common - shared second-half of exception entry, run in svc
 * mode. On entry r0 = vector number, r1 = the stub frame holding the
 * parent's r0/r1/pc/cpsr. Builds a full struct pt_regs on the svc
 * stack, calls do_handle_exception(vec, regs), then restores all
 * registers (including pc/cpsr) to return from the exception.
 */
vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment
	subne	sp, #4			@ note: 'ne' flag is reused below

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC]		@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^		@ '^' selects the user-mode banked regs

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 */
	str	r6, [sp, #S_SP]

	/* return from exception */
	msr	spsr_cxsf, r5
	ldmia	sp, { r0-pc }^		@ '^' also restores cpsr from spsr

/* Address exceptions don't exist in this configuration; spin if taken. */
.align 5
vector_addrexcptn:
	b	vector_addrexcptn

/*
 * The vector table itself: one branch per exception vector, installed
 * into VBAR by exceptions_init. Must be 32-byte aligned.
 */
.section .text.ex
.align 5
vector_table:
	b	vector_rst
	b	vector_und
	b	vector_svc
	b	vector_pabt
	b	vector_dabt
	b	vector_addrexcptn	@ should never happen
	b	vector_irq
	b	vector_fiq