/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

.section .init

.globl start
start:
	/*
	 * bootloader params are in x0-x3
	 * The physical address of the dtb is in x0, x1-x3 are reserved
	 * See the kernel doc Documentation/arm64/booting.txt
	 */
	adr x4, stacktop
	mov sp, x4
	stp x0, x1, [sp, #-16]!

	/* Enable FP/ASIMD */
	mov x0, #(3 << 20)
	msr cpacr_el1, x0

	/* set up exception handling */
	bl exceptions_init

	/* complete setup */
	ldp x0, x1, [sp], #16
	bl setup

	/* run the test */
	adr x0, __argc
	ldr x0, [x0]
	adr x1, __argv
	bl main
	bl exit
	b halt

exceptions_init:
	adr x0, vector_table
	msr vbar_el1, x0
	isb
	ret

.text

.globl halt
halt:
1:	wfi
	b 1b

/*
 * Vectors
 * Adapted from arch/arm64/kernel/entry.S
 */
.macro vector_stub, name, vec
\name:
	stp	 x0,  x1, [sp, #-S_FRAME_SIZE]!
	stp	 x2,  x3, [sp,  #16]
	stp	 x4,  x5, [sp,  #32]
	stp	 x6,  x7, [sp,  #48]
	stp	 x8,  x9, [sp,  #64]
	stp	x10, x11, [sp,  #80]
	stp	x12, x13, [sp,  #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1
	mrs	x2, spsr_el1
	stp	x1, x2, [sp, #S_PC]

	and	x2, x2, #PSR_MODE_MASK
	cmp	x2, #PSR_MODE_EL0t
	b.ne	1f
	adr	x2, user_mode
	str	xzr, [x2]		/* we're in kernel mode now */

1:	mov	x0, \vec
	mov	x1, sp
	mrs	x2, esr_el1
	bl	do_handle_exception

	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	and	x2, x2, #PSR_MODE_MASK
	cmp	x2, #PSR_MODE_EL0t
	b.ne	1f
	adr	x2, user_mode
	mov	x1, #1
	str	x1, [x2]		/* we're going back to user mode */

1:
	.if \vec >= 8
	ldr	x1, [sp, #S_SP]
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp,  #96]
	ldp	x10, x11, [sp,  #80]
	ldp	 x8,  x9, [sp,  #64]
	ldp	 x6,  x7, [sp,  #48]
	ldp	 x4,  x5, [sp,  #32]
	ldp	 x2,  x3, [sp,  #16]
	ldp	 x0,  x1, [sp], #S_FRAME_SIZE

	eret
.endm

vector_stub	el1t_sync,     0
vector_stub	el1t_irq,      1
vector_stub	el1t_fiq,      2
vector_stub	el1t_error,    3

vector_stub	el1h_sync,     4
vector_stub	el1h_irq,      5
vector_stub	el1h_fiq,      6
vector_stub	el1h_error,    7

vector_stub	el0_sync_64,   8
vector_stub	el0_irq_64,    9
vector_stub	el0_fiq_64,   10
vector_stub	el0_error_64, 11

vector_stub	el0_sync_32,  12
vector_stub	el0_irq_32,   13
vector_stub	el0_fiq_32,   14
vector_stub	el0_error_32, 15

.section .text.ex

.macro ventry, label
.align 7
	b	\label
.endm

.align 11
vector_table:
	ventry	el1t_sync		// Synchronous EL1t
	ventry	el1t_irq		// IRQ EL1t
	ventry	el1t_fiq		// FIQ EL1t
	ventry	el1t_error		// Error EL1t

	ventry	el1h_sync		// Synchronous EL1h
	ventry	el1h_irq		// IRQ EL1h
	ventry	el1h_fiq		// FIQ EL1h
	ventry	el1h_error		// Error EL1h

	ventry	el0_sync_64		// Synchronous 64-bit EL0
	ventry	el0_irq_64		// IRQ 64-bit EL0
	ventry	el0_fiq_64		// FIQ 64-bit EL0
	ventry	el0_error_64		// Error 64-bit EL0

	ventry	el0_sync_32		// Synchronous 32-bit EL0
	ventry	el0_irq_32		// IRQ 32-bit EL0
	ventry	el0_fiq_32		// FIQ 32-bit EL0
	ventry	el0_error_32		// Error 32-bit EL0