xref: /kvm-unit-tests/arm/cstart64.S (revision 1693644d49d196ef1a22c868a16d2aa92fc2dfbf)
/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
8#define __ASSEMBLY__
9#include <asm/asm-offsets.h>
10#include <asm/ptrace.h>
11#include <asm/processor.h>
12#include <asm/page.h>
13#include <asm/pgtable-hwdef.h>
14
.section .init

/*
 * start - image entry point.
 * Sets up a stack, enables FP/ASIMD, installs the exception vectors,
 * then runs setup() followed by main(__argc, __argv) and exit().
 */
.globl start
start:
	/*
	 * bootloader params are in x0-x3
	 * The physical address of the dtb is in x0, x1-x3 are reserved
	 * See the kernel doc Documentation/arm64/booting.txt
	 */
	mov	x4, #1
	msr	spsel, x4		// SPSel = 1: use SP_ELx, not SP_EL0
	isb				// synchronize the stack-pointer switch
	adr     x4, stackptr		// NOTE(review): assumes stackptr labels the stack top — defined elsewhere
	mov	sp, x4
	stp	x0, x1, [sp, #-16]!	// preserve dtb pointer (x0) and x1 across the calls below

	/* Enable FP/ASIMD: CPACR_EL1.FPEN (bits [21:20]) = 0b11, no traps */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* complete setup: restore saved x0/x1 as setup()'s arguments */
	ldp	x0, x1, [sp], #16
	bl	setup

	/* run the test */
	adr	x0, __argc
	ldr	x0, [x0]		// x0 = argc (loaded value)
	adr	x1, __argv		// x1 = argv (address of the array)
	bl	main
	bl	exit
	b	halt			// exit() should not return; spin if it does
49
/*
 * exceptions_init - point VBAR_EL1 at our vector table.
 * Leaf function; clobbers x0.
 */
exceptions_init:
	adr	x0, vector_table
	msr	vbar_el1, x0		// install the vectors
	isb				// ensure they take effect before returning
	ret
55
.text

/*
 * halt - park the CPU forever.
 * wfi drops into a low-power wait; on wakeup we immediately wait again.
 */
.globl halt
halt:
.Lhalt_loop:
	wfi
	b	.Lhalt_loop
62
/*
 * asm_mmu_enable
 *   Inputs:
 *     x0 is the base address of the translation table
 *   Outputs: none
 *
 * Adapted from
 *   arch/arm64/kernel/head.S
 *   arch/arm64/mm/proc.S
 */

/*
 * Memory region attributes for LPAE:
 *
 *   n = AttrIndx[2:0]
 *                      n       MAIR
 *   DEVICE_nGnRnE      000     00000000
 *   DEVICE_nGnRE       001     00000100
 *   DEVICE_GRE         010     00001100
 *   NORMAL_NC          011     01000100
 *   NORMAL             100     11111111
 */
/* Place attribute byte 'attr' in the MAIR field for memory type index 'mt' */
#define MAIR(attr, mt) ((attr) << ((mt) * 8))

.globl asm_mmu_enable
asm_mmu_enable:
	ic	iallu			// I+BTB cache invalidate
	tlbi	vmalle1is		// invalidate I + D TLBs
	dsb	ish			// complete invalidation before reprogramming

	/*
	 * TCR: VA_BITS-sized address range, 64K granule for both TTBRs,
	 * write-back write-allocate cacheable, inner-shareable walks.
	 */
	ldr	x1, =TCR_TxSZ(VA_BITS) |		\
		     TCR_TG0_64K | TCR_TG1_64K |	\
		     TCR_IRGN_WBWA | TCR_ORGN_WBWA |	\
		     TCR_SHARED
	mrs	x2, id_aa64mmfr0_el1
	bfi	x1, x2, #32, #3		// TCR.IPS = ID_AA64MMFR0_EL1.PARange
	msr	tcr_el1, x1

	/* MAIR: one attribute byte per MT_* index (see the table above) */
	ldr	x1, =MAIR(0x00, MT_DEVICE_nGnRnE) |	\
		     MAIR(0x04, MT_DEVICE_nGnRE) |	\
		     MAIR(0x0c, MT_DEVICE_GRE) |	\
		     MAIR(0x44, MT_NORMAL_NC) |		\
		     MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x1

	/* TTBR0: x0 = translation table base, supplied by the caller */
	msr	ttbr0_el1, x0
	isb				// TCR/MAIR/TTBR writes visible before enabling

	/* SCTLR: turn on the MMU plus D-side and I-side caching */
	mrs	x1, sctlr_el1
	orr	x1, x1, SCTLR_EL1_C
	orr	x1, x1, SCTLR_EL1_I
	orr	x1, x1, SCTLR_EL1_M
	msr	sctlr_el1, x1
	isb				// synchronize: translation is on from here

	ret
123
/*
 * Vectors
 * Adapted from arch/arm64/kernel/entry.S
 *
 * vector_stub - generate one exception handler.
 *   \name: label for the generated handler
 *   \vec:  vector number (0-15), passed to do_handle_exception
 *
 * Builds a register frame on the stack (S_* offsets come from
 * asm-offsets.h), maintains the user_mode flag when transitioning
 * to/from EL0, calls do_handle_exception(vec, regs, esr), then
 * restores the frame and erets.  Vectors >= 8 are lower-EL (EL0)
 * entries, whose stack pointer lives in SP_EL0 rather than in SP.
 */
.macro vector_stub, name, vec
\name:
	stp	 x0,  x1, [sp, #-S_FRAME_SIZE]!	// allocate frame, save x0/x1
	stp	 x2,  x3, [sp,  #16]
	stp	 x4,  x5, [sp,  #32]
	stp	 x6,  x7, [sp,  #48]
	stp	 x8,  x9, [sp,  #64]
	stp	x10, x11, [sp,  #80]
	stp	x12, x13, [sp,  #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	/* record the interrupted SP: SP_EL0 for EL0 vectors, otherwise
	 * our own SP as it was before this frame was pushed */
	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1		// saved PC
	mrs	x2, spsr_el1		// saved PSTATE
	stp	x1, x2, [sp, #S_PC]

	/* came from EL0? then clear the user_mode flag */
	and	x2, x2, #PSR_MODE_MASK
	cmp	x2, #PSR_MODE_EL0t
	b.ne	1f
	adr	x2, user_mode
	str	xzr, [x2]		/* we're in kernel mode now */

1:	mov	x0, \vec		// arg0: vector number
	mov	x1, sp			// arg1: pointer to the register frame
	mrs	x2, esr_el1		// arg2: exception syndrome
	bl	do_handle_exception

	/* reload PC/PSTATE — the handler may have modified the frame */
	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	/* returning to EL0? then set the user_mode flag again */
	and	x2, x2, #PSR_MODE_MASK
	cmp	x2, #PSR_MODE_EL0t
	b.ne	1f
	adr	x2, user_mode
	mov	x1, #1
	str	x1, [x2]		/* we're going back to user mode */

1:
	.if \vec >= 8
	ldr	x1, [sp, #S_SP]		// restore the (possibly updated) EL0 SP
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp,  #96]
	ldp	x10, x11, [sp,  #80]
	ldp	 x8,  x9, [sp,  #64]
	ldp	 x6,  x7, [sp,  #48]
	ldp	 x4,  x5, [sp,  #32]
	ldp	 x2,  x3, [sp,  #16]
	ldp	 x0,  x1, [sp], #S_FRAME_SIZE	// restore x0/x1, free the frame

	eret
.endm
207
/*
 * Generate the 16 handlers.  The vector number encodes the table
 * quadrant: 0-3 current EL with SP_EL0 (EL1t), 4-7 current EL with
 * SP_ELx (EL1h), 8-11 lower EL using AArch64, 12-15 lower EL using
 * AArch32.
 */
vector_stub	el1t_sync,     0
vector_stub	el1t_irq,      1
vector_stub	el1t_fiq,      2
vector_stub	el1t_error,    3

vector_stub	el1h_sync,     4
vector_stub	el1h_irq,      5
vector_stub	el1h_fiq,      6
vector_stub	el1h_error,    7

vector_stub	el0_sync_64,   8
vector_stub	el0_irq_64,    9
vector_stub	el0_fiq_64,   10
vector_stub	el0_error_64, 11

vector_stub	el0_sync_32,  12
vector_stub	el0_irq_32,   13
vector_stub	el0_fiq_32,   14
vector_stub	el0_error_32, 15
227
.section .text.ex

/*
 * ventry - emit one vector-table slot.
 * Each slot is 0x80 bytes (hence the 2^7 alignment) and simply
 * branches out to the full handler.
 */
.macro ventry, target
.align 7
	b	\target
.endm
234
/*
 * The exception vector table: 16 slots, 2^11-byte aligned.
 * Installed into VBAR_EL1 by exceptions_init above.
 */
.align 11
vector_table:
	ventry	el1t_sync			// Synchronous EL1t
	ventry	el1t_irq			// IRQ EL1t
	ventry	el1t_fiq			// FIQ EL1t
	ventry	el1t_error			// Error EL1t

	ventry	el1h_sync			// Synchronous EL1h
	ventry	el1h_irq			// IRQ EL1h
	ventry	el1h_fiq			// FIQ EL1h
	ventry	el1h_error			// Error EL1h

	ventry	el0_sync_64			// Synchronous 64-bit EL0
	ventry	el0_irq_64			// IRQ 64-bit EL0
	ventry	el0_fiq_64			// FIQ 64-bit EL0
	ventry	el0_error_64			// Error 64-bit EL0

	ventry	el0_sync_32			// Synchronous 32-bit EL0
	ventry	el0_irq_32			// IRQ 32-bit EL0
	ventry	el0_fiq_32			// FIQ 32-bit EL0
	ventry	el0_error_32			// Error 32-bit EL0
256