/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#define __ASSEMBLY__
#include <auxinfo.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>

.section .init

/*
 * Bootloader params are in x0-x3. See kernel doc
 * Documentation/arm64/booting.txt
 */
.globl start
start:
	/* get our base address */
	adrp	x4, start
	add	x4, x4, :lo12:start

	/*
	 * Update all R_AARCH64_RELATIVE relocations using the table
	 * of Elf64_Rela entries between reloc_start/end. The build
	 * will not emit other relocation types.
	 *
	 * struct Elf64_Rela {
	 *	uint64_t r_offset;
	 *	uint64_t r_info;
	 *	int64_t  r_addend;
	 * }
	 */
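	/*
	 * The loop below is, in effect (a C sketch; base is the load
	 * address computed above):
	 *
	 *	for (Elf64_Rela *r = reloc_start; r < reloc_end; r++)
	 *		*(uint64_t *)(base + r->r_offset) = base + r->r_addend;
	 */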
	adrp	x5, reloc_start
	add	x5, x5, :lo12:reloc_start
	adrp	x6, reloc_end
	add	x6, x6, :lo12:reloc_end
1:
	cmp	x5, x6
	b.hs	1f
	ldr	x7, [x5]			// r_offset
	ldr	x8, [x5, #16]			// r_addend
	add	x8, x8, x4			// val = base + r_addend
	str	x8, [x4, x7]			// base[r_offset] = val
	add	x5, x5, #24
	b	1b

1:
	/* set up stack */
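	/* SPSel = 1: use SP_EL1, not SP_EL0, while at EL1 */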
	mov	x4, #1
	msr	spsel, x4
	isb
	adrp	x4, stackptr
	add	sp, x4, :lo12:stackptr

	/* enable FP/ASIMD */
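	/* CPACR_EL1.FPEN (bits [21:20]) = 0b11: no FP/SIMD trapping */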
	mov	x4, #(3 << 20)
	msr	cpacr_el1, x4

	/* set up exception handling */
	bl	exceptions_init

	/* complete setup */
	bl	setup				// x0 is the addr of the dtb
	bl	get_mmu_off
	cbnz	x0, 1f
	bl	setup_vm

1:
	/* run the test */
	adrp	x0, __argc
	ldr	x0, [x0, :lo12:__argc]
	adrp	x1, __argv
	add	x1, x1, :lo12:__argv
	adrp	x2, __environ
	add	x2, x2, :lo12:__environ
	bl	main
	bl	exit
	b	halt

exceptions_init:
	adrp	x4, vector_table
	add	x4, x4, :lo12:vector_table
	msr	vbar_el1, x4
	isb
	ret

.text

.globl get_mmu_off
get_mmu_off:
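	/*
	 * auxinfo's flags word lives at offset 8 (its second field);
	 * AUXINFO_MMU_OFF is set when the test requests that the MMU
	 * stay off.
	 */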
	adrp	x0, auxinfo
	ldr	x0, [x0, :lo12:auxinfo + 8]
	and	x0, x0, #AUXINFO_MMU_OFF
	ret

.globl secondary_entry
secondary_entry:
	/* Enable FP/ASIMD */
	mov	x0, #(3 << 20)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cbnz	x0, 1f
	adrp	x0, mmu_idmap
	ldr	x0, [x0, :lo12:mmu_idmap]
	bl	asm_mmu_enable

1:
	/* set the stack */
	adrp	x0, secondary_data
	ldr	x0, [x0, :lo12:secondary_data]
	mov	sp, x0

	/* finish init in C code */
	bl	secondary_cinit

	/* x0 is now the entry function, run it */
	blr	x0
	b	do_idle

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     x0 is the base address of the translation table
 *   Outputs: none
 *
 * Adapted from
 *   arch/arm64/kernel/head.S
 *   arch/arm64/mm/proc.S
 */
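/*
 * Note the ordering below: the TLBs are invalidated first, TCR and
 * MAIR are programmed before TTBR0, and an ISB separates the system
 * register writes from setting SCTLR_EL1.M, so the MMU only comes on
 * once the new translation context is visible.
 */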

/*
 * Memory region attributes for LPAE:
 *
 *   n = AttrIndx[2:0]
 *                      n       MAIR
 *   DEVICE_nGnRnE      000     00000000
 *   DEVICE_nGnRE       001     00000100
 *   DEVICE_GRE         010     00001100
 *   NORMAL_NC          011     01000100
 *   NORMAL             100     11111111
 *   NORMAL_WT          101     10111011
 *   DEVICE_nGRE        110     00001000
 */
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
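/*
 * For example, with MT_NORMAL = 4 (AttrIndx 100 in the table above),
 * MAIR(0xff, MT_NORMAL) places the attribute 0xff in byte 4 of
 * MAIR_EL1, i.e. 0xff << 32.
 */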

#if PAGE_SIZE == SZ_64K
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif PAGE_SIZE == SZ_16K
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#elif PAGE_SIZE == SZ_4K
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

.globl asm_mmu_enable
asm_mmu_enable:
	tlbi	vmalle1			// invalidate I + D TLBs
	dsb	nsh

	/* TCR */
	ldr	x1, =TCR_TxSZ(VA_BITS) |		\
		     TCR_TG_FLAGS |			\
		     TCR_IRGN_WBWA | TCR_ORGN_WBWA |	\
		     TCR_SHARED
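	/* copy ID_AA64MMFR0_EL1.PARange into TCR_EL1.IPS (bits [34:32]) */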
	mrs	x2, id_aa64mmfr0_el1
	bfi	x1, x2, #32, #3
	msr	tcr_el1, x1

	/* MAIR */
	ldr	x1, =MAIR(0x00, MT_DEVICE_nGnRnE) |	\
		     MAIR(0x04, MT_DEVICE_nGnRE) |	\
		     MAIR(0x0c, MT_DEVICE_GRE) |	\
		     MAIR(0x44, MT_NORMAL_NC) |		\
		     MAIR(0xff, MT_NORMAL) |		\
		     MAIR(0xbb, MT_NORMAL_WT) |		\
		     MAIR(0x08, MT_DEVICE_nGRE)
	msr	mair_el1, x1

	/* TTBR0 */
	msr	ttbr0_el1, x0
	isb

	/* SCTLR */
	mrs	x1, sctlr_el1
	orr	x1, x1, SCTLR_EL1_C
	orr	x1, x1, SCTLR_EL1_I
	orr	x1, x1, SCTLR_EL1_M
	msr	sctlr_el1, x1
	isb

	ret

/* Taken with small changes from arch/arm64/include/asm/assembler.h */
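/*
 * dcache_by_line_op
 *   Applies "dc \op" to every D-cache line in [\start, \end), then
 *   issues "dsb \domain". \start is aligned down to the line size
 *   and clobbered; \tmp1 and \tmp2 are scratch. A rough C sketch:
 *
 *	start &= ~(line_size - 1);
 *	do {
 *		dc(op, start);
 *		start += line_size;
 *	} while (start < end);
 *	dsb(domain);
 */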
.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2
	adrp	\tmp1, dcache_line_size
	ldr	\tmp1, [\tmp1, :lo12:dcache_line_size]
	sub	\tmp2, \tmp1, #1
	bic	\start, \start, \tmp2
9998:
	dc	\op, \start
	add	\start, \start, \tmp1
	cmp	\start, \end
	b.lo	9998b
	dsb	\domain
.endm

.globl asm_mmu_disable
asm_mmu_disable:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_EL1_M
	msr	sctlr_el1, x0
	isb

	/*
	 * Clean + invalidate the entire memory range, so that anything
	 * written while caches were enabled is visible to the
	 * non-cacheable accesses that follow.
	 */
	adrp	x0, __phys_offset
	ldr	x0, [x0, :lo12:__phys_offset]
	adrp	x1, __phys_end
	ldr	x1, [x1, :lo12:__phys_end]
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	isb

	ret

/*
 * Vectors
 * Adapted from arch/arm64/kernel/entry.S
 */
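/*
 * Each stub stores the general purpose registers in a struct pt_regs
 * frame laid out by the S_* offsets from asm-offsets.h, calls
 * do_handle_exception(vector, regs, esr), then restores the frame
 * and erets. Vectors 8-15 arrive from EL0, so SP_EL0 is saved and
 * restored in place of the interrupted stack pointer.
 */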
.macro vector_stub, name, vec
\name:
	stp	 x0,  x1, [sp, #-S_FRAME_SIZE]!
	stp	 x2,  x3, [sp,  #16]
	stp	 x4,  x5, [sp,  #32]
	stp	 x6,  x7, [sp,  #48]
	stp	 x8,  x9, [sp,  #64]
	stp	x10, x11, [sp,  #80]
	stp	x12, x13, [sp,  #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1
	mrs	x2, spsr_el1
	stp	x1, x2, [sp, #S_PC]

	mov	x0, \vec
	mov	x1, sp
	mrs	x2, esr_el1
	bl	do_handle_exception

	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	.if \vec >= 8
	ldr	x1, [sp, #S_SP]
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp,  #96]
	ldp	x10, x11, [sp,  #80]
	ldp	 x8,  x9, [sp,  #64]
	ldp	 x6,  x7, [sp,  #48]
	ldp	 x4,  x5, [sp,  #32]
	ldp	 x2,  x3, [sp,  #16]
	ldp	 x0,  x1, [sp], #S_FRAME_SIZE

	eret
.endm

vector_stub	el1t_sync,     0
vector_stub	el1t_irq,      1
vector_stub	el1t_fiq,      2
vector_stub	el1t_error,    3

vector_stub	el1h_sync,     4
vector_stub	el1h_irq,      5
vector_stub	el1h_fiq,      6
vector_stub	el1h_error,    7

vector_stub	el0_sync_64,   8
vector_stub	el0_irq_64,    9
vector_stub	el0_fiq_64,   10
vector_stub	el0_error_64, 11

vector_stub	el0_sync_32,  12
vector_stub	el0_irq_32,   13
vector_stub	el0_fiq_32,   14
vector_stub	el0_error_32, 15

.section .text.ex

.macro ventry, label
.align 7
	b	\label
.endm
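
/*
 * The architecture spaces vectors 128 bytes apart (hence .align 7 in
 * ventry) and requires the table base in VBAR_EL1 to be 2KB aligned
 * (hence .align 11).
 */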

.align 11
vector_table:
	ventry	el1t_sync			// Synchronous EL1t
	ventry	el1t_irq			// IRQ EL1t
	ventry	el1t_fiq			// FIQ EL1t
	ventry	el1t_error			// Error EL1t

	ventry	el1h_sync			// Synchronous EL1h
	ventry	el1h_irq			// IRQ EL1h
	ventry	el1h_fiq			// FIQ EL1h
	ventry	el1h_error			// Error EL1h

	ventry	el0_sync_64			// Synchronous 64-bit EL0
	ventry	el0_irq_64			// IRQ 64-bit EL0
	ventry	el0_fiq_64			// FIQ 64-bit EL0
	ventry	el0_error_64			// Error 64-bit EL0

	ventry	el0_sync_32			// Synchronous 32-bit EL0
	ventry	el0_irq_32			// IRQ 32-bit EL0
	ventry	el0_fiq_32			// FIQ 32-bit EL0
	ventry	el0_error_32			// Error 32-bit EL0