/*
 * Boot entry point and assembler functions for aarch64 tests.
 *
 * Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#define __ASSEMBLY__
#include <auxinfo.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/sysreg.h>

#ifdef CONFIG_EFI
#include "efi/crt0-efi-aarch64.S"
#else

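/*
 * zero_range - zero the range [\tmp1, \tmp2) with pairs of xzr
 * stores. The range size must be a multiple of 16 bytes (the loop
 * only tests for equality), and \tmp1 is advanced to \tmp2.
 */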
.macro zero_range, tmp1, tmp2
9998:	cmp	\tmp1, \tmp2
	b.eq	9997f
	stp	xzr, xzr, [\tmp1], #16
	b	9998b
9997:
.endm

.section .init

/*
 * Bootloader params are in x0-x3 (x0 holds the address of the DTB).
 * See the kernel doc Documentation/arm64/booting.txt.
 */
.globl start
start:
	/* get our base address */
	adrp	x4, start
	add	x4, x4, :lo12:start

	/*
	 * Update all R_AARCH64_RELATIVE relocations using the table
	 * of Elf64_Rela entries between reloc_start/end. The build
	 * will not emit other relocation types.
	 *
	 * struct Elf64_Rela {
	 *	uint64_t r_offset;
	 *	uint64_t r_info;
	 *	int64_t  r_addend;
	 * };
	 */
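	/*
	 * In C, the loop below would read (a sketch; base is the
	 * runtime load address computed in x4 above):
	 *
	 *	for (struct Elf64_Rela *r = reloc_start; r < reloc_end; r++)
	 *		*(uint64_t *)(base + r->r_offset) = base + r->r_addend;
	 */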
	adrp	x5, reloc_start
	add	x5, x5, :lo12:reloc_start
	adrp	x6, reloc_end
	add	x6, x6, :lo12:reloc_end
1:
	cmp	x5, x6
	b.hs	1f
	ldr	x7, [x5]			// r_offset
	ldr	x8, [x5, #16]			// r_addend
	add	x8, x8, x4			// val = base + r_addend
	str	x8, [x4, x7]			// base[r_offset] = val
	add	x5, x5, #24			// sizeof(struct Elf64_Rela)
	b	1b

1:
	/* zero BSS */
	adrp	x4, bss
	add	x4, x4, :lo12:bss
	adrp	x5, ebss
	add	x5, x5, :lo12:ebss
	zero_range x4, x5

	/* zero and set up stack */
	adrp	x5, stacktop
	add	x5, x5, :lo12:stacktop
	sub	x4, x5, #THREAD_SIZE
	zero_range x4, x5

	/* set SCTLR_EL1 to a known value */
	ldr	x4, =INIT_SCTLR_EL1_MMU_OFF
	msr	sctlr_el1, x4
	isb

	/* select SP_EL1 (SPSel = 1) and set up the boot stack */
	mov	x4, #1
	msr	spsel, x4
	adrp	x4, stackptr
	add	sp, x4, :lo12:stackptr

	/* enable FP/ASIMD and SVE */
	mov	x4, #(3 << 20)
	orr	x4, x4, #(3 << 16)
	msr	cpacr_el1, x4

	/* set up exception handling */
	bl	exceptions_init

	/* complete setup */
	adrp	x1, stacktop
	add	x1, x1, :lo12:stacktop		// x1 is the base of free memory
	bl	setup				// x0 is the addr of the dtb

	/* run the test */
	adrp	x0, __argc
	ldr	w0, [x0, :lo12:__argc]
	adrp	x1, __argv
	add	x1, x1, :lo12:__argv
	adrp	x2, __environ
	add	x2, x2, :lo12:__environ
	bl	main
	bl	exit
	b	halt

#endif

.text

/*
 * arm_smccc_hvc / arm_smccc_smc
 *
 * Inputs:
 *   w0 -- function_id
 *   x1 -- arg0
 *   x2 -- arg1
 *   x3 -- arg2
 *   x4 -- arg3
 *   x5 -- arg4
 *   x6 -- arg5
 *   x7 -- arg6
 *   sp -- { arg7, arg8, arg9, arg10, result }
 *
 * Outputs:
 *   x0 -- return code
 *
 * If the result pointer is not NULL:
 *   result.r0 -- return code
 *   result.r1 -- x1
 *   result.r2 -- x2
 *   result.r3 -- x3
 *   result.r4 -- x4
 *   result.r5 -- x5
 *   result.r6 -- x6
 *   result.r7 -- x7
 *   result.r8 -- x8
 *   result.r9 -- x9
 */
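/*
 * Viewed from C, a matching prototype would look roughly like this
 * (a sketch only; these names are illustrative, not the exact
 * library declarations):
 *
 *	struct smccc_result {
 *		unsigned long r0, r1, r2, r3, r4,
 *			      r5, r6, r7, r8, r9;
 *	};
 *	int arm_smccc_hvc(unsigned int function_id, unsigned long arg0,
 *			  unsigned long arg1, unsigned long arg2,
 *			  unsigned long arg3, unsigned long arg4,
 *			  unsigned long arg5, unsigned long arg6,
 *			  unsigned long arg7, unsigned long arg8,
 *			  unsigned long arg9, unsigned long arg10,
 *			  struct smccc_result *result);
 *
 * The first eight arguments arrive in w0/x1-x7 per the AAPCS64; arg7
 * through arg10 and the result pointer are passed on the stack, which
 * is why the macro below reads them relative to sp.
 */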
.macro do_smccc_call instr
	/* Save x8-x11 on the stack */
	stp	x9, x8,   [sp, #-16]!
	stp	x11, x10, [sp, #-16]!
	/* Load arg7 - arg10 from the stack */
	ldp	x8, x9,   [sp, #32]
	ldp	x10, x11, [sp, #48]
	\instr	#0
	/* Get the result address; skip the stores if it is NULL */
	ldr	x10, [sp, #64]
	cmp	x10, xzr
	b.eq	1f
	stp	x0, x1, [x10, #0]
	stp	x2, x3, [x10, #16]
	stp	x4, x5, [x10, #32]
	stp	x6, x7, [x10, #48]
	stp	x8, x9, [x10, #64]
1:
	/* Restore x8-x11 from the stack */
	ldp	x11, x10, [sp], #16
	ldp	x9, x8,   [sp], #16
	ret
.endm

.globl arm_smccc_hvc
arm_smccc_hvc:
	do_smccc_call hvc

.globl arm_smccc_smc
arm_smccc_smc:
	do_smccc_call smc

get_mmu_off:
	adrp	x0, auxinfo
	ldr	x0, [x0, :lo12:auxinfo + 8]	// auxinfo.flags
	and	x0, x0, #AUXINFO_MMU_OFF
	ret

.globl secondary_entry
secondary_entry:
	/* enable FP/ASIMD and SVE */
	mov	x0, #(3 << 20)
	orr	x0, x0, #(3 << 16)
	msr	cpacr_el1, x0

	/* set up exception handling */
	bl	exceptions_init

	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cbnz	x0, 1f
	adrp	x0, mmu_idmap
	ldr	x0, [x0, :lo12:mmu_idmap]
	bl	asm_mmu_enable

1:
	/* set the stack */
	adrp	x0, secondary_data
	ldr	x0, [x0, :lo12:secondary_data]
	mov	sp, x0

	/* finish init in C code */
	bl	secondary_cinit

	/* x0 is now the entry function, run it */
	blr	x0
	b	do_idle

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     x0 is the base address of the translation table
 *   Outputs: none
 *
 * Adapted from
 *   arch/arm64/kernel/head.S
 *   arch/arm64/mm/proc.S
 */

/*
 * Memory region attributes for LPAE:
 *
 *   n = AttrIndx[2:0]
 *                      n       MAIR
 *   DEVICE_nGnRnE      000     00000000
 *   DEVICE_nGnRE       001     00000100
 *   DEVICE_GRE         010     00001100
 *   NORMAL_NC          011     01000100
 *   NORMAL             100     11111111
 *   NORMAL_WT          101     10111011
 *   DEVICE_nGRE        110     00001000
 */
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
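
/*
 * For example, with MT_NORMAL being AttrIndx 100b (4) per the table
 * above, MAIR(0xff, MT_NORMAL) expands to 0xff << 32, placing the
 * Normal Write-Back attribute byte in byte 4 of MAIR_EL1. A page
 * table entry then selects it by setting its AttrIndx field to 4.
 */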

#if PAGE_SIZE == SZ_64K
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif PAGE_SIZE == SZ_16K
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#elif PAGE_SIZE == SZ_4K
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

.globl asm_mmu_enable
asm_mmu_enable:
	tlbi	vmalle1			// invalidate I + D TLBs
	dsb	nsh

	/* TCR */
	ldr	x1, =TCR_TxSZ(VA_BITS) |		\
		     TCR_TG_FLAGS  |			\
		     TCR_IRGN_WBWA | TCR_ORGN_WBWA |	\
		     TCR_SHARED |			\
		     TCR_EPD1
	mrs	x2, id_aa64mmfr0_el1
	bfi	x1, x2, #32, #3		// set TCR.IPS to ID_AA64MMFR0_EL1.PARange
	msr	tcr_el1, x1

	/* MAIR */
	ldr	x1, =MAIR(0x00, MT_DEVICE_nGnRnE) |	\
		     MAIR(0x04, MT_DEVICE_nGnRE) |	\
		     MAIR(0x0c, MT_DEVICE_GRE) |	\
		     MAIR(0x44, MT_NORMAL_NC) |		\
		     MAIR(0xff, MT_NORMAL) |		\
		     MAIR(0xbb, MT_NORMAL_WT) |		\
		     MAIR(0x08, MT_DEVICE_nGRE)
	msr	mair_el1, x1

	/* TTBR0 */
	msr	ttbr0_el1, x0
	isb

	/* SCTLR */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_EL1_C
	orr	x1, x1, #SCTLR_EL1_I
	orr	x1, x1, #SCTLR_EL1_M
	msr	sctlr_el1, x1
	isb

	ret

.globl asm_mmu_disable
asm_mmu_disable:
	mrs	x0, sctlr_el1
	bic	x0, x0, #SCTLR_EL1_M
	msr	sctlr_el1, x0
	isb

	/* Clean + invalidate all of memory, from __phys_offset to __phys_end */
	adrp	x0, __phys_offset
	ldr	x0, [x0, :lo12:__phys_offset]
	adrp	x1, __phys_end
	ldr	x1, [x1, :lo12:__phys_end]
	sub	x1, x1, x0			// x1 = length of the range
	dcache_by_line_op civac, sy, x0, x1, x2, x3

	ret

/*
 * Vectors
 */

.globl exceptions_init
exceptions_init:
	adrp	x4, vector_table
	add	x4, x4, :lo12:vector_table
	msr	vbar_el1, x4
	isb
	ret

/*
 * Vector stubs
 * Adapted from arch/arm64/kernel/entry.S
 * Declared weak to allow external tests to override individual
 * vector stubs.
 */
.macro vector_stub, name, vec
.weak \name
\name:
	stp	 x0,  x1, [sp, #-S_FRAME_SIZE]!
	stp	 x2,  x3, [sp,  #16]
	stp	 x4,  x5, [sp,  #32]
	stp	 x6,  x7, [sp,  #48]
	stp	 x8,  x9, [sp,  #64]
	stp	x10, x11, [sp,  #80]
	stp	x12, x13, [sp,  #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	/* vectors 8-15 are taken from EL0, which runs on SP_EL0 */
	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1
	mrs	x2, spsr_el1
	stp	x1, x2, [sp, #S_PC]

	/*
	 * Create a frame record from the saved FP and the ELR to
	 * allow unwinding through exceptions.
	 */
	stp	x29, x1, [sp, #S_FP]
	add	x29, sp, #S_FP

	mov	x0, \vec
	mov	x1, sp
	mrs	x2, esr_el1
	bl	do_handle_exception

	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	.if \vec >= 8
	ldr	x1, [sp, #S_SP]
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp,  #96]
	ldp	x10, x11, [sp,  #80]
	ldp	 x8,  x9, [sp,  #64]
	ldp	 x6,  x7, [sp,  #48]
	ldp	 x4,  x5, [sp,  #32]
	ldp	 x2,  x3, [sp,  #16]
	ldp	 x0,  x1, [sp], #S_FRAME_SIZE

	eret
.endm
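
/*
 * The S_* offsets and S_FRAME_SIZE above come from asm/asm-offsets.h
 * (generated from struct pt_regs) and describe the register frame
 * each stub builds on the stack; do_handle_exception receives a
 * pointer to that frame in x1.
 */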

.globl vector_stub_start
vector_stub_start:

vector_stub	el1t_sync,     0
vector_stub	el1t_irq,      1
vector_stub	el1t_fiq,      2
vector_stub	el1t_error,    3

vector_stub	el1h_sync,     4
vector_stub	el1h_irq,      5
vector_stub	el1h_fiq,      6
vector_stub	el1h_error,    7

vector_stub	el0_sync_64,   8
vector_stub	el0_irq_64,    9
vector_stub	el0_fiq_64,   10
vector_stub	el0_error_64, 11

vector_stub	el0_sync_32,  12
vector_stub	el0_irq_32,   13
vector_stub	el0_fiq_32,   14
vector_stub	el0_error_32, 15

.globl vector_stub_end
vector_stub_end:

.section .text.ex

.macro ventry, label
.align 7
	b	\label
.endm
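
/*
 * Each ventry is a single branch placed on a 128-byte boundary
 * (.align 7), matching the architectural 0x80-byte spacing of vector
 * table entries; the table itself gets the 2KB alignment VBAR_EL1
 * requires from the .align 11 below.
 */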

/*
 * Declared weak to allow external tests to override the default
 * vector table.
 */
.align 11
.weak vector_table
vector_table:
	ventry	el1t_sync			// Synchronous EL1t
	ventry	el1t_irq			// IRQ EL1t
	ventry	el1t_fiq			// FIQ EL1t
	ventry	el1t_error			// Error EL1t

	ventry	el1h_sync			// Synchronous EL1h
	ventry	el1h_irq			// IRQ EL1h
	ventry	el1h_fiq			// FIQ EL1h
	ventry	el1h_error			// Error EL1h

	ventry	el0_sync_64			// Synchronous 64-bit EL0
	ventry	el0_irq_64			// IRQ 64-bit EL0
	ventry	el0_fiq_64			// FIQ 64-bit EL0
	ventry	el0_error_64			// Error 64-bit EL0

	ventry	el0_sync_32			// Synchronous 32-bit EL0
	ventry	el0_irq_32			// IRQ 32-bit EL0
	ventry	el0_fiq_32			// FIQ 32-bit EL0
	ventry	el0_error_32			// Error 32-bit EL0