/*
 * Boot entry point and assembler functions for armv7 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <auxinfo.h>
#include <asm/assembler.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)
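
/*
 * A note on the arithmetic: the top S_FRAME_SIZE * 8 bytes of each
 * THREAD_SIZE stack are set aside for the per-mode exception frames
 * (see exceptions_init below), and the result is rounded down so the
 * initial sp is 8-byte aligned, as the AAPCS requires at public
 * interfaces.
 */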

/*
 * zero_range - store zeros from \tmp1 up to (but not including) \tmp2,
 * eight bytes at a time. \tmp1 is advanced; \tmp3 and \tmp4 are
 * clobbered (they hold the zero doubleword).
 */
.macro zero_range, tmp1, tmp2, tmp3, tmp4
	mov	\tmp3, #0
	mov	\tmp4, #0
9998:	cmp	\tmp1, \tmp2
	beq	9997f
	strd	\tmp3, \tmp4, [\tmp1], #8
	b	9998b
9997:
.endm

.arm

.section .init

.globl start
start:
	/* zero BSS */
	ldr	r4, =bss
	ldr	r5, =ebss
	zero_range r4, r5, r6, r7

	/* zero stack */
	ldr	r5, =stacktop
	sub	r4, r5, #THREAD_SIZE
	zero_range r4, r5, r6, r7

	/*
	 * Set the stack, making room at the top for cpu0's
	 * exception stacks. We must start from stackptr, not
	 * stacktop, so that the thread-size masking (the shifts
	 * below) works.
	 */
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT
	lsl	sp, #THREAD_SHIFT
	add	sp, #THREAD_START_SP
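
	/*
	 * The lsr/lsl pair above rounds stackptr down to a THREAD_SIZE
	 * boundary, i.e. to the base of the thread's stack, and sp then
	 * lands THREAD_START_SP bytes above that base. As an example,
	 * assuming THREAD_SHIFT is 14 (16K stacks), 0x40007ff8 would
	 * become 0x40004000. vector_svc below relies on the same
	 * masking trick to find this frame area again.
	 */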

	/*
	 * save sp before pushing anything on the stack;
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * bootloader params are in r0-r2
	 * See the kernel doc Documentation/arm/Booting
	 *   r0 = 0
	 *   r1 = machine type number
	 *   r2 = physical address of the dtb
	 *
	 * As we have no need for r0's or r1's value, put the
	 * dtb in r0. This allows setup to be consistent with
	 * arm64.
	 */
	mov	r0, r2
	push	{r0-r1}

	/* set up vector table, mode stacks, and enable the VFP */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init
	bl	enable_vfp

	/* complete setup */
	pop	{r0-r1}
	mov	r3, #0
	ldr	r2, =stacktop		@ r2,r3 is the base of free memory
	bl	setup			@ r0 is the addr of the dtb

	/* run the test */
	ldr	r0, =__argc
	ldr	r0, [r0]
	ldr	r1, =__argv
	ldr	r2, =__environ
	bl	main
	bl	exit
	b	halt

.text

/*
 * arm_smccc_hvc / arm_smccc_smc
 *
 * Inputs:
 *   r0 -- function_id
 *   r1 -- arg0
 *   r2 -- arg1
 *   r3 -- arg2
 *   [sp] -- arg3
 *   [sp + #4] -- arg4
 *   [sp + #8] -- arg5
 *   [sp + #12] -- arg6
 *   [sp + #16] -- arg7
 *   [sp + #20] -- arg8
 *   [sp + #24] -- arg9
 *   [sp + #28] -- arg10
 *   [sp + #32] -- result (a pointer to a struct smccc_result, may be NULL)
 *
 * Outputs:
 *   r0 -- return code
 *
 * If the result pointer is not NULL:
 *   result.r0 -- return code
 *   result.r1 -- r1
 *   result.r2 -- r2
 *   result.r3 -- r3
 *   result.r4 -- r4
 *   result.r5 -- r5
 *   result.r6 -- r6
 *   result.r7 -- r7
 *   result.r8 -- r8
 *   result.r9 -- r9
 */
.macro do_smccc_call instr
	mov	r12, sp			@ r12 := start of the stacked args
	push	{r4-r11}
	ldm	r12, {r4-r11}		@ load arg3-arg10 into r4-r11
	\instr	#0
	/*
	 * Fetch the result pointer: it was at [sp, #32] on entry and
	 * the push above moved sp down by another 32 bytes.
	 */
	ldr	r10, [sp, #64]
	cmp	r10, #0
	beq	1f
	stm	r10, {r0-r9}		@ store r0-r9 into the result struct
1:
	pop	{r4-r11}
	mov	pc, lr
.endm

.globl arm_smccc_hvc
arm_smccc_hvc:
	do_smccc_call hvc

.globl arm_smccc_smc
arm_smccc_smc:
	do_smccc_call smc
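
/*
 * For reference, a sketch of the C-side view of these helpers. The
 * exact types and the struct definition live in the library headers,
 * so treat the following as illustrative rather than authoritative:
 *
 *   struct smccc_result {
 *           unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
 *   };
 *
 *   int arm_smccc_hvc(unsigned int function_id,
 *                     unsigned long arg0, ..., unsigned long arg10,
 *                     struct smccc_result *result);
 */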

enable_vfp:
	/* Enable full access to CP10 and CP11: */
	mov	r0, #(3 << 22 | 3 << 20)
	mcr	p15, 0, r0, c1, c0, 2	@ CPACR
	isb
	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
	mov	r0, #(1 << 30)
	vmsr	fpexc, r0
	mov	pc, lr

get_mmu_off:
	ldr	r0, =auxinfo
	ldr	r0, [r0, #4]		@ auxinfo.flags
	and	r0, #AUXINFO_MMU_OFF
	mov	pc, lr

.global secondary_entry
secondary_entry:
	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f
	mov	r1, #0			@ high word of the 64-bit table address
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]
	bl	asm_mmu_enable

1:
	/*
	 * Set the stack, and set up the vector table and
	 * exception stacks. The exception stack space starts
	 * at the stack top and grows up.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]
	mov	sp, r0
	bl	exceptions_init
	bl	enable_vfp

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	blx	r0
	b	do_idle

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     (r0 - lo, r1 - hi) is the base address of the translation table
 *   Outputs: none
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
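/*
 * Decoding the bytes above (attr0 is the low byte of MAIR0, attr7 the
 * high byte of MAIR1), following the table in the Linux kernel's
 * arch/arm/mm/proc-v7-3level.S: attr0 0x00 strongly-ordered,
 * attr1 0x44 normal non-cacheable, attr2 0xaa write-through,
 * attr3 0xee write-back, attr4 0x04 device, attr7 0xff normal
 * write-back write-allocate.
 */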
.globl asm_mmu_enable
asm_mmu_enable:
	/* TLBIALL */
	mcr	p15, 0, r2, c8, c7, 0	@ written value is ignored
	dsb	nsh

	/* TTBCR */
	ldr	r2, =(TTBCR_EAE |				\
		      TTBCR_SH0_SHARED |			\
		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
	mcr	p15, 0, r2, c2, c0, 2
	isb

	/* MAIR */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1

	/* TTBR0 */
	mcrr	p15, 0, r0, r1, c2	@ 64-bit write: r0 is the low word
	isb

	/* SCTLR */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C
	orr	r2, #CR_I
	orr	r2, #CR_M
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov	pc, lr
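
/*
 * Note for C callers (a sketch; the authoritative prototype lives in
 * the library headers): per the AAPCS the 64-bit table address is
 * passed in the r0/r1 pair, low word in r0, so this corresponds to
 * something like
 *
 *   extern void asm_mmu_enable(uint64_t pgtable);
 */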

.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/* clean + invalidate the dcache over all of the test's memory */
	ldr	r0, =__phys_offset
	ldr	r0, [r0]
	ldr	r1, =__phys_end
	ldr	r1, [r1]
	sub	r1, r1, r0		@ r1 := size of the range
	dcache_by_line_op dccimvac, sy, r0, r1, r2, r3

	mov	pc, lr

/*
 * Vectors
 */

/*
 * set_mode_stack - advance \stack by one frame, switch to \mode
 * (with IRQs and FIQs masked), and make that frame the mode's stack.
 */
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAME_SIZE
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb
	mov	sp, \stack
.endm

/*
 * exceptions_init
 *
 * Input r0 is the stack top, which is the exception stacks base
 */
.globl exceptions_init
exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR

	mrs	r2, cpsr

	/*
	 * The first frame is reserved for svc mode
	 */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0
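
	/*
	 * Resulting layout, in units of S_FRAME_SIZE above the
	 * exception stacks base passed in r0: +0 svc (reserved,
	 * see vector_svc), +1 und, +2 abt, +3 irq, +4 fiq. The
	 * remaining reserved frames are spare.
	 */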

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr

/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/* Branch to handler in SVC mode */
	mov	r0, #\vec
	mov	r1, sp
	ldr	lr, =vector_common
	movs	pc, lr			@ the 's' also moves spsr to cpsr
.endm

vector_stub	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4
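
/*
 * The corrections above are the architectural (ARM state) offsets of
 * the preferred return address from the banked lr: 4 for prefetch
 * abort, IRQ and FIQ, 8 for data abort, and 0 for undef, where lr
 * already points past the undefined instruction.
 */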

.align 5
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR). svc mode has no
	 * dedicated stack frame like the other modes, so mask
	 * sp down to the thread's base to find the frame area
	 * reserved at THREAD_START_SP (see exceptions_init).
	 */
	push	{ r1 }
	lsr	r1, sp, #THREAD_SHIFT
	lsl	r1, #THREAD_SHIFT
	add	r1, #THREAD_START_SP
	str	r0, [r1, #S_R0]
	pop	{ r0 }
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr

vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment
	subne	sp, #4

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC]		@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^		@ ^ selects the usr mode banked regs

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 */
	str	r6, [sp, #S_SP]

	/* return from exception */
	msr	spsr_cxsf, r5
	ldmia	sp, { r0-pc }^		@ with pc in the list, ^ also
					@ restores cpsr from spsr

.align 5
vector_addrexcptn:
	b	vector_addrexcptn

.section .text.ex
.align 5
vector_table:
	b	vector_rst
	b	vector_und
	b	vector_svc
	b	vector_pabt
	b	vector_dabt
	b	vector_addrexcptn	@ should never happen
	b	vector_irq
	b	vector_fiq