xref: /kvm-unit-tests/arm/cstart.S (revision 201b9e8bdc84c6436dd53b45d93a60c681b92719)
/*
 * Boot entry point and assembler functions for armv7 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <auxinfo.h>
#include <asm/assembler.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * Initial SP offset within a thread stack: reserve room for eight
 * S_FRAME_SIZE exception frames at the top, keeping 8-byte alignment.
 */
#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)

/*
 * zero_range - store zeroes over the doubleword-aligned range
 * [\tmp1, \tmp2). Clobbers \tmp1 (advanced to \tmp2) and zeroes
 * \tmp3/\tmp4, which are used as the store pair.
 */
.macro zero_range, tmp1, tmp2, tmp3, tmp4
	mov	\tmp3, #0
	mov	\tmp4, #0
9998:	cmp	\tmp1, \tmp2
	beq	9997f
	strd	\tmp3, \tmp4, [\tmp1], #8
	b	9998b
9997:
.endm

.arm

.section .init

/*
 * Boot entry point for cpu0.
 * Bootloader register state on entry (see the kernel doc
 * Documentation/arm/Booting):
 *   r0 = 0
 *   r1 = machine type number
 *   r2 = physical address of the dtb
 */
.globl start
start:
	/* zero BSS */
	ldr	r4, =bss
	ldr	r5, =ebss
	zero_range r4, r5, r6, r7

	/* zero stack */
	ldr	r5, =stacktop
	sub	r4, r5, #THREAD_SIZE
	zero_range r4, r5, r6, r7

	/*
	 * set stack, making room at top of stack for cpu0's
	 * exception stacks. Must start with stackptr, not
	 * stacktop, so the thread size masking (shifts) work.
	 */
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT
	lsl	sp, #THREAD_SHIFT
	add	sp, #THREAD_START_SP

	/*
	 * save sp before pushing anything on the stack;
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * We have no need for r0's nor r1's value, so put the dtb
	 * in r0. This allows setup to be consistent with arm64.
	 */
	mov	r0, r2
	push	{r0-r1}

	/* set up vector table, mode stacks, and enable the VFP */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init
	bl	enable_vfp

	/* complete setup */
	pop	{r0-r1}
	mov	r3, #0
	ldr	r2, =stacktop		@ r2,r3 is the base of free memory
	bl	setup			@ r0 is the addr of the dtb

	/* run the test */
	ldr	r0, =__argc
	ldr	r0, [r0]
	ldr	r1, =__argv
	ldr	r2, =__environ
	bl	main
	bl	exit
	b	halt

.text

/*
 * arm_smccc_hvc / arm_smccc_smc
 *
 * Inputs:
 *   r0 -- function_id
 *   r1 -- arg0
 *   r2 -- arg1
 *   r3 -- arg2
 *   [sp]       -- arg3
 *   [sp + #4]  -- arg4
 *   [sp + #8]  -- arg5
 *   [sp + #12] -- arg6
 *   [sp + #16] -- arg7
 *   [sp + #20] -- arg8
 *   [sp + #24] -- arg9
 *   [sp + #28] -- arg10
 *   [sp + #32] -- result (as a pointer to a struct smccc_result, may be NULL)
 *
 * Outputs:
 *   r0 -- return code
 *
 * If the result pointer is not NULL, result.r0 through result.r9
 * receive r0-r9 exactly as the conduit instruction left them.
 */
.macro do_smccc_call instr
	mov	r12, sp			@ r12 = caller sp (base of stacked args)
	push	{r4-r11}		@ preserve callee-saved regs (32 bytes)
	ldm	r12, {r4-r11}		@ arg3..arg10 -> r4..r11
	\instr	#0
	ldr	r10, [sp, #64]		@ result ptr: 32B stacked args + 32B pushed
	cmp	r10, #0
	beq	1f
	stm	r10, {r0-r9}		@ fill in struct smccc_result
1:
	pop	{r4-r11}
	mov	pc, lr
.endm

.globl arm_smccc_hvc
arm_smccc_hvc:
	do_smccc_call hvc

.globl arm_smccc_smc
arm_smccc_smc:
	do_smccc_call smc

/*
 * enable_vfp - grant full CP10/CP11 access and turn on the FPU.
 * Clobbers r0.
 */
enable_vfp:
	/* Enable full access to CP10 and CP11 (CPACR): */
	mov	r0, #(3 << 22 | 3 << 20)
	mcr	p15, 0, r0, c1, c0, 2
	isb
	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
	mov	r0, #(1 << 30)
	vmsr	fpexc, r0
	mov	pc, lr

/*
 * get_mmu_off - r0 := non-zero if the auxinfo flags request that
 * the MMU be left disabled. Clobbers r0 only.
 */
get_mmu_off:
	ldr	r0, =auxinfo
	ldr	r0, [r0, #4]		@ auxinfo flags word
	and	r0, #AUXINFO_MMU_OFF
	mov	pc, lr

.global secondary_entry
secondary_entry:
	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f
	mov	r1, #0
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]		@ (r0, r1) = 64-bit table base, hi is 0
	bl	asm_mmu_enable

1:
	/*
	 * Set the stack, and set up vector table
	 * and exception stacks. Exception stacks
	 * space starts at stack top and grows up.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]		@ stack top prepared by the boot cpu
	mov	sp, r0
	bl	exceptions_init
	bl	enable_vfp

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	blx	r0
	b	do_idle

/* halt - park the cpu in a low-power wait loop; never returns */
.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     (r0 - lo, r1 - hi) is the base address of the translation table
 *   Outputs: none
 *   Clobbers: r2
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
.globl asm_mmu_enable
asm_mmu_enable:
	/* TLBIALL - invalidate all unified TLB entries */
	mcr	p15, 0, r2, c8, c7, 0
	dsb	nsh

	/* TTBCR - LPAE (EAE) descriptors, inner-shareable WBWA walks */
	ldr	r2, =(TTBCR_EAE | 				\
		      TTBCR_SH0_SHARED | 			\
		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
	mcr	p15, 0, r2, c2, c0, 2
	isb

	/* MAIR - memory attribute indirection registers 0 and 1 */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1

	/* TTBR0 - single 64-bit write of the table base */
	mcrr	p15, 0, r0, r1, c2
	isb

	/* SCTLR - enable dcache, icache and the MMU */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C
	orr	r2, #CR_I
	orr	r2, #CR_M
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov     pc, lr

/*
 * asm_mmu_disable - clear SCTLR.M, then clean+invalidate the dcache
 * over all of the free-memory region so no dirty lines are stranded
 * behind the disabled MMU. Clobbers r0-r3.
 */
.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M
	mcr	p15, 0, r0, c1, c0, 0
	isb

	ldr	r0, =__phys_offset
	ldr	r0, [r0]
	ldr	r1, =__phys_end
	ldr	r1, [r1]
	sub	r1, r1, r0		@ r1 = size of [__phys_offset, __phys_end)
	dcache_by_line_op dccimvac, sy, r0, r1, r2, r3

	mov     pc, lr

/*
 * Vectors
 */

/*
 * set_mode_stack - switch to \mode (IRQ/FIQ masked) and point that
 * mode's banked sp at the next S_FRAME_SIZE frame. \stack is advanced
 * by S_FRAME_SIZE, so repeated invocations hand out adjacent frames.
 */
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAME_SIZE
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb
	mov	sp, \stack
.endm

/*
 * exceptions_init
 *
 * Input r0 is the stack top, which is the exception stacks base.
 * Installs vector_table in VBAR and assigns one exception frame to
 * each of und/abt/irq/fiq. Clobbers r0 and r2; returns in svc mode.
 */
.globl exceptions_init
exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0, use VBAR for vectors
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR

	mrs	r2, cpsr		@ remember current (svc) mode

	/*
	 * The first frame is reserved for svc mode
	 */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr

/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction	@ point lr at the offending instruction
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/* Branch to handler in SVC mode */
	mov	r0, #\vec		@ r0 = vector number
	mov	r1, sp			@ r1 = this mode's save area
	ldr	lr, =vector_common
	movs	pc, lr			@ 's' form: cpsr := spsr (mode switch)
.endm

vector_stub 	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4

.align 5
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC) and spsr_<exception>
	 * (parent CPSR) in the svc frame reserved at the top of the
	 * thread stack (found by masking sp down to the thread base).
	 */
	push	{ r1 }
	lsr	r1, sp, #THREAD_SHIFT
	lsl	r1, #THREAD_SHIFT
	add	r1, #THREAD_START_SP	@ r1 = reserved svc exception frame
	str	r0, [r1, #S_R0]
	pop	{ r0 }			@ r0 = the caller's r1
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr

/*
 * vector_common - build a pt_regs frame on the svc stack and call
 * do_handle_exception(vec, regs). On entry (from a stub or
 * vector_svc): r0 = vector number, r1 = stub save area with the
 * parent's r0/r1/pc/psr.
 */
vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment
	subne	sp, #4			@ Z stays valid until 'addne' below;
					@ nothing in between sets flags

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC] 	@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^		@ '^' stores the user-mode banked regs

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 */
	str	r6, [sp, #S_SP]

	/* return from exception */
	msr	spsr_cxsf, r5
	ldmia	sp, { r0-pc }^		@ restore user regs; '^' with pc: cpsr := spsr

.align 5
vector_addrexcptn:
	b	vector_addrexcptn	@ unexpected vector slot: spin forever

.section .text.ex
.align 5
/* Exception vector table, installed in VBAR by exceptions_init */
vector_table:
	b	vector_rst
	b	vector_und
	b	vector_svc
	b	vector_pabt
	b	vector_dabt
	b	vector_addrexcptn	@ should never happen
	b	vector_irq
	b	vector_fiq
439