xref: /kvm-unit-tests/arm/cstart.S (revision 20239febc6ff35f9f616d1811b64f3431df0cf68)
1/*
2 * Boot entry point and assembler functions for armv7 tests.
3 *
4 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5 *
6 * This work is licensed under the terms of the GNU LGPL, version 2.
7 */
8#define __ASSEMBLY__
9#include <auxinfo.h>
10#include <asm/thread_info.h>
11#include <asm/asm-offsets.h>
12#include <asm/pgtable-hwdef.h>
13#include <asm/ptrace.h>
14#include <asm/sysreg.h>
15
16#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)
17
18.arm
19
20.section .init
21
22.globl start
23start:
24	/*
25	 * set stack, making room at top of stack for cpu0's
26	 * exception stacks. Must start wtih stackptr, not
27	 * stacktop, so the thread size masking (shifts) work.
28	 */
29	ldr	sp, =stackptr
30	lsr	sp, #THREAD_SHIFT
31	lsl	sp, #THREAD_SHIFT
32	add	sp, #THREAD_START_SP
33
34	/*
35	 * save sp before pushing anything on the stack
36	 * lr makes a good temp register right now
37	 */
38	mov	lr, sp
39
40	/*
41	 * bootloader params are in r0-r2
42	 * See the kernel doc Documentation/arm/Booting
43	 *   r0 = 0
44	 *   r1 = machine type number
45	 *   r2 = physical address of the dtb
46	 *
47	 * As we have no need for r0's nor r1's value, then
48	 * put the dtb in r0. This allows setup to be consistent
49	 * with arm64.
50	 */
51	mov	r0, r2
52	push	{r0-r1}
53
54	/* set up vector table, mode stacks, and enable the VFP */
55	mov	r0, lr			@ lr is stack top (see above),
56					@ which is the exception stacks base
57	bl	exceptions_init
58	bl	enable_vfp
59
60	/* complete setup */
61	pop	{r0-r1}
62	bl	setup
63	bl	get_mmu_off
64	cmp	r0, #0
65	bne	1f
66	bl	setup_vm
67
681:
69	/* run the test */
70	ldr	r0, =__argc
71	ldr	r0, [r0]
72	ldr	r1, =__argv
73	ldr	r2, =__environ
74	bl	main
75	bl	exit
76	b	halt
77
78
79.macro set_mode_stack mode, stack
80	add	\stack, #S_FRAME_SIZE
81	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
82	isb
83	mov	sp, \stack
84.endm
85
86exceptions_init:
87	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
88	bic	r2, #CR_V		@ SCTLR.V := 0
89	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
90	ldr	r2, =vector_table
91	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR
92
93	mrs	r2, cpsr
94
95	/* first frame reserved for svc mode */
96	set_mode_stack	UND_MODE, r0
97	set_mode_stack	ABT_MODE, r0
98	set_mode_stack	IRQ_MODE, r0
99	set_mode_stack	FIQ_MODE, r0
100
101	msr	cpsr_cxsf, r2		@ back to svc mode
102	isb
103	mov	pc, lr
104
105enable_vfp:
106	/* Enable full access to CP10 and CP11: */
107	mov	r0, #(3 << 22 | 3 << 20)
108	mcr	p15, 0, r0, c1, c0, 2
109	isb
110	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
111	mov	r0, #(1 << 30)
112	vmsr	fpexc, r0
113	mov	pc, lr
114
115.text
116
117.global get_mmu_off
118get_mmu_off:
119	ldr	r0, =auxinfo
120	ldr	r0, [r0, #4]
121	and	r0, #AUXINFO_MMU_OFF
122	mov	pc, lr
123
124.global secondary_entry
125secondary_entry:
126	/* enable the MMU unless requested off */
127	bl	get_mmu_off
128	cmp	r0, #0
129	bne	1f
130	mov	r1, #0
131	ldr	r0, =mmu_idmap
132	ldr	r0, [r0]
133	bl	asm_mmu_enable
134
1351:
136	/*
137	 * Set the stack, and set up vector table
138	 * and exception stacks. Exception stacks
139	 * space starts at stack top and grows up.
140	 */
141	ldr	r1, =secondary_data
142	ldr	r0, [r1]
143	mov	sp, r0
144	bl	exceptions_init
145	bl	enable_vfp
146
147	/* finish init in C code */
148	bl	secondary_cinit
149
150	/* r0 is now the entry function, run it */
151	blx	r0
152	b	do_idle
153
154.globl halt
155halt:
1561:	wfi
157	b	1b
158
159/*
160 * asm_mmu_enable
161 *   Inputs:
162 *     (r0 - lo, r1 - hi) is the base address of the translation table
163 *   Outputs: none
164 */
165.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
166.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
167.globl asm_mmu_enable
168asm_mmu_enable:
169	/* TTBCR */
170	ldr	r2, =(TTBCR_EAE | 				\
171		      TTBCR_SH0_SHARED | 			\
172		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
173	mcr	p15, 0, r2, c2, c0, 2
174	isb
175
176	/* MAIR */
177	ldr	r2, =PRRR
178	mcr	p15, 0, r2, c10, c2, 0
179	ldr	r2, =NMRR
180	mcr	p15, 0, r2, c10, c2, 1
181
182	/* TTBR0 */
183	mcrr	p15, 0, r0, r1, c2
184	isb
185
186	/* SCTLR */
187	mrc	p15, 0, r2, c1, c0, 0
188	orr	r2, #CR_C
189	orr	r2, #CR_I
190	orr	r2, #CR_M
191	mcr	p15, 0, r2, c1, c0, 0
192	isb
193
194	mov     pc, lr
195
196.globl asm_mmu_disable
197asm_mmu_disable:
198	/* SCTLR */
199	mrc	p15, 0, r0, c1, c0, 0
200	bic	r0, #CR_M
201	mcr	p15, 0, r0, c1, c0, 0
202	isb
203	mov     pc, lr
204
205/*
206 * Vector stubs
207 * Simplified version of the Linux kernel implementation
208 *   arch/arm/kernel/entry-armv.S
209 *
210 * Each mode has an S_FRAME_SIZE sized memory region,
211 * and the mode's stack pointer has been initialized
212 * to the base of that region in exceptions_init.
213 */
214.macro vector_stub, name, vec, mode, correction=0
215.align 5
216vector_\name:
217.if \correction
218	sub	lr, lr, #\correction
219.endif
220	/*
221	 * Save r0, r1, lr_<exception> (parent PC)
222	 * and spsr_<exception> (parent CPSR)
223	 */
224	str	r0, [sp, #S_R0]
225	str	r1, [sp, #S_R1]
226	str	lr, [sp, #S_PC]
227	mrs	r0, spsr
228	str	r0, [sp, #S_PSR]
229
230	/* Prepare for SVC32 mode. */
231	mrs	r0, cpsr
232	bic	r0, #MODE_MASK
233	orr	r0, #SVC_MODE
234	msr	spsr_cxsf, r0
235
236	/* Branch to handler in SVC mode */
237	mov	r0, #\vec
238	mov	r1, sp
239	ldr	lr, =vector_common
240	movs	pc, lr
241.endm
242
243vector_stub 	rst,	0, UND_MODE
244vector_stub	und,	1, UND_MODE
245vector_stub	pabt,	3, ABT_MODE, 4
246vector_stub	dabt,	4, ABT_MODE, 8
247vector_stub	irq,	6, IRQ_MODE, 4
248vector_stub	fiq,	7, FIQ_MODE, 4
249
250.align 5
251vector_svc:
252	/*
253	 * Save r0, r1, lr_<exception> (parent PC)
254	 * and spsr_<exception> (parent CPSR)
255	 */
256	push	{ r1 }
257	lsr	r1, sp, #THREAD_SHIFT
258	lsl	r1, #THREAD_SHIFT
259	add	r1, #THREAD_START_SP
260	str	r0, [r1, #S_R0]
261	pop	{ r0 }
262	str	r0, [r1, #S_R1]
263	str	lr, [r1, #S_PC]
264	mrs	r0, spsr
265	str	r0, [r1, #S_PSR]
266
267	/*
268	 * Branch to handler, still in SVC mode.
269	 * r0 := 2 is the svc vector number.
270	 */
271	mov	r0, #2
272	ldr	lr, =vector_common
273	mov	pc, lr
274
275vector_common:
276	/* make room for pt_regs */
277	sub	sp, #S_FRAME_SIZE
278	tst	sp, #4			@ check stack alignment
279	subne	sp, #4
280
281	/* store registers r0-r12 */
282	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later
283
284	/* get registers saved in the stub */
285	ldr	r2, [r1, #S_R0]		@ r0
286	ldr	r3, [r1, #S_R1]		@ r1
287	ldr	r4, [r1, #S_PC] 	@ lr_<exception> (parent PC)
288	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)
289
290	/* fix r0 and r1 */
291	str	r2, [sp, #S_R0]
292	str	r3, [sp, #S_R1]
293
294	/* store sp_svc, if we were in usr mode we'll fix this later */
295	add	r6, sp, #S_FRAME_SIZE
296	addne	r6, #4			@ stack wasn't aligned
297	str	r6, [sp, #S_SP]
298
299	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
300	str	r4, [sp, #S_PC]		@ store lr_<exception>
301	str	r5, [sp, #S_PSR]	@ store spsr_<exception>
302
303	/* set ORIG_r0 */
304	mov	r2, #-1
305	str	r2, [sp, #S_OLD_R0]
306
307	/* if we were in usr mode then we need sp_usr and lr_usr instead */
308	and	r1, r5, #MODE_MASK
309	cmp	r1, #USR_MODE
310	bne	1f
311	add	r1, sp, #S_SP
312	stmia	r1, { sp,lr }^
313
314	/* Call the handler. r0 is the vector number, r1 := pt_regs */
3151:	mov	r1, sp
316	bl	do_handle_exception
317
318	/*
319	 * make sure we restore sp_svc on mode change. No need to
320	 * worry about lr_svc though, as that gets clobbered on
321	 * exception entry anyway.
322	 */
323	str	r6, [sp, #S_SP]
324
325	/* return from exception */
326	msr	spsr_cxsf, r5
327	ldmia	sp, { r0-pc }^
328
329.align 5
330vector_addrexcptn:
331	b	vector_addrexcptn
332
333.section .text.ex
334.align 5
335vector_table:
336	b	vector_rst
337	b	vector_und
338	b	vector_svc
339	b	vector_pabt
340	b	vector_dabt
341	b	vector_addrexcptn	@ should never happen
342	b	vector_irq
343	b	vector_fiq
344