xref: /kvm-unit-tests/arm/cstart.S (revision f567e5eaa1d613d279b48e8b192a79e9e93aa909)
/*
 * Boot entry point and assembler functions for armv7 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
8#define __ASSEMBLY__
9#include <auxinfo.h>
10#include <asm/thread_info.h>
11#include <asm/asm-offsets.h>
12#include <asm/pgtable-hwdef.h>
13#include <asm/ptrace.h>
14#include <asm/sysreg.h>
15
/*
 * Initial sp offset within a THREAD_SIZE-aligned stack region: leaves
 * room above it for 8 exception frames (one S_FRAME_SIZE frame per
 * exception mode, see exceptions_init), rounded down to 8-byte alignment.
 */
#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)
17
18.arm
19
20.section .init
21
.globl start
start:
	/*
	 * set stack, making room at top of stack for cpu0's
	 * exception stacks. Must start with stackptr, not
	 * stacktop, so the thread size masking (shifts) work.
	 */
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT
	lsl	sp, #THREAD_SHIFT	@ sp = THREAD_SIZE-aligned stack base
	add	sp, #THREAD_START_SP	@ leave room above for exception frames

	/*
	 * save sp before pushing anything on the stack
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * bootloader params are in r0-r2
	 * See the kernel doc Documentation/arm/Booting
	 *   r0 = 0
	 *   r1 = machine type number
	 *   r2 = physical address of the dtb
	 *
	 * As we have no need for r0's nor r1's value, then
	 * put the dtb in r0. This allows setup to be consistent
	 * with arm64.
	 */
	mov	r0, r2
	push	{r0-r1}			@ preserve dtb pointer across the calls below

	/* set up vector table, mode stacks, and enable the VFP */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init
	bl	enable_vfp

	/* complete setup: r0 = dtb address saved above */
	pop	{r0-r1}
	bl	setup
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f			@ MMU-off requested: skip VM setup
	bl	setup_vm

1:
	/* run the test: main(__argc, __argv, __environ), then exit() */
	ldr	r0, =__argc
	ldr	r0, [r0]		@ __argc is an int, load its value
	ldr	r1, =__argv
	ldr	r2, =__environ
	bl	main
	bl	exit
	b	halt			@ exit should not return; park if it does
77
78
/*
 * set_mode_stack - switch to exception mode \mode (IRQs and FIQs
 * masked) and point its banked sp at a dedicated frame. \stack is
 * advanced by S_FRAME_SIZE first, so successive invocations carve
 * consecutive frames out of the exception stacks area.
 */
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAME_SIZE	@ advance to this mode's frame
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb
	mov	sp, \stack		@ set the banked sp for \mode
.endm
85
/*
 * exceptions_init - install the vector table and give each exception
 * mode its own stack frame.
 *
 * Input:    r0 = base of the exception stacks area
 * Clobbers: r0, r2 (cpsr is switched per mode and restored on exit)
 */
exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0 (vectors at VBAR, not hivecs)
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR

	mrs	r2, cpsr		@ save current mode/state for restore below

	/* first frame reserved for svc mode (see vector_svc) */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr
104
/*
 * enable_vfp - grant full CP10/CP11 access and enable the FPU.
 * Clobbers: r0
 */
enable_vfp:
	/* Enable full access to CP10 and CP11 (CPACR.cp10 = cp11 = 0b11): */
	mov	r0, #(3 << 22 | 3 << 20)
	mcr	p15, 0, r0, c1, c0, 2
	isb
	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
	mov	r0, #(1 << 30)
	vmsr	fpexc, r0
	mov	pc, lr
114
115.text
116
/*
 * get_mmu_off - returns nonzero in r0 iff AUXINFO_MMU_OFF is set,
 * i.e. the test asked to run with the MMU disabled.
 * Clobbers: r0
 */
.global get_mmu_off
get_mmu_off:
	ldr	r0, =auxinfo
	ldr	r0, [r0, #4]		@ second word of auxinfo — presumably the
					@ flags field; confirm against auxinfo.h
	and	r0, #AUXINFO_MMU_OFF
	mov	pc, lr
123
/*
 * secondary_entry - entry point for secondary cpus. Enables the MMU
 * (unless requested off), sets up this cpu's stack and exception
 * state, completes init in C, then runs the entry function returned
 * by secondary_cinit.
 */
.global secondary_entry
secondary_entry:
	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f
	mov	r1, #0			@ table base high word (asm_mmu_enable takes r1:r0)
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]		@ r0 = idmap translation table base (low word)
	bl	asm_mmu_enable

1:
	/*
	 * Set the stack, and set up vector table
	 * and exception stacks. Exception stacks
	 * space starts at stack top and grows up.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]		@ r0 = this cpu's stack address from secondary_data
	mov	sp, r0
	bl	exceptions_init
	bl	enable_vfp

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	blx	r0
	b	do_idle			@ idle if the entry function returns
153
/* halt - park the calling CPU: wait for interrupt, forever. */
.globl halt
halt:
.Lhalt_loop:
	wfi
	b	.Lhalt_loop
158
/*
 * asm_mmu_enable
 *   Inputs:
 *     (r0 - lo, r1 - hi) is the base address of the translation table
 *   Outputs: none
 *   Clobbers: r2
 *
 * Enables the MMU using the long-descriptor format (TTBCR.EAE), and
 * turns on the data and instruction caches.
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
.globl asm_mmu_enable
asm_mmu_enable:
	/* TLBIALL: drop any stale translations first (written value is ignored) */
	mcr	p15, 0, r2, c8, c7, 0
	dsb	nsh

	/* TTBCR: long-descriptor format, shareable, write-back write-allocate */
	ldr	r2, =(TTBCR_EAE | 				\
		      TTBCR_SH0_SHARED | 			\
		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
	mcr	p15, 0, r2, c2, c0, 2
	isb

	/* MAIR: memory attribute indirection (MAIR0/MAIR1) */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1

	/* TTBR0: 64-bit write of r1:r0 */
	mcrr	p15, 0, r0, r1, c2
	isb

	/* SCTLR: enable dcache, icache and the MMU */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C
	orr	r2, #CR_I
	orr	r2, #CR_M
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov     pc, lr
199
/*
 * asm_mmu_disable - clear SCTLR.M to turn address translation off.
 * The cache enable bits (SCTLR.C/I) are left unchanged.
 * Clobbers: r0
 */
.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M
	mcr	p15, 0, r0, c1, c0, 0
	isb
	mov     pc, lr
208
/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 *
 * Parameters:
 *   \name       suffix for the generated vector_<name> label
 *   \vec        vector number passed to vector_common in r0
 *   \mode       exception mode taking this vector (not referenced by
 *               the macro body; serves as documentation at call sites)
 *   \correction subtracted from lr_<exception> to recover the parent
 *               PC (the architectural exception return offset)
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR) in this mode's frame
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/* Branch to handler in SVC mode */
	mov	r0, #\vec
	mov	r1, sp			@ r1 = this mode's saved-register frame
	ldr	lr, =vector_common
	movs	pc, lr			@ movs pc: also copies spsr (SVC) into cpsr
.endm
246
/*
 * One stub per vector; the vector number matches the table slot in
 * vector_table (svc=2 and addrexcptn=5 are handled separately below).
 * The lr corrections follow the architectural return offsets.
 */
vector_stub 	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4
253
.align 5
/*
 * vector_svc - svc entry. SVC mode has no dedicated stub frame;
 * instead it uses the first exception frame reserved at the top of
 * the current thread's stack (see exceptions_init and
 * THREAD_START_SP), computed from sp_svc by mask-and-offset.
 */
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	push	{ r1 }			@ free r1 for the frame address
	lsr	r1, sp, #THREAD_SHIFT
	lsl	r1, #THREAD_SHIFT	@ r1 = thread stack base
	add	r1, #THREAD_START_SP	@ r1 = reserved svc exception frame
	str	r0, [r1, #S_R0]
	pop	{ r0 }			@ r0 = caller's original r1
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr
278
/*
 * vector_common - build a struct pt_regs on the svc stack and call
 * do_handle_exception(vec, regs), then return to the parent context.
 *
 * On entry (from a vector stub or vector_svc, in SVC mode):
 *   r0 = vector number
 *   r1 = frame holding the parent's r0, r1, pc and psr
 */
vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment; ZF is consumed
	subne	sp, #4			@ again below (str/ldr/stmia leave flags intact)

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC] 	@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned (ZF still from tst above)
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^		@ ^: store the user-mode banked sp/lr

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 */
	str	r6, [sp, #S_SP]

	/* return from exception */
	msr	spsr_cxsf, r5
	ldmia	sp, { r0-pc }^		@ ^ with pc in the list: cpsr := spsr too
332
.align 5
/*
 * Address exception vector: not expected to be taken (see the
 * vector_table comment); spin here if it somehow is.
 */
vector_addrexcptn:
	b	vector_addrexcptn
336
/*
 * Exception vector table, installed into VBAR by exceptions_init.
 * One branch per architectural vector slot, in architectural order.
 */
.section .text.ex
.align 5
vector_table:
	b	vector_rst		@ 0x00 reset
	b	vector_und		@ 0x04 undefined instruction
	b	vector_svc		@ 0x08 supervisor call
	b	vector_pabt		@ 0x0c prefetch abort
	b	vector_dabt		@ 0x10 data abort
	b	vector_addrexcptn	@ should never happen
	b	vector_irq		@ 0x18 irq
	b	vector_fiq		@ 0x1c fiq
348