xref: /kvm-unit-tests/arm/cstart.S (revision 410b3bf09e76fd2b6d68b424a26d407a0bc4bc11)
1/*
2 * Boot entry point and assembler functions for armv7 tests.
3 *
4 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5 *
6 * This work is licensed under the terms of the GNU LGPL, version 2.
7 */
8#define __ASSEMBLY__
9#include <auxinfo.h>
10#include <asm/thread_info.h>
11#include <asm/asm-offsets.h>
12#include <asm/pgtable-hwdef.h>
13#include <asm/ptrace.h>
14#include <asm/sysreg.h>
15
16#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)
17
18.arm
19
20.section .init
21
.globl start
start:
	/*
	 * Boot entry point for cpu0, entered from the bootloader.
	 *
	 * Set stack, making room at top of stack for cpu0's
	 * exception stacks. Must start with stackptr, not
	 * stacktop, so the thread size masking (shifts) work.
	 */
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT	@ round sp down to the thread-area base
	lsl	sp, #THREAD_SHIFT
	add	sp, #THREAD_START_SP	@ leave room above for exception frames

	/*
	 * save sp before pushing anything on the stack
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * bootloader params are in r0-r2
	 * See the kernel doc Documentation/arm/Booting
	 *   r0 = 0
	 *   r1 = machine type number
	 *   r2 = physical address of the dtb
	 *
	 * As we have no need for r0's nor r1's value, then
	 * put the dtb in r0. This allows setup to be consistent
	 * with arm64.
	 */
	mov	r0, r2
	push	{r0-r1}			@ preserve dtb pointer across init calls

	/* set up vector table, mode stacks, and enable the VFP */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init
	bl	enable_vfp

	/* complete setup */
	pop	{r0-r1}			@ r0 = dtb address again
	bl	setup
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f			@ skip VM setup when MMU requested off
	bl	setup_vm

1:
	/* run the test: main(__argc, __argv, __environ) */
	ldr	r0, =__argc
	ldr	r0, [r0]
	ldr	r1, =__argv
	ldr	r2, =__environ
	bl	main
	bl	exit			@ exit with main's return value in r0
	b	halt			@ exit should not return; park if it does
77
78
/*
 * set_mode_stack - switch to \mode (IRQ and FIQ masked) and point the
 * mode's banked sp at the next S_FRAME_SIZE-sized frame above \stack.
 * Side effect: \stack is advanced by S_FRAME_SIZE, so successive
 * invocations hand out consecutive frames.
 */
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAME_SIZE
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb				@ ensure the mode switch completes
	mov	sp, \stack		@ writes the new mode's banked sp
.endm
85
/*
 * exceptions_init - install the vector table and give each exception
 * mode its own save frame.
 *
 * Input:    r0 = base of the exception stacks region
 * Clobbers: r0 (advanced by each set_mode_stack), r2
 * Returns in svc mode (original cpsr restored from r2).
 */
exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0 (use VBAR, not hivecs)
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR

	mrs	r2, cpsr		@ remember the entry mode/state

	/* first frame reserved for svc mode */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr
104
/*
 * enable_vfp - grant full CP10/CP11 coprocessor access, then set
 * FPEXC.EN so VFP/Advanced SIMD instructions can execute.
 * Clobbers: r0.
 */
enable_vfp:
	/* Enable full access to CP10 and CP11: */
	mov	r0, #(3 << 22 | 3 << 20)	@ CPACR cp11,cp10 := full access
	mcr	p15, 0, r0, c1, c0, 2		@ write CPACR
	isb
	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
	mov	r0, #(1 << 30)
	vmsr	fpexc, r0
	mov	pc, lr
114
.text

/*
 * get_mmu_off - r0 := nonzero if the test asked to keep the MMU off.
 * Reads the word at auxinfo+4 (presumably the flags field -- confirm
 * against auxinfo.h) and masks it with AUXINFO_MMU_OFF.
 * Clobbers: r0 only.
 */
.global get_mmu_off
get_mmu_off:
	ldr	r0, =auxinfo
	ldr	r0, [r0, #4]		@ second 32-bit word of auxinfo
	and	r0, #AUXINFO_MMU_OFF
	mov	pc, lr
123
/*
 * secondary_entry - entry point for secondary cpus. Enables the MMU
 * (unless requested off), sets this cpu's stack and exception state,
 * finishes initialization in C, then jumps to the entry function that
 * secondary_cinit returns in r0.
 */
.global secondary_entry
secondary_entry:
	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f
	mov	r1, #0			@ high word of the 64-bit table base
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]		@ (r0,r1) = idmap translation table
	bl	asm_mmu_enable

1:
	/*
	 * Set the stack, and set up vector table
	 * and exception stacks. Exception stacks
	 * space starts at stack top and grows up.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]		@ stack top prepared by the boot cpu
	mov	sp, r0
	bl	exceptions_init		@ r0 still holds the stack top
	bl	enable_vfp

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	blx	r0
	b	do_idle			@ idle if the entry function returns
153
/*
 * halt - park the cpu in a low-power wfi loop; never returns.
 */
.globl halt
halt:
1:	wfi				@ wait for interrupt, then loop
	b	1b
158
/*
 * asm_mmu_enable
 *   Inputs:
 *     (r0 - lo, r1 - hi) is the base address of the translation table
 *   Outputs: none
 *   Clobbers: r2
 *
 * Configures LPAE translation (TTBCR.EAE=1) with write-back
 * write-allocate inner/outer cacheable, shareable walks, programs
 * the memory attribute registers, sets TTBR0, then turns on the
 * MMU along with the I and D caches in SCTLR.
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
.globl asm_mmu_enable
asm_mmu_enable:
	/* TLBIALL - flush stale translations first (r2 value ignored) */
	mcr	p15, 0, r2, c8, c7, 0
	dsb	nsh

	/* TTBCR */
	ldr	r2, =(TTBCR_EAE | 				\
		      TTBCR_SH0_SHARED | 			\
		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
	mcr	p15, 0, r2, c2, c0, 2
	isb

	/* MAIR */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0	@ MAIR0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1	@ MAIR1

	/* TTBR0 - 64-bit write via mcrr */
	mcrr	p15, 0, r0, r1, c2
	isb

	/* SCTLR */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C		@ enable data/unified caches
	orr	r2, #CR_I		@ enable instruction cache
	orr	r2, #CR_M		@ enable the MMU
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov     pc, lr
199
/*
 * dcache_clean_inval - clean and invalidate the data cache by VA over
 * [\start, \end), one line at a time (DCCIMVAC), finishing with
 * "dsb \domain".
 *
 * \start is rounded down to a cache-line boundary and clobbered;
 * \tmp1 and \tmp2 are scratch registers. Uses the runtime-probed
 * dcache_line_size variable, assumed to be a power of two.
 */
.macro dcache_clean_inval domain, start, end, tmp1, tmp2
	ldr	\tmp1, =dcache_line_size
	ldr	\tmp1, [\tmp1]		@ line size in bytes
	sub	\tmp2, \tmp1, #1	@ line-size mask
	bic	\start, \start, \tmp2	@ align start down to a line
9998:
	/* DCCIMVAC - clean+invalidate data cache line by VA to PoC */
	mcr	p15, 0, \start, c7, c14, 1
	add	\start, \start, \tmp1
	cmp	\start, \end
	blo	9998b
	dsb	\domain
.endm
213
/*
 * asm_mmu_disable - clear SCTLR.M to turn the MMU off, then clean and
 * invalidate the data cache over [__phys_offset, __phys_end) so memory
 * contents are consistent for the now-uncached accesses.
 * Clobbers: r0-r3.
 */
.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M		@ MMU off
	mcr	p15, 0, r0, c1, c0, 0
	isb

	ldr	r0, =__phys_offset
	ldr	r0, [r0]
	ldr	r1, =__phys_end
	ldr	r1, [r1]
	dcache_clean_inval sy, r0, r1, r2, r3
	isb

	mov     pc, lr
230
/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 *
 * Parameters:
 *   \name       label suffix; defines vector_\name
 *   \vec        vector number handed to vector_common in r0
 *   \mode       documents which mode the stub runs in (informational
 *               only -- not referenced in the macro body)
 *   \correction how far lr_<exception> is past the preferred return
 *               address for this exception type
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction	@ adjust parent PC for this vector
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/* Branch to handler in SVC mode */
	mov	r0, #\vec
	mov	r1, sp			@ r1 = this mode's save frame
	ldr	lr, =vector_common
	movs	pc, lr			@ pc := lr and cpsr := spsr (enter svc)
.endm
268
/* one stub per exception vector; corrections undo lr's per-type offset */
vector_stub 	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4
275
.align 5
/*
 * vector_svc - svc exceptions already execute in svc mode, so there is
 * no banked frame to land in. Reconstruct the thread-area base from sp
 * (same shift trick as in start) and use the first exception frame,
 * which exceptions_init reserved for svc mode.
 */
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	push	{ r1 }			@ free r1 for use as frame pointer
	lsr	r1, sp, #THREAD_SHIFT
	lsl	r1, #THREAD_SHIFT
	add	r1, #THREAD_START_SP	@ r1 = svc mode's reserved frame
	str	r0, [r1, #S_R0]
	pop	{ r0 }			@ r0 = caller's original r1
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr
300
/*
 * vector_common - build a full pt_regs frame on the svc stack and call
 * do_handle_exception(vector, regs), then return from the exception.
 *
 * On entry (from a vector stub or vector_svc), in svc mode:
 *   r0 = vector number
 *   r1 = the originating mode's small save frame
 *        (holds the parent's r0, r1, pc and psr)
 *   lr = lr_svc
 */
vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment
	subne	sp, #4			@ NE result reused below: str/ldr/stm
					@ leave the flags untouched

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC] 	@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned (tst flags above)
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^		@ '^': store the user-mode banked regs

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 */
	str	r6, [sp, #S_SP]

	/* return from exception */
	msr	spsr_cxsf, r5
	ldmia	sp, { r0-pc }^		@ '^' with pc: cpsr := spsr as well
354
.align 5
/* address exception vector: not generated on ARMv7; spin if reached */
vector_addrexcptn:
	b	vector_addrexcptn
358
.section .text.ex
.align 5
/*
 * Exception vector table, installed into VBAR by exceptions_init.
 * One branch per ARM exception vector, in architectural order.
 */
vector_table:
	b	vector_rst
	b	vector_und
	b	vector_svc
	b	vector_pabt
	b	vector_dabt
	b	vector_addrexcptn	@ should never happen
	b	vector_irq
	b	vector_fiq
370