xref: /kvm-unit-tests/arm/cstart.S (revision 3c7d322e87043aaad022e1999844c82d7b373aa9)
/*
 * Boot entry point and assembler functions for armv7 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)
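/*
 * i.e. the offset of the initial stack top within a thread: this leaves
 * room above it for the per-mode exception frames (eight S_FRAME_SIZE
 * slots) and keeps the result 8-byte aligned.
 */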

.arm

.section .init

.globl start
start:
	/*
	 * Set the stack, making room at the top of the stack for cpu0's
	 * exception stacks. We must start with stackptr, not stacktop,
	 * so that the thread-size masking (shifts) below works.
	 */
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT
	lsl	sp, #THREAD_SHIFT
	add	sp, #THREAD_START_SP
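	/*
	 * The shifts round sp down to the thread base; the add then places
	 * it at THREAD_START_SP, just below the exception-frame area.
	 */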

	/*
	 * save sp before pushing anything on the stack
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * bootloader params are in r0-r2
	 * See the kernel doc Documentation/arm/Booting
	 *   r0 = 0
	 *   r1 = machine type number
	 *   r2 = physical address of the dtb
	 *
	 * As we have no need for r0's or r1's value, put the dtb
	 * in r0. This allows setup to be consistent with arm64.
	 */
	mov	r0, r2
	push	{r0-r1}

	/* set up vector table and mode stacks */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init

	/* complete setup */
	pop	{r0-r1}
	bl	setup

	/* run the test */
	ldr	r0, =__argc
	ldr	r0, [r0]
	ldr	r1, =__argv
	ldr	r2, =__environ
	bl	main
	bl	exit
	b	halt


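/*
 * set_mode_stack: advance \stack by one S_FRAME_SIZE frame, switch to
 * \mode with IRQ and FIQ masked (cpsr_c only writes the control field),
 * then point that mode's banked sp at the new frame.
 */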
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAME_SIZE
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb
	mov	sp, \stack
.endm

exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR
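	@ with SCTLR.V clear, exceptions vector through VBAR (the table
	@ above) rather than the hivecs address 0xffff0000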

	mrs	r2, cpsr

	/* first frame reserved for svc mode */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr

.text

.global secondary_entry
secondary_entry:
	/* enable the MMU */
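	@ asm_mmu_enable takes the table base in r0 (low)/r1 (high);
	@ mmu_idmap holds a 32-bit table address, so the high word is zero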
	mov	r1, #0
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]
	bl	asm_mmu_enable

	/*
	 * Set the stack, and set up the vector table and exception
	 * stacks. The exception stack space starts at the stack top
	 * and grows up.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]
	mov	sp, r0
	bl	exceptions_init

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	mov	pc, r0

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     (r0 - lo, r1 - hi) is the base address of the translation table
 *   Outputs: none
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
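/*
 * With TTBCR.EAE set below, the PRRR/NMRR registers act as MAIR0/MAIR1
 * (memory attribute indirection); each byte defines one attribute index
 * used by the long-descriptor page tables.
 */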
.globl asm_mmu_enable
asm_mmu_enable:
	/* TTBCR */
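	@ setting EAE (bit 31) selects the long-descriptor (LPAE) format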
	mrc	p15, 0, r2, c2, c0, 2
	orr	r2, #(1 << 31)		@ TTB_EAE
	mcr	p15, 0, r2, c2, c0, 2

	/* MAIR */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1

	/* TTBR0 */
	mcrr	p15, 0, r0, r1, c2
	isb

	/* SCTLR */
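	@ enable the MMU together with the data and instruction caches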
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C
	orr	r2, #CR_I
	orr	r2, #CR_M
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov	pc, lr

.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M
	mcr	p15, 0, r0, c1, c0, 0
	isb
	mov	pc, lr

/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/* Branch to handler in SVC mode */
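	/* movs pc, lr also copies spsr (set to SVC above) into cpsr */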
	mov	r0, #\vec
	mov	r1, sp
	ldr	lr, =vector_common
	movs	pc, lr
.endm

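/*
 * The vec numbers match the order of vector_table below. The correction
 * converts the banked lr into the parent return address: 4 for prefetch
 * abort, IRQ and FIQ, 8 for data abort.
 */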
vector_stub	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4

.align 5
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
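	/*
	 * sp is still the thread's svc stack here, so rounding it down to
	 * the THREAD_SIZE boundary and adding THREAD_START_SP yields the
	 * exception frame reserved for svc mode in exceptions_init.
	 */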
	push	{ r1 }
	lsr	r1, sp, #THREAD_SHIFT
	lsl	r1, #THREAD_SHIFT
	add	r1, #THREAD_START_SP
	str	r0, [r1, #S_R0]
	pop	{ r0 }
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr

vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment
	subne	sp, #4

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC]		@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
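	/*
	 * Note: the 'ne' condition below reuses the flags from the
	 * alignment tst above; the loads and stores in between leave
	 * the flags untouched.
	 */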
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
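	/* the '^' form of stm stores the user-mode banked sp and lr */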
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 */
	str	r6, [sp, #S_SP]

	/* return from exception */
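	/* ldm with '^' and pc in the register list also copies spsr into cpsr */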
	msr	spsr_cxsf, r5
	ldmia	sp, { r0-pc }^

.align 5
vector_addrexcptn:
	b	vector_addrexcptn

.section .text.ex
.align 5
vector_table:
	b	vector_rst
	b	vector_und
	b	vector_svc
	b	vector_pabt
	b	vector_dabt
	b	vector_addrexcptn	@ should never happen
	b	vector_irq
	b	vector_fiq
