xref: /kvm-unit-tests/arm/cstart.S (revision d9193a9cc0f9d902ad1cea65eed00df58238f3cb)
1/*
2 * Boot entry point and assembler functions for armv7 tests.
3 *
4 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5 *
6 * This work is licensed under the terms of the GNU LGPL, version 2.
7 */
8#define __ASSEMBLY__
9#include <asm/thread_info.h>
10#include <asm/asm-offsets.h>
11#include <asm/ptrace.h>
12#include <asm/sysreg.h>
13
/*
 * Usable stack top within a THREAD_SIZE-aligned thread area: reserve
 * room above it for 8 exception frames of S_FRAME_SIZE each, and keep
 * the result 8-byte aligned (& ~7) as AAPCS requires for sp.
 */
#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)
15
.arm

.section .init

/*
 * start - boot entry point for the primary CPU (cpu0).
 * Entered from the bootloader with the MMU off and r0-r2 holding the
 * Linux boot-protocol parameters (see comment below).
 */
.globl start
start:
	/*
	 * set stack, making room at top of stack for cpu0's
	 * exception stacks. Must start with stackptr, not
	 * stacktop, so the thread size masking (shifts) work.
	 */
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT	@ round down to the THREAD_SIZE-
	lsl	sp, #THREAD_SHIFT	@ aligned base of the thread area,
	add	sp, #THREAD_START_SP	@ then up to the usable stack top

	/*
	 * save sp before pushing anything on the stack
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * bootloader params are in r0-r2
	 * See the kernel doc Documentation/arm/Booting
	 *   r0 = 0
	 *   r1 = machine type number
	 *   r2 = physical address of the dtb
	 *
	 * As we have no need for r0's nor r1's value, then
	 * put the dtb in r0. This allows setup to be consistent
	 * with arm64.
	 */
	mov	r0, r2
	push	{r0-r1}

	/* set up vector table and mode stacks */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init

	/* complete setup: restore r0 = dtb address saved above */
	pop	{r0-r1}
	bl	setup

	/* run the test: main(__argc, __argv) */
	ldr	r0, =__argc
	ldr	r0, [r0]
	ldr	r1, =__argv
	bl	main
	bl	exit
	b	halt
68
69
/*
 * set_mode_stack - give processor mode \mode its own exception stack.
 * Advances \stack by one S_FRAME_SIZE frame, switches to \mode with
 * IRQs and FIQs masked, then points that mode's banked sp at the frame.
 * Leaves the CPU in \mode; the caller is responsible for switching back.
 */
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAME_SIZE	@ claim the next frame
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb
	mov	sp, \stack		@ sp is banked per mode
.endm
76
/*
 * exceptions_init - install the vector table and per-mode stacks.
 * In:       r0 = base of the exception stacks region (the stack top)
 * Out:      none; returns in SVC mode with the caller's cpsr restored
 * Clobbers: r0 (advanced past the claimed frames), r2
 */
exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0 (vectors at VBAR)
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR

	mrs	r2, cpsr		@ r2 is not banked, so it survives
					@ the mode switches below

	/* first frame reserved for svc mode */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr
95
.text

/*
 * secondary_entry - boot entry point for secondary CPUs.
 * Entered with the MMU off. This CPU's stack top is read from
 * secondary_data (presumably filled in by C code on the boot CPU
 * before this CPU is started — see secondary_cinit's callers).
 * No stack exists until sp is loaded below, so nothing is pushed
 * before that; bl only uses lr.
 */
.global secondary_entry
secondary_entry:
	/* enable the MMU; (r0 lo, r1 hi) = 64-bit mmu_idmap table base */
	mov	r1, #0
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]
	bl	asm_mmu_enable

	/*
	 * Set the stack, and set up vector table
	 * and exception stacks. Exception stacks
	 * space starts at stack top and grows up.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]
	mov	sp, r0
	bl	exceptions_init

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	mov	pc, r0
121
/*
 * halt - park the calling CPU forever.
 * Low-power spin: wait-for-interrupt, then branch straight back.
 * Never returns; clobbers nothing.
 */
.globl halt
halt:
.Lhalt_loop:
	wfi			@ doze until an interrupt/event wakes us
	b	.Lhalt_loop	@ ...then go right back to sleep
126
/*
 * asm_mmu_enable - enable the MMU with LPAE translation.
 *   Inputs:
 *     (r0 - lo, r1 - hi) is the 64-bit base address of the
 *     translation table
 *   Outputs:  none
 *   Clobbers: r2
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
.globl asm_mmu_enable
asm_mmu_enable:
	/* TTBCR: select the LPAE long-descriptor translation format */
	mrc	p15, 0, r2, c2, c0, 2
	orr	r2, #(1 << 31)		@ TTB_EAE
	mcr	p15, 0, r2, c2, c0, 2

	/* MAIR: PRRR/NMRR act as MAIR0/MAIR1 once TTBCR.EAE is set */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1

	/* TTBR0: 64-bit write of the table base via mcrr */
	mcrr	p15, 0, r0, r1, c2
	isb

	/* SCTLR: turn on data cache, icache and the MMU */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C
	orr	r2, #CR_I
	orr	r2, #CR_M
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov     pc, lr
161
/*
 * asm_mmu_disable - turn the MMU off (SCTLR.M := 0).
 * Inputs: none.  Outputs: none.  Clobbers: r0.
 */
.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M
	mcr	p15, 0, r0, c1, c0, 0
	isb
	mov     pc, lr
170
/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 */

/*
 * vector_stub - generate the vector_\name exception entry stub.
 *   \vec        vector number handed to the common handler in r0
 *   \mode       exception mode the stub runs in; its banked sp points
 *               at that mode's S_FRAME_SIZE save area
 *   \correction how far lr_<exception> is past the parent PC for this
 *               exception type (architectural return offset)
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/*
	 * Branch to handler in SVC mode. movs pc, lr is an exception
	 * return: it also copies spsr (prepared above) into cpsr.
	 * r1 = this mode's save area, for vector_common to unpack.
	 */
	mov	r0, #\vec
	mov	r1, sp
	ldr	lr, =vector_common
	movs	pc, lr
.endm
208
/*
 * One stub per exception vector. The trailing argument is the lr
 * correction for that exception type; rst and und need none.
 * rst shares the UND-mode save area.
 */
vector_stub 	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4
215
.align 5
/*
 * vector_svc - SVC (svc/swi instruction) exception entry.
 * SVC is taken in SVC mode itself, so there is no separate banked
 * save area; instead recompute the thread's reserved svc frame (the
 * first frame above THREAD_START_SP, see exceptions_init) from the
 * current sp and stash state there.
 */
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	push	{ r1 }			@ free r1 for the address math
	lsr	r1, sp, #THREAD_SHIFT	@ round sp down to the thread
	lsl	r1, #THREAD_SHIFT	@ area base...
	add	r1, #THREAD_START_SP	@ ...then r1 = svc save frame
	str	r0, [r1, #S_R0]
	pop	{ r0 }			@ r0 = original r1
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr
240
/*
 * vector_common - common exception handling, entered in SVC mode.
 * In: r0 = vector number
 *     r1 = save area holding the r0/r1/pc/psr the entry stub stashed
 * Builds a full pt_regs frame on the svc stack, calls
 * do_handle_exception(vector, regs), then returns from the exception.
 */
vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment; none of the
	subne	sp, #4			@ insns below the addne touch flags

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC] 	@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned (tst above)
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 (-1: no syscall original r0 to report) */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^		@ ^ selects the user-mode banked regs

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 * (r5 and r6 are callee-saved, so they survived the call.)
	 */
	str	r6, [sp, #S_SP]

	/* return from exception: cpsr is restored from spsr (:= r5) */
	msr	spsr_cxsf, r5
	ldmia	sp, { r0-pc }^		@ ^ with pc: loads regs and spsr->cpsr
294
.align 5
/*
 * vector_addrexcptn - address exception vector. Marked "should never
 * happen" in the vector table below; spin forever if it ever fires.
 */
vector_addrexcptn:
	b	vector_addrexcptn
298
.section .text.ex

/*
 * vector_table - ARM exception vector table; VBAR points here (set in
 * exceptions_init). Each entry is one branch at its fixed
 * architectural offset.
 */
.align 5
vector_table:
	b	vector_rst		@ 0x00 reset
	b	vector_und		@ 0x04 undefined instruction
	b	vector_svc		@ 0x08 supervisor call
	b	vector_pabt		@ 0x0c prefetch abort
	b	vector_dabt		@ 0x10 data abort
	b	vector_addrexcptn	@ 0x14 (reserved) should never happen
	b	vector_irq		@ 0x18 irq
	b	vector_fiq		@ 0x1c fiq
310