xref: /kvm-unit-tests/arm/cstart.S (revision c0edb3d22f90dd417bbd08fbe6c56de036cf0022)
/*
 * Boot entry point and assembler functions for armv7 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <auxinfo.h>
#include <asm/assembler.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

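/*
 * Reserve room at the top of each THREAD_SIZE stack region for eight
 * S_FRAME_SIZE exception frames; masking with ~7 keeps the initial sp
 * 8-byte aligned, as the EABI requires.
 */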
#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)

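/*
 * zero_range: roughly "while (tmp1 != tmp2) { *(u64 *)tmp1 = 0; tmp1 += 8; }".
 * Both bounds must be 8-byte aligned, since strd stores a doubleword and
 * the loop terminates on equality rather than a less-than test.
 */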
.macro zero_range, tmp1, tmp2, tmp3, tmp4
	mov	\tmp3, #0
	mov	\tmp4, #0
9998:	cmp	\tmp1, \tmp2
	beq	9997f
	strd	\tmp3, \tmp4, [\tmp1], #8
	b	9998b
9997:
.endm

.arm

.section .init

.globl start
start:
	/* zero BSS */
	ldr	r4, =bss
	ldr	r5, =ebss
	zero_range r4, r5, r6, r7

	/* zero stack */
	ldr	r5, =stacktop
	sub	r4, r5, #THREAD_SIZE
	zero_range r4, r5, r6, r7

	/*
	 * set stack, making room at top of stack for cpu0's
	 * exception stacks. Must start with stackptr, not
	 * stacktop, so the thread size masking (shifts) works.
	 */
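	/*
	 * the lsr/lsl pair clears the low THREAD_SHIFT bits, rounding
	 * stackptr down to the base of its THREAD_SIZE-aligned region;
	 * adding THREAD_START_SP then leaves the exception frames above sp
	 */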
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT
	lsl	sp, #THREAD_SHIFT
	add	sp, #THREAD_START_SP

	/*
	 * save sp before pushing anything on the stack
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * bootloader params are in r0-r2
	 * See the kernel doc Documentation/arm/Booting
	 *   r0 = 0
	 *   r1 = machine type number
	 *   r2 = physical address of the dtb
	 *
	 * As we need neither r0's nor r1's value, we put
	 * the dtb in r0. This allows setup to be consistent
	 * with arm64.
	 */
	mov	r0, r2
	push	{r0-r1}
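	/*
	 * the push above keeps the dtb pointer safe across the calls
	 * below; pushing r1 as well keeps sp 8-byte aligned
	 */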

	/* set up vector table, mode stacks, and enable the VFP */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init
	bl	enable_vfp

	/* complete setup */
	pop	{r0-r1}
	bl	setup

	/* run the test */
	ldr	r0, =__argc
	ldr	r0, [r0]
	ldr	r1, =__argv
	ldr	r2, =__environ
	bl	main
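	/* main's return value is still in r0, making it exit's status */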
	bl	exit
	b	halt

.text

enable_vfp:
	/* Enable full access to CP10 and CP11: */
	mov	r0, #(3 << 22 | 3 << 20)
	mcr	p15, 0, r0, c1, c0, 2
	isb
	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
	mov	r0, #(1 << 30)
	vmsr	fpexc, r0
	mov	pc, lr

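/*
 * get_mmu_off returns nonzero in r0 when AUXINFO_MMU_OFF is set in
 * auxinfo's flags (the word at offset 4, just past the progname pointer).
 */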
get_mmu_off:
	ldr	r0, =auxinfo
	ldr	r0, [r0, #4]
	and	r0, #AUXINFO_MMU_OFF
	mov	pc, lr

.global secondary_entry
secondary_entry:
	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f
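	/*
	 * asm_mmu_enable takes the 64-bit table base in r1:r0;
	 * mmu_idmap holds a 32-bit pointer, so the high word is zero
	 */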
	mov	r1, #0
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]
	bl	asm_mmu_enable

1:
	/*
	 * Set the stack, and set up the vector table
	 * and exception stacks. The exception stacks
	 * area starts at the stack top and grows up.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]
	mov	sp, r0
	bl	exceptions_init
	bl	enable_vfp

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	blx	r0
	b	do_idle

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     (r0 - lo, r1 - hi) is the base address of the translation table
 *   Outputs: none
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
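/*
 * With TTBCR.EAE set (LPAE), the PRRR/NMRR registers at c10/c2/0 and
 * c10/c2/1 are redefined as MAIR0/MAIR1, hence the names above.
 */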
.globl asm_mmu_enable
asm_mmu_enable:
	/* TLBIALL */
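	/* (the source register value is ignored, so r2 needn't be set) */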
	mcr	p15, 0, r2, c8, c7, 0
	dsb	nsh

	/* TTBCR */
	ldr	r2, =(TTBCR_EAE |				\
		      TTBCR_SH0_SHARED |			\
		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
	mcr	p15, 0, r2, c2, c0, 2
	isb

	/* MAIR */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1

	/* TTBR0 */
	mcrr	p15, 0, r0, r1, c2
	isb

	/* SCTLR */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C
	orr	r2, #CR_I
	orr	r2, #CR_M
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov	pc, lr

.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M
	mcr	p15, 0, r0, c1, c0, 0
	isb

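	/*
	 * clean+invalidate the data cache over the test's whole memory
	 * range ([__phys_offset, __phys_end)) so that writes done while
	 * caches were on are visible to the now-uncached accesses
	 */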
	ldr	r0, =__phys_offset
	ldr	r0, [r0]
	ldr	r1, =__phys_end
	ldr	r1, [r1]
	sub	r1, r1, r0
	dcache_by_line_op dccimvac, sy, r0, r1, r2, r3

	mov	pc, lr

/*
 * Vectors
 */

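/*
 * Each set_mode_stack invocation claims the next S_FRAME_SIZE frame,
 * switches to \mode with IRQs and FIQs masked, and points that mode's
 * banked sp at the frame.
 */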
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAMEE_SIZE
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb
	mov	sp, \stack
.endm

/*
 * exceptions_init
 *
 * Input r0 is the stack top, which is the exception stacks base
 */
exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR
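	/*
	 * with SCTLR.V clear, exceptions now vector through VBAR
	 * rather than the hivecs address 0xffff0000
	 */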

	mrs	r2, cpsr

	/*
	 * The first frame is reserved for svc mode
	 */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr

/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/* Branch to handler in SVC mode */
	mov	r0, #\vec
	mov	r1, sp
	ldr	lr, =vector_common
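	/* movs pc, lr restores cpsr from spsr, completing the mode switch */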
	movs	pc, lr
.endm

vector_stub	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4
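
/*
 * Vector 2 (svc) already runs in svc mode and is handled by vector_svc
 * below; vector 5 (address exception) spins in vector_addrexcptn.
 */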

.align 5
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	push	{ r1 }
	lsr	r1, sp, #THREAD_SHIFT
	lsl	r1, #THREAD_SHIFT
	add	r1, #THREAD_START_SP
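	/*
	 * r1 now points at the exception stacks base, i.e. the first
	 * frame, which exceptions_init reserved for svc mode
	 */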
	str	r0, [r1, #S_R0]
	pop	{ r0 }
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr

vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment
	subne	sp, #4

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC]		@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 */
	str	r6, [sp, #S_SP]

	/* return from exception */
	msr	spsr_cxsf, r5
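	/*
	 * ldmia with ^ and pc in the register list also copies spsr
	 * into cpsr as it branches, returning to the interrupted context
	 */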
	ldmia	sp, { r0-pc }^

.align 5
vector_addrexcptn:
	b	vector_addrexcptn

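/*
 * The architectural vector table: reset, undefined instruction, svc,
 * prefetch abort, data abort, reserved, irq, fiq.
 */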
.section .text.ex
.align 5
vector_table:
	b	vector_rst
	b	vector_und
	b	vector_svc
	b	vector_pabt
	b	vector_dabt
	b	vector_addrexcptn	@ should never happen
	b	vector_irq
	b	vector_fiq