xref: /kvm-unit-tests/arm/cstart.S (revision 0917dc65eabbacb592456c0d1bb05e5828c23661)
1/*
2 * Boot entry point and assembler functions for armv7 tests.
3 *
4 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5 *
6 * This work is licensed under the terms of the GNU LGPL, version 2.
7 */
8#define __ASSEMBLY__
9#include <auxinfo.h>
10#include <asm/thread_info.h>
11#include <asm/asm-offsets.h>
12#include <asm/pgtable-hwdef.h>
13#include <asm/ptrace.h>
14#include <asm/sysreg.h>
15
16#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)
17
18.macro zero_range, tmp1, tmp2, tmp3, tmp4
19	mov	\tmp3, #0
20	mov	\tmp4, #0
219998:	cmp	\tmp1, \tmp2
22	beq	9997f
23	strd	\tmp3, \tmp4, [\tmp1], #8
24	b	9998b
259997:
26.endm
27
28.arm
29
30.section .init
31
32.globl start
33start:
34	/* zero BSS */
35	ldr	r4, =bss
36	ldr	r5, =ebss
37	zero_range r4, r5, r6, r7
38
39	/* zero stack */
40	ldr	r5, =stacktop
41	sub	r4, r5, #THREAD_SIZE
42	zero_range r4, r5, r6, r7
43
44	/*
45	 * set stack, making room at top of stack for cpu0's
46	 * exception stacks. Must start wtih stackptr, not
47	 * stacktop, so the thread size masking (shifts) work.
48	 */
49	ldr	sp, =stackptr
50	lsr	sp, #THREAD_SHIFT
51	lsl	sp, #THREAD_SHIFT
52	add	sp, #THREAD_START_SP
53
54	/*
55	 * save sp before pushing anything on the stack
56	 * lr makes a good temp register right now
57	 */
58	mov	lr, sp
59
60	/*
61	 * bootloader params are in r0-r2
62	 * See the kernel doc Documentation/arm/Booting
63	 *   r0 = 0
64	 *   r1 = machine type number
65	 *   r2 = physical address of the dtb
66	 *
67	 * As we have no need for r0's nor r1's value, then
68	 * put the dtb in r0. This allows setup to be consistent
69	 * with arm64.
70	 */
71	mov	r0, r2
72	push	{r0-r1}
73
74	/* set up vector table, mode stacks, and enable the VFP */
75	mov	r0, lr			@ lr is stack top (see above),
76					@ which is the exception stacks base
77	bl	exceptions_init
78	bl	enable_vfp
79
80	/* complete setup */
81	pop	{r0-r1}
82	bl	setup
83	bl	get_mmu_off
84	cmp	r0, #0
85	bne	1f
86	bl	setup_vm
87
881:
89	/* run the test */
90	ldr	r0, =__argc
91	ldr	r0, [r0]
92	ldr	r1, =__argv
93	ldr	r2, =__environ
94	bl	main
95	bl	exit
96	b	halt
97
98
99.macro set_mode_stack mode, stack
100	add	\stack, #S_FRAME_SIZE
101	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
102	isb
103	mov	sp, \stack
104.endm
105
106exceptions_init:
107	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
108	bic	r2, #CR_V		@ SCTLR.V := 0
109	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
110	ldr	r2, =vector_table
111	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR
112
113	mrs	r2, cpsr
114
115	/* first frame reserved for svc mode */
116	set_mode_stack	UND_MODE, r0
117	set_mode_stack	ABT_MODE, r0
118	set_mode_stack	IRQ_MODE, r0
119	set_mode_stack	FIQ_MODE, r0
120
121	msr	cpsr_cxsf, r2		@ back to svc mode
122	isb
123	mov	pc, lr
124
125enable_vfp:
126	/* Enable full access to CP10 and CP11: */
127	mov	r0, #(3 << 22 | 3 << 20)
128	mcr	p15, 0, r0, c1, c0, 2
129	isb
130	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
131	mov	r0, #(1 << 30)
132	vmsr	fpexc, r0
133	mov	pc, lr
134
135.text
136
137.global get_mmu_off
138get_mmu_off:
139	ldr	r0, =auxinfo
140	ldr	r0, [r0, #4]
141	and	r0, #AUXINFO_MMU_OFF
142	mov	pc, lr
143
144.global secondary_entry
145secondary_entry:
146	/* enable the MMU unless requested off */
147	bl	get_mmu_off
148	cmp	r0, #0
149	bne	1f
150	mov	r1, #0
151	ldr	r0, =mmu_idmap
152	ldr	r0, [r0]
153	bl	asm_mmu_enable
154
1551:
156	/*
157	 * Set the stack, and set up vector table
158	 * and exception stacks. Exception stacks
159	 * space starts at stack top and grows up.
160	 */
161	ldr	r1, =secondary_data
162	ldr	r0, [r1]
163	mov	sp, r0
164	bl	exceptions_init
165	bl	enable_vfp
166
167	/* finish init in C code */
168	bl	secondary_cinit
169
170	/* r0 is now the entry function, run it */
171	blx	r0
172	b	do_idle
173
174.globl halt
175halt:
1761:	wfi
177	b	1b
178
179/*
180 * asm_mmu_enable
181 *   Inputs:
182 *     (r0 - lo, r1 - hi) is the base address of the translation table
183 *   Outputs: none
184 */
185.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
186.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
187.globl asm_mmu_enable
188asm_mmu_enable:
189	/* TLBIALL */
190	mcr	p15, 0, r2, c8, c7, 0
191	dsb	nsh
192
193	/* TTBCR */
194	ldr	r2, =(TTBCR_EAE | 				\
195		      TTBCR_SH0_SHARED | 			\
196		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
197	mcr	p15, 0, r2, c2, c0, 2
198	isb
199
200	/* MAIR */
201	ldr	r2, =PRRR
202	mcr	p15, 0, r2, c10, c2, 0
203	ldr	r2, =NMRR
204	mcr	p15, 0, r2, c10, c2, 1
205
206	/* TTBR0 */
207	mcrr	p15, 0, r0, r1, c2
208	isb
209
210	/* SCTLR */
211	mrc	p15, 0, r2, c1, c0, 0
212	orr	r2, #CR_C
213	orr	r2, #CR_I
214	orr	r2, #CR_M
215	mcr	p15, 0, r2, c1, c0, 0
216	isb
217
218	mov     pc, lr
219
220.macro dcache_clean_inval domain, start, end, tmp1, tmp2
221	ldr	\tmp1, =dcache_line_size
222	ldr	\tmp1, [\tmp1]
223	sub	\tmp2, \tmp1, #1
224	bic	\start, \start, \tmp2
2259998:
226	/* DCCIMVAC */
227	mcr	p15, 0, \start, c7, c14, 1
228	add	\start, \start, \tmp1
229	cmp	\start, \end
230	blo	9998b
231	dsb	\domain
232.endm
233
234.globl asm_mmu_disable
235asm_mmu_disable:
236	/* SCTLR */
237	mrc	p15, 0, r0, c1, c0, 0
238	bic	r0, #CR_M
239	mcr	p15, 0, r0, c1, c0, 0
240	isb
241
242	ldr	r0, =__phys_offset
243	ldr	r0, [r0]
244	ldr	r1, =__phys_end
245	ldr	r1, [r1]
246	dcache_clean_inval sy, r0, r1, r2, r3
247	isb
248
249	mov     pc, lr
250
251/*
252 * Vector stubs
253 * Simplified version of the Linux kernel implementation
254 *   arch/arm/kernel/entry-armv.S
255 *
256 * Each mode has an S_FRAME_SIZE sized memory region,
257 * and the mode's stack pointer has been initialized
258 * to the base of that region in exceptions_init.
259 */
260.macro vector_stub, name, vec, mode, correction=0
261.align 5
262vector_\name:
263.if \correction
264	sub	lr, lr, #\correction
265.endif
266	/*
267	 * Save r0, r1, lr_<exception> (parent PC)
268	 * and spsr_<exception> (parent CPSR)
269	 */
270	str	r0, [sp, #S_R0]
271	str	r1, [sp, #S_R1]
272	str	lr, [sp, #S_PC]
273	mrs	r0, spsr
274	str	r0, [sp, #S_PSR]
275
276	/* Prepare for SVC32 mode. */
277	mrs	r0, cpsr
278	bic	r0, #MODE_MASK
279	orr	r0, #SVC_MODE
280	msr	spsr_cxsf, r0
281
282	/* Branch to handler in SVC mode */
283	mov	r0, #\vec
284	mov	r1, sp
285	ldr	lr, =vector_common
286	movs	pc, lr
287.endm
288
289vector_stub 	rst,	0, UND_MODE
290vector_stub	und,	1, UND_MODE
291vector_stub	pabt,	3, ABT_MODE, 4
292vector_stub	dabt,	4, ABT_MODE, 8
293vector_stub	irq,	6, IRQ_MODE, 4
294vector_stub	fiq,	7, FIQ_MODE, 4
295
296.align 5
297vector_svc:
298	/*
299	 * Save r0, r1, lr_<exception> (parent PC)
300	 * and spsr_<exception> (parent CPSR)
301	 */
302	push	{ r1 }
303	lsr	r1, sp, #THREAD_SHIFT
304	lsl	r1, #THREAD_SHIFT
305	add	r1, #THREAD_START_SP
306	str	r0, [r1, #S_R0]
307	pop	{ r0 }
308	str	r0, [r1, #S_R1]
309	str	lr, [r1, #S_PC]
310	mrs	r0, spsr
311	str	r0, [r1, #S_PSR]
312
313	/*
314	 * Branch to handler, still in SVC mode.
315	 * r0 := 2 is the svc vector number.
316	 */
317	mov	r0, #2
318	ldr	lr, =vector_common
319	mov	pc, lr
320
321vector_common:
322	/* make room for pt_regs */
323	sub	sp, #S_FRAME_SIZE
324	tst	sp, #4			@ check stack alignment
325	subne	sp, #4
326
327	/* store registers r0-r12 */
328	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later
329
330	/* get registers saved in the stub */
331	ldr	r2, [r1, #S_R0]		@ r0
332	ldr	r3, [r1, #S_R1]		@ r1
333	ldr	r4, [r1, #S_PC] 	@ lr_<exception> (parent PC)
334	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)
335
336	/* fix r0 and r1 */
337	str	r2, [sp, #S_R0]
338	str	r3, [sp, #S_R1]
339
340	/* store sp_svc, if we were in usr mode we'll fix this later */
341	add	r6, sp, #S_FRAME_SIZE
342	addne	r6, #4			@ stack wasn't aligned
343	str	r6, [sp, #S_SP]
344
345	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
346	str	r4, [sp, #S_PC]		@ store lr_<exception>
347	str	r5, [sp, #S_PSR]	@ store spsr_<exception>
348
349	/* set ORIG_r0 */
350	mov	r2, #-1
351	str	r2, [sp, #S_OLD_R0]
352
353	/* if we were in usr mode then we need sp_usr and lr_usr instead */
354	and	r1, r5, #MODE_MASK
355	cmp	r1, #USR_MODE
356	bne	1f
357	add	r1, sp, #S_SP
358	stmia	r1, { sp,lr }^
359
360	/* Call the handler. r0 is the vector number, r1 := pt_regs */
3611:	mov	r1, sp
362	bl	do_handle_exception
363
364	/*
365	 * make sure we restore sp_svc on mode change. No need to
366	 * worry about lr_svc though, as that gets clobbered on
367	 * exception entry anyway.
368	 */
369	str	r6, [sp, #S_SP]
370
371	/* return from exception */
372	msr	spsr_cxsf, r5
373	ldmia	sp, { r0-pc }^
374
375.align 5
376vector_addrexcptn:
377	b	vector_addrexcptn
378
379.section .text.ex
380.align 5
381vector_table:
382	b	vector_rst
383	b	vector_und
384	b	vector_svc
385	b	vector_pabt
386	b	vector_dabt
387	b	vector_addrexcptn	@ should never happen
388	b	vector_irq
389	b	vector_fiq
390