xref: /kvm-unit-tests/arm/cstart.S (revision 1d0f08f40d53daa39566842ec46a112db5f7e524)
/*
 * Boot entry point and assembler functions for armv7 tests.
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#define __ASSEMBLY__
#include <auxinfo.h>
#include <asm/assembler.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

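/*
 * Each thread gets a THREAD_SIZE stack. The top S_FRAME_SIZE * 8 bytes
 * are reserved for the per-mode exception stacks set up in
 * exceptions_init, and the result is rounded down so the initial sp
 * stays 8-byte aligned.
 */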
#define THREAD_START_SP ((THREAD_SIZE - S_FRAME_SIZE * 8) & ~7)

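/*
 * zero_range - store zeros from \tmp1 (inclusive) up to \tmp2 (exclusive),
 * eight bytes per iteration with strd; the range size must therefore be a
 * multiple of eight bytes. \tmp3 and \tmp4 are clobbered (zeroed).
 */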
.macro zero_range, tmp1, tmp2, tmp3, tmp4
	mov	\tmp3, #0
	mov	\tmp4, #0
9998:	cmp	\tmp1, \tmp2
	beq	9997f
	strd	\tmp3, \tmp4, [\tmp1], #8
	b	9998b
9997:
.endm

.arm

.section .init

.globl start
start:
	/* zero BSS */
	ldr	r4, =bss
	ldr	r5, =ebss
	zero_range r4, r5, r6, r7

	/* zero stack */
	ldr	r5, =stacktop
	sub	r4, r5, #THREAD_SIZE
	zero_range r4, r5, r6, r7

	/*
	 * Set the stack, making room at the top of the stack for cpu0's
	 * exception stacks. We must start from stackptr, not stacktop,
	 * so the thread-size masking (the shifts below) works.
	 */
	ldr	sp, =stackptr
	lsr	sp, #THREAD_SHIFT
	lsl	sp, #THREAD_SHIFT
	add	sp, #THREAD_START_SP
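	/*
	 * i.e. sp := (stackptr & ~(THREAD_SIZE - 1)) + THREAD_START_SP,
	 * given THREAD_SIZE == (1 << THREAD_SHIFT)
	 */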

	/*
	 * save sp before pushing anything on the stack
	 * lr makes a good temp register right now
	 */
	mov	lr, sp

	/*
	 * The bootloader params are in r0-r2; see the kernel doc
	 * Documentation/arm/Booting:
	 *   r0 = 0
	 *   r1 = machine type number
	 *   r2 = physical address of the dtb
	 *
	 * As we need neither r0's nor r1's value, put the dtb in r0.
	 * This keeps setup consistent with arm64.
	 */
	mov	r0, r2
	push	{r0-r1}

	/* set up vector table, mode stacks, and enable the VFP */
	mov	r0, lr			@ lr is stack top (see above),
					@ which is the exception stacks base
	bl	exceptions_init
	bl	enable_vfp

	/* complete setup */
	pop	{r0-r1}
	mov	r3, #0
	ldr	r2, =stacktop		@ r2,r3 is the base of free memory
	bl	setup			@ r0 is the addr of the dtb
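	/*
	 * Per the AAPCS the dtb pointer travels in r0 and the 64-bit free
	 * memory base in the even/odd pair r2/r3 (high word zeroed above),
	 * matching a C prototype along the lines of
	 * setup(const void *fdt, phys_addr_t freemem_start); see
	 * lib/arm/setup.c for the real declaration.
	 */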

	/* run the test */
	ldr	r0, =__argc
	ldr	r0, [r0]
	ldr	r1, =__argv
	ldr	r2, =__environ
	bl	main
	bl	exit
	b	halt

.text

/*
 * psci_invoke_hvc / psci_invoke_smc
 *
 * Inputs:
 *   r0 -- function_id
 *   r1 -- arg0
 *   r2 -- arg1
 *   r3 -- arg2
 *
 * Outputs:
 *   r0 -- return code
 */
.globl psci_invoke_hvc
psci_invoke_hvc:
	hvc	#0
	mov	pc, lr

.globl psci_invoke_smc
psci_invoke_smc:
	smc	#0
	mov	pc, lr
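
/*
 * These follow the SMC calling convention: the function id and up to
 * three arguments go in r0-r3 and the result comes back in r0, so the
 * hvc/smc instruction is all that's needed. A hypothetical C-side call
 * (the real callers live in lib/arm/psci.c) would look like:
 *
 *	ret = psci_invoke_hvc(PSCI_0_2_FN_CPU_ON, mpidr, entry_point, 0);
 */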

enable_vfp:
	/* Enable full access to CP10 and CP11 (CPACR bits [23:20]): */
	mov	r0, #(3 << 22 | 3 << 20)
	mcr	p15, 0, r0, c1, c0, 2	@ CPACR
	isb
	/* Set the FPEXC.EN bit to enable Advanced SIMD and VFP: */
	mov	r0, #(1 << 30)
	vmsr	fpexc, r0
	mov	pc, lr

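/*
 * Return nonzero in r0 when the AUXINFO_MMU_OFF flag is set in the
 * auxinfo flags word (loaded from offset 4 of struct auxinfo), i.e.
 * when the test asked to run with the MMU off.
 */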
get_mmu_off:
	ldr	r0, =auxinfo
	ldr	r0, [r0, #4]
	and	r0, #AUXINFO_MMU_OFF
	mov	pc, lr

.global secondary_entry
secondary_entry:
	/* enable the MMU unless requested off */
	bl	get_mmu_off
	cmp	r0, #0
	bne	1f
	mov	r1, #0
	ldr	r0, =mmu_idmap
	ldr	r0, [r0]
	bl	asm_mmu_enable

1:
	/*
	 * Set the stack, then set up the vector table and exception
	 * stacks. The exception stack space starts at the stack top
	 * and grows upwards.
	 */
	ldr	r1, =secondary_data
	ldr	r0, [r1]
	mov	sp, r0
	bl	exceptions_init
	bl	enable_vfp

	/* finish init in C code */
	bl	secondary_cinit

	/* r0 is now the entry function, run it */
	blx	r0
	b	do_idle

.globl halt
halt:
1:	wfi
	b	1b

/*
 * asm_mmu_enable
 *   Inputs:
 *     (r0 - lo, r1 - hi) is the base address of the translation table
 *   Outputs: none
 */
.equ	PRRR,	0xeeaa4400		@ MAIR0 (from Linux kernel)
.equ	NMRR,	0xff000004		@ MAIR1 (from Linux kernel)
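/*
 * The tables use the LPAE long-descriptor format: TTBCR.EAE is set
 * below, TTBR0 is written as a 64-bit value with mcrr, and with EAE
 * set the PRRR/NMRR encodings written to c10 are interpreted as
 * MAIR0/MAIR1.
 */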
.globl asm_mmu_enable
asm_mmu_enable:
	/* TLBIALL */
	mcr	p15, 0, r2, c8, c7, 0	@ the value in r2 is ignored
	dsb	nsh

	/* TTBCR */
	ldr	r2, =(TTBCR_EAE | 				\
		      TTBCR_SH0_SHARED | 			\
		      TTBCR_IRGN0_WBWA | TTBCR_ORGN0_WBWA)
	mcr	p15, 0, r2, c2, c0, 2
	isb

	/* MAIR */
	ldr	r2, =PRRR
	mcr	p15, 0, r2, c10, c2, 0
	ldr	r2, =NMRR
	mcr	p15, 0, r2, c10, c2, 1

	/* TTBR0 */
	mcrr	p15, 0, r0, r1, c2
	isb

	/* SCTLR */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, #CR_C
	orr	r2, #CR_I
	orr	r2, #CR_M
	mcr	p15, 0, r2, c1, c0, 0
	isb

	mov     pc, lr

.globl asm_mmu_disable
asm_mmu_disable:
	/* SCTLR */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #CR_M
	mcr	p15, 0, r0, c1, c0, 0
	isb

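	/*
	 * Clean and invalidate the data cache for the whole of the
	 * test's physical memory, so that everything written while the
	 * MMU (and with it the data cache) was on is visible to the
	 * non-cacheable accesses made now that translation is off.
	 */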
	ldr	r0, =__phys_offset
	ldr	r0, [r0]
	ldr	r1, =__phys_end
	ldr	r1, [r1]
	sub	r1, r1, r0
	dcache_by_line_op dccimvac, sy, r0, r1, r2, r3

	mov     pc, lr

/*
 * Vectors
 */

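/*
 * set_mode_stack - advance \stack by one S_FRAME_SIZE slot, switch to
 * \mode with IRQs and FIQs masked, and point that mode's banked sp at
 * the new slot.
 */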
.macro set_mode_stack mode, stack
	add	\stack, #S_FRAMEE_SIZE
	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
	isb
	mov	sp, \stack
.endm

/*
 * exceptions_init
 *
 * Input r0 is the stack top, which is the exception stacks base
 */
.globl exceptions_init
exceptions_init:
	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
	bic	r2, #CR_V		@ SCTLR.V := 0 (vectors at VBAR, not hivecs)
	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
	ldr	r2, =vector_table
	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR

	mrs	r2, cpsr		@ remember the current (svc) mode

	/*
	 * The first frame is reserved for svc mode
	 */
	set_mode_stack	UND_MODE, r0
	set_mode_stack	ABT_MODE, r0
	set_mode_stack	IRQ_MODE, r0
	set_mode_stack	FIQ_MODE, r0

	msr	cpsr_cxsf, r2		@ back to svc mode
	isb
	mov	pc, lr

/*
 * Vector stubs
 * Simplified version of the Linux kernel implementation
 *   arch/arm/kernel/entry-armv.S
 *
 * Each mode has an S_FRAME_SIZE sized memory region,
 * and the mode's stack pointer has been initialized
 * to the base of that region in exceptions_init.
 */
.macro vector_stub, name, vec, mode, correction=0
.align 5
vector_\name:
.if \correction
	sub	lr, lr, #\correction
.endif
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	str	r0, [sp, #S_R0]
	str	r1, [sp, #S_R1]
	str	lr, [sp, #S_PC]
	mrs	r0, spsr
	str	r0, [sp, #S_PSR]

	/* Prepare for SVC32 mode. */
	mrs	r0, cpsr
	bic	r0, #MODE_MASK
	orr	r0, #SVC_MODE
	msr	spsr_cxsf, r0

	/* Branch to handler in SVC mode */
	mov	r0, #\vec
	mov	r1, sp
	ldr	lr, =vector_common
	movs	pc, lr			@ exception return: also moves spsr to cpsr (-> svc)
.endm

vector_stub 	rst,	0, UND_MODE
vector_stub	und,	1, UND_MODE
vector_stub	pabt,	3, ABT_MODE, 4
vector_stub	dabt,	4, ABT_MODE, 8
vector_stub	irq,	6, IRQ_MODE, 4
vector_stub	fiq,	7, FIQ_MODE, 4
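/*
 * The lr corrections match the architecture: on entry the banked lr
 * points 4 bytes past the preferred return address for prefetch abort,
 * IRQ and FIQ, and 8 bytes past it for data abort, so those amounts are
 * subtracted to record the parent PC. Vectors 2 (svc) and 5
 * (addrexcptn) are handled separately below.
 */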

.align 5
vector_svc:
	/*
	 * Save r0, r1, lr_<exception> (parent PC)
	 * and spsr_<exception> (parent CPSR)
	 */
	push	{ r1 }
	lsr	r1, sp, #THREAD_SHIFT
	lsl	r1, #THREAD_SHIFT
	add	r1, #THREAD_START_SP	@ r1 := the exception stacks base (svc frame)
	str	r0, [r1, #S_R0]
	pop	{ r0 }
	str	r0, [r1, #S_R1]
	str	lr, [r1, #S_PC]
	mrs	r0, spsr
	str	r0, [r1, #S_PSR]

	/*
	 * Branch to handler, still in SVC mode.
	 * r0 := 2 is the svc vector number.
	 */
	mov	r0, #2
	ldr	lr, =vector_common
	mov	pc, lr

vector_common:
	/* make room for pt_regs */
	sub	sp, #S_FRAME_SIZE
	tst	sp, #4			@ check stack alignment
	subne	sp, #4

	/* store registers r0-r12 */
	stmia	sp, { r0-r12 }		@ r0 and r1 are stale here, fixed up below

	/* get registers saved in the stub */
	ldr	r2, [r1, #S_R0]		@ r0
	ldr	r3, [r1, #S_R1]		@ r1
	ldr	r4, [r1, #S_PC] 	@ lr_<exception> (parent PC)
	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)

	/* fix r0 and r1 */
	str	r2, [sp, #S_R0]
	str	r3, [sp, #S_R1]

	/* store sp_svc, if we were in usr mode we'll fix this later */
	add	r6, sp, #S_FRAME_SIZE
	addne	r6, #4			@ stack wasn't aligned
	str	r6, [sp, #S_SP]

	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
	str	r4, [sp, #S_PC]		@ store lr_<exception>
	str	r5, [sp, #S_PSR]	@ store spsr_<exception>

	/* set ORIG_r0 */
	mov	r2, #-1
	str	r2, [sp, #S_OLD_R0]

	/* if we were in usr mode then we need sp_usr and lr_usr instead */
	and	r1, r5, #MODE_MASK
	cmp	r1, #USR_MODE
	bne	1f
	add	r1, sp, #S_SP
	stmia	r1, { sp,lr }^

	/* Call the handler. r0 is the vector number, r1 := pt_regs */
1:	mov	r1, sp
	bl	do_handle_exception

	/*
	 * make sure we restore sp_svc on mode change. No need to
	 * worry about lr_svc though, as that gets clobbered on
	 * exception entry anyway.
	 */
	str	r6, [sp, #S_SP]

	/* return from exception */
	msr	spsr_cxsf, r5
	ldmia	sp, { r0-pc }^		@ ^ with pc in the list also restores cpsr from spsr

.align 5
vector_addrexcptn:
	b	vector_addrexcptn

.section .text.ex
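
/*
 * The architectural vector table: reset, undefined instruction,
 * supervisor call, prefetch abort, data abort, the unused (address
 * exception) slot, IRQ and FIQ, in that order. exceptions_init points
 * VBAR here and clears SCTLR.V so these low vectors are used.
 */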
.align 5
vector_table:
	b	vector_rst
	b	vector_und
	b	vector_svc
	b	vector_pabt
	b	vector_dabt
	b	vector_addrexcptn	@ should never happen
	b	vector_irq
	b	vector_fiq