/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.code32
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
#include <asm/desc_defs.h>
#include <asm/trapnr.h>
#include "pgtable.h"

/*
 * Fix alignment at 16 bytes. Following CONFIG_FUNCTION_ALIGNMENT would
 * result in assembly errors from trying to move .org backward, because of
 * the excessive alignment.
 */
#undef __ALIGN
#define __ALIGN		.balign	16, 0x90
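/*
 * Note: 0x90 is the single-byte NOP opcode, so the alignment padding
 * remains harmlessly executable if control ever falls through into it.
 */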

/*
 * Locally defined symbols should be marked hidden:
 */
	.hidden _bss
	.hidden _ebss
	.hidden _end

	__HEAD

/*
 * This macro gives the relative virtual address of X, i.e. the offset of X
 * from startup_32. This is the same as the link-time virtual address of X,
 * since startup_32 is at 0, but defining it this way tells the
 * assembler/linker that we do not want the actual run-time address of X. This
 * prevents the linker from trying to create unwanted run-time relocation
 * entries for the reference when the compressed kernel is linked as PIE.
 *
 * A reference X(%reg) will result in the link-time VA of X being stored with
 * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that
 * adds the 64-bit base address where the kernel is loaded.
 *
 * Replacing it with (X-startup_32)(%reg) results in the offset being stored,
 * and no run-time relocation.
 *
 * The macro should be used as a displacement with a base register containing
 * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate
 * [$ rva(X)].
 *
 * This macro can only be used from within the .head.text section, since the
 * expression requires startup_32 to be in the same section as the code being
 * assembled.
 */
#define rva(X) ((X) - startup_32)

	.code32
SYM_FUNC_START(startup_32)
	/*
	 * 32bit entry is 0 and it is ABI so immutable!
	 * If we come here directly from a bootloader,
	 * kernel(text+data+bss+brk), ramdisk, zero_page and command line
	 * all need to be under the 4G limit.
	 */
	cld
	cli

/*
 * Calculate the delta between where we were compiled to run at
 * and where we were actually loaded at. This can only be done with a
 * short local call on x86. Nothing else will tell us what address we are
 * running at. The reserved chunk of the real-mode data at 0x1e4
 * (defined as a scratch field) is used as the stack for this calculation.
 * Only 4 bytes are needed.
 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$ rva(1b), %ebp
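	/*
	 * Worked example (addresses illustrative only): if the image was
	 * loaded at 0x1000000, popl leaves the run-time address of 1b in
	 * %ebp, and subtracting the link-time offset rva(1b) leaves
	 * %ebp = 0x1000000, the run-time address of startup_32.
	 */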

	/* Load new GDT with the 64bit segments using 32bit descriptor */
	leal	rva(gdt)(%ebp), %eax
	movl	%eax, 2(%eax)
	lgdt	(%eax)
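	/*
	 * The movl above patches the GDT pseudo-descriptor in place: its
	 * base field sits at offset 2, after the 16-bit limit, and is
	 * filled with the run-time address of gdt before lgdt consumes it.
	 */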

	/* Load segment registers with our descriptors */
	movl	$__BOOT_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss

	/* Set up a stack and load CS from the current GDT */
	leal	rva(boot_stack_end)(%ebp), %esp

	pushl	$__KERNEL32_CS
	leal	rva(1f)(%ebp), %eax
	pushl	%eax
	lretl
1:

	/* Set up exception handling for SEV-ES */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	call	startup32_load_idt
#endif

	/* Make sure the CPU supports long mode. */
	call	verify_cpu
	testl	%eax, %eax
	jnz	.Lno_longmode

/*
 * Compute the delta between where we were compiled to run at
 * and where the code will actually run at.
 *
 * %ebp contains the address we are loaded at by the boot loader and %ebx
 * contains the address where we should move the kernel image temporarily
 * for safe in-place decompression.
 */

#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
	jae	1f
#endif
	movl	$LOAD_PHYSICAL_ADDR, %ebx
1:
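	/*
	 * The sequence above rounds %ebx up to the next kernel_alignment
	 * boundary, e.g. with a (typical) 2MB alignment of 0x200000, a
	 * load address of 0x345000 becomes
	 * (0x345000 + 0x1fffff) & ~0x1fffff = 0x400000. Addresses below
	 * LOAD_PHYSICAL_ADDR are clamped up to it.
	 */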

	/* Target address to relocate to for decompression */
	addl	BP_init_size(%esi), %ebx
	subl	$ rva(_end), %ebx
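	/*
	 * %ebx now points init_size - (_end - startup_32) bytes into the
	 * buffer, i.e. the image is placed so that it ends exactly at the
	 * top of the buffer, which is what makes in-place decompression
	 * safe.
	 */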

/*
 * Prepare for entering 64-bit mode
 */

	/* Enable PAE mode */
	movl	%cr4, %eax
	orl	$X86_CR4_PAE, %eax
	movl	%eax, %cr4

	/*
	 * Build early 4G boot pagetable
	 */
	/*
	 * If SEV is active then set the encryption mask in the page tables.
	 * This will ensure that the kernel is copied and decompressed
	 * through encrypted mappings.
	 */
	xorl	%edx, %edx
#ifdef	CONFIG_AMD_MEM_ENCRYPT
	call	get_sev_encryption_bit
	xorl	%edx, %edx
	testl	%eax, %eax
	jz	1f
	subl	$32, %eax	/* Encryption bit is always above bit 31 */
	bts	%eax, %edx	/* Set encryption mask for page tables */
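	/*
	 * Hypothetical example: a C-bit at position 51 gives
	 * %eax = 51 - 32 = 19, so bit 19 is set in %edx, which is added
	 * into the high dword of each page-table entry below, i.e. PTE
	 * bit 51.
	 */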
	/*
	 * Set MSR_AMD64_SEV_ENABLED_BIT in sev_status so that
	 * startup32_check_sev_cbit() will do a check. sev_enable() will
	 * initialize sev_status with all the bits reported by the
	 * SEV_STATUS MSR (MSR_AMD64_SEV) later, but only
	 * MSR_AMD64_SEV_ENABLED_BIT needs to be set for now.
	 */
	movl	$1, rva(sev_status)(%ebp)
1:
#endif

	/* Initialize page tables to 0 */
	leal	rva(pgtable)(%ebx), %edi
	xorl	%eax, %eax
	movl	$(BOOT_INIT_PGT_SIZE/4), %ecx
	rep	stosl

	/* Build Level 4 */
	leal	rva(pgtable + 0)(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)

	/* Build Level 3 */
	leal	rva(pgtable + 0x1000)(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	$4, %ecx
1:	movl	%eax, 0x00(%edi)
	addl	%edx, 0x04(%edi)
	addl	$0x00001000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Build Level 2 */
	leal	rva(pgtable + 0x2000)(%ebx), %edi
	movl	$0x00000183, %eax
	movl	$2048, %ecx
1:	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)
	addl	$0x00200000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b
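	/*
	 * The tables above identity-map the first 4GB: one level-4 entry
	 * points to a level-3 table whose 4 entries each point to a
	 * level-2 table, for 4 * 512 * 2MB = 4GB. 0x1007 is the next
	 * table's address | PRESENT+RW+USER; 0x183 is
	 * PRESENT+RW+PSE+GLOBAL for the 2MB leaf entries.
	 */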

	/* Enable the boot page tables */
	leal	rva(pgtable)(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/* Now that the GDT is loaded, clear the LDT and load the TSS */
	xorl	%eax, %eax
	lldt	%ax
	movl	$__BOOT_TSS, %eax
	ltr	%ax
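	/*
	 * A null selector leaves the LDT unusable, which is fine as no
	 * LDT is used. TR must point at a valid TSS: in long mode the TSS
	 * provides the RSPn/IST stack pointers used when an exception or
	 * interrupt requires a stack switch.
	 */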

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* Check if the C-bit position is correct when SEV is active */
	call	startup32_check_sev_cbit
#endif

	/*
	 * Set up for the jump to 64bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can be
	 * used to perform that far jump.
	 */
	leal	rva(startup_64)(%ebp), %eax
	pushl	$__KERNEL_CS
	pushl	%eax

	/* Enter paged protected mode, activating long mode */
	movl	$CR0_STATE, %eax
	movl	%eax, %cr0

	/* Jump from 32bit compatibility mode into 64bit mode. */
	lret
SYM_FUNC_END(startup_32)

	.code64
	.org 0x200
SYM_CODE_START(startup_64)
	/*
	 * 64bit entry is 0x200 and it is ABI so immutable!
	 * We come here either from startup_32 or directly from a
	 * 64bit bootloader.
	 * If we come here from a bootloader, kernel(text+data+bss+brk),
	 * ramdisk, zero_page, command line could be above 4G.
	 * We depend on an identity mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */

	cld
	cli

	/* Set up data segments. */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Compute the decompressed kernel start address. For a relocatable
	 * kernel it is the load address rounded up to a 2MB boundary;
	 * otherwise it is LOAD_PHYSICAL_ADDR. %rbp will hold this address.
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */

	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
	movl	BP_kernel_alignment(%rsi), %eax
	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp
	cmpq	$LOAD_PHYSICAL_ADDR, %rbp
	jae	1f
#endif
	movq	$LOAD_PHYSICAL_ADDR, %rbp
1:

	/* Target address to relocate to for decompression */
	movl	BP_init_size(%rsi), %ebx
	subl	$ rva(_end), %ebx
	addq	%rbp, %rbx
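	/*
	 * As in the 32-bit path: startup_32 links at 0, so rva(_end) is
	 * the total image size and %rbx = %rbp + init_size - image size,
	 * placing the copied image at the very end of the decompression
	 * buffer.
	 */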

	/* Set up the stack */
	leaq	rva(boot_stack_end)(%rbx), %rsp

	/*
	 * At this point we are in long mode with 4-level paging enabled,
	 * but we might want to enable 5-level paging or vice versa.
	 *
	 * The problem is that we cannot do it directly. Setting or clearing
	 * CR4.LA57 in long mode would trigger #GP. So we need to switch off
	 * long mode and paging first.
	 *
	 * We also need a trampoline in lower memory to switch over from
	 * 4- to 5-level paging for cases when the bootloader puts the kernel
	 * above 4G, but didn't enable 5-level paging for us.
	 *
	 * The same trampoline can be used to switch from 5- to 4-level paging
	 * mode, like when starting a 4-level paging kernel via kexec() when
	 * the original kernel worked in 5-level paging mode.
	 *
	 * For the trampoline, we need the top page table to reside in lower
	 * memory as we don't have a way to load 64-bit values into CR3 in
	 * 32-bit mode.
	 */

	/* Make sure we have a GDT with a 32-bit code segment */
	leaq	gdt64(%rip), %rax
	addq	%rax, 2(%rax)
	lgdt	(%rax)
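	/*
	 * gdt64 stores the link-time offset (gdt - gdt64) in its base
	 * field; adding the run-time address of gdt64 turns that into the
	 * run-time address of gdt without requiring a relocation entry.
	 */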

	/* Reload CS so that IRET will return to a CS actually present in the GDT */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	/*
	 * RSI holds a pointer to a boot_params structure provided by the
	 * loader, and this needs to be preserved across C function calls. So
	 * move it into a callee saved register.
	 */
	movq	%rsi, %r15

	call	load_stage1_idt

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Now that the stage1 interrupt handlers are set up, #VC exceptions from
	 * CPUID instructions can be properly handled for SEV-ES guests.
	 *
	 * For SEV-SNP, the CPUID table also needs to be set up in advance of any
	 * CPUID instructions being issued, so go ahead and do that now via
	 * sev_enable(), which will also handle the rest of the SEV-related
	 * detection/setup to ensure that has been done in advance of any dependent
	 * code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sev_enable
#endif

	/* Preserve only the CR4 bits that must be preserved, and clear the rest */
	movq	%cr4, %rax
	andl	$(X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57), %eax
	movq	%rax, %cr4

	/*
	 * configure_5level_paging() updates the number of paging levels using
	 * a trampoline in 32-bit addressable memory if the current number does
	 * not match the desired number.
	 *
	 * Pass the boot_params pointer as the first argument. The second
	 * argument is the relocated address of the page table to use instead
	 * of the page table in trampoline memory (if required).
	 */
	movq	%r15, %rdi
	leaq	rva(top_pgtable)(%rbx), %rsi
	call	configure_5level_paging

	/* Zero EFLAGS */
	pushq	$0
	popfq

/*
 * Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe.
 */
	leaq	(_bss-8)(%rip), %rsi
	leaq	rva(_bss-8)(%rbx), %rdi
	movl	$(_bss - startup_32), %ecx
	shrl	$3, %ecx
	std
	rep	movsq
	cld
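	/*
	 * The copy is done backwards (std, then rep movsq starting at the
	 * last quadword) because source and destination may overlap with
	 * the destination above the source; copying from the end is safe
	 * in either case.
	 */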

	/*
	 * The GDT may get overwritten either during the copy we just did or
	 * during extract_kernel below. To avoid any issues, repoint the GDTR
	 * to the new copy of the GDT.
	 */
	leaq	rva(gdt64)(%rbx), %rax
	leaq	rva(gdt)(%rbx), %rdx
	movq	%rdx, 2(%rax)
	lgdt	(%rax)

/*
 * Jump to the relocated address.
 */
	leaq	rva(.Lrelocated)(%rbx), %rax
	jmp	*%rax
SYM_CODE_END(startup_64)

	.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leaq	_bss(%rip), %rdi
	leaq	_ebss(%rip), %rcx
	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq
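	/*
	 * The count is in quadwords; this assumes _bss.._ebss spans a
	 * multiple of 8 bytes, which the boot linker script is expected
	 * to arrange via alignment of both symbols.
	 */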

	call	load_stage2_idt

	/* Pass boot_params to initialize_identity_maps() */
	movq	%r15, %rdi
	call	initialize_identity_maps

/*
 * Do the extraction, and jump to the new kernel.
 */
	/* pass struct boot_params pointer and output target address */
	movq	%r15, %rdi
	movq	%rbp, %rsi
	call	extract_kernel		/* returns kernel entry point in %rax */

/*
 * Jump to the decompressed kernel.
 */
	movq	%r15, %rsi
	jmp	*%rax
SYM_FUNC_END(.Lrelocated)

	.code32
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
	/* This isn't an x86-64 CPU, so hang intentionally; we cannot continue. */
1:
	hlt
	jmp	1b
SYM_FUNC_END(.Lno_longmode)

	.globl	verify_cpu
#include "../../kernel/verify_cpu.S"

	.data
SYM_DATA_START_LOCAL(gdt64)
	.word	gdt_end - gdt - 1
	.quad	gdt - gdt64
SYM_DATA_END(gdt64)
	.balign	8
SYM_DATA_START_LOCAL(gdt)
	.word	gdt_end - gdt - 1
	.long	0
	.word	0
	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TSS descriptor */
	.quad	0x0000000000000000	/* TSS continued */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
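/*
 * Descriptor notes: 0x00cf9a... is 32-bit code (D=1), 0x00af9a... is
 * 64-bit code (L=1, D=0), and the two TSS quadwords together form the
 * single 16-byte TSS descriptor format that long mode requires.
 */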

SYM_DATA_START(boot_idt_desc)
	.word	boot_idt_end - boot_idt - 1
	.quad	0
SYM_DATA_END(boot_idt_desc)
	.balign 8
SYM_DATA_START(boot_idt)
	.rept	BOOT_IDT_ENTRIES
	.quad	0
	.quad	0
	.endr
SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)

/*
 * Stack and heap for decompression
 */
	.bss
	.balign 4
SYM_DATA_START_LOCAL(boot_stack)
	.fill BOOT_STACK_SIZE, 1, 0
	.balign 16
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)

/*
 * Space for page tables (not in .bss so not zeroed)
 */
	.section ".pgtable","aw",@nobits
	.balign 4096
SYM_DATA_LOCAL(pgtable,		.fill BOOT_PGT_SIZE, 1, 0)

/*
 * This page table is used in place of the page table in trampoline
 * memory when the paging mode is switched.
 */
SYM_DATA_LOCAL(top_pgtable,	.fill PAGE_SIZE, 1, 0)