1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 *
4 *	verify_cpu.S - Code for cpu long mode and SSE verification. This
5 *	code has been borrowed from boot/setup.S and was introduced by
6 * 	Andi Kleen.
7 *
8 *	Copyright (c) 2007  Andi Kleen (ak@suse.de)
9 *	Copyright (c) 2007  Eric Biederman (ebiederm@xmission.com)
10 *	Copyright (c) 2007  Vivek Goyal (vgoyal@in.ibm.com)
11 *	Copyright (c) 2010  Kees Cook (kees.cook@canonical.com)
12 *
 *	This is common code for verifying whether the CPU supports
 *	long mode and SSE. It is not called directly; instead, this
 *	file is included in various places and compiled in that context.
 *	This file is expected to run in 32-bit code.  Currently:
17 *
18 *	arch/x86/boot/compressed/head_64.S: Boot cpu verification
19 *	arch/x86/kernel/trampoline_64.S: secondary processor verification
20 *	arch/x86/kernel/head_32.S: processor startup
21 *
 *	verify_cpu returns the status of long mode and SSE in register %eax:
 *		0: Success    1: Failure
24 *
25 *	On Intel, the XD_DISABLE flag will be cleared as a side-effect.
26 *
27 * 	The caller needs to check for the error code and take the action
28 * 	appropriately. Either display a message or halt.
29 */
30
31#include <asm/cpufeatures.h>
32#include <asm/cpufeaturemasks.h>
33#include <asm/msr-index.h>
34
35#define SSE_MASK	\
36	(REQUIRED_MASK0 & ((1<<(X86_FEATURE_XMM & 31)) | (1<<(X86_FEATURE_XMM2 & 31))))
37
38SYM_FUNC_START_LOCAL(verify_cpu)
39	pushf				# Save caller passed flags
40	push	$0			# Kill any dangerous flags
41	popf
42
43#ifndef __x86_64__
44	pushfl				# standard way to check for cpuid
45	popl	%eax
46	movl	%eax,%ebx
47	xorl	$0x200000,%eax
48	pushl	%eax
49	popfl
50	pushfl
51	popl	%eax
52	cmpl	%eax,%ebx
53	jz	.Lverify_cpu_no_longmode	# cpu has no cpuid
54#endif
55
56	movl	$0x0,%eax		# See if cpuid 1 is implemented
57	cpuid
58	cmpl	$0x1,%eax
59	jb	.Lverify_cpu_no_longmode	# no cpuid 1
60
61	xor	%di,%di
62	cmpl	$0x68747541,%ebx	# AuthenticAMD
63	jnz	.Lverify_cpu_noamd
64	cmpl	$0x69746e65,%edx
65	jnz	.Lverify_cpu_noamd
66	cmpl	$0x444d4163,%ecx
67	jnz	.Lverify_cpu_noamd
68	mov	$1,%di			# cpu is from AMD
69	jmp	.Lverify_cpu_check
70
71.Lverify_cpu_noamd:
72	cmpl	$0x756e6547,%ebx        # GenuineIntel?
73	jnz	.Lverify_cpu_check
74	cmpl	$0x49656e69,%edx
75	jnz	.Lverify_cpu_check
76	cmpl	$0x6c65746e,%ecx
77	jnz	.Lverify_cpu_check
78
79	# only call IA32_MISC_ENABLE when:
80	# family > 6 || (family == 6 && model >= 0xd)
81	movl	$0x1, %eax		# check CPU family and model
82	cpuid
83	movl	%eax, %ecx
84
85	andl	$0x0ff00f00, %eax	# mask family and extended family
86	shrl	$8, %eax
87	cmpl	$6, %eax
88	ja	.Lverify_cpu_clear_xd	# family > 6, ok
89	jb	.Lverify_cpu_check	# family < 6, skip
90
91	andl	$0x000f00f0, %ecx	# mask model and extended model
92	shrl	$4, %ecx
93	cmpl	$0xd, %ecx
94	jb	.Lverify_cpu_check	# family == 6, model < 0xd, skip
95
96.Lverify_cpu_clear_xd:
97	movl	$MSR_IA32_MISC_ENABLE, %ecx
98	rdmsr
99	btrl	$2, %edx		# clear MSR_IA32_MISC_ENABLE_XD_DISABLE
100	jnc	.Lverify_cpu_check	# only write MSR if bit was changed
101	wrmsr
102
103.Lverify_cpu_check:
104	movl    $0x1,%eax		# Does the cpu have what it takes
105	cpuid
106	andl	$REQUIRED_MASK0,%edx
107	xorl	$REQUIRED_MASK0,%edx
108	jnz	.Lverify_cpu_no_longmode
109
110	movl    $0x80000000,%eax	# See if extended cpuid is implemented
111	cpuid
112	cmpl    $0x80000001,%eax
113	jb      .Lverify_cpu_no_longmode	# no extended cpuid
114
115	movl    $0x80000001,%eax	# Does the cpu have what it takes
116	cpuid
117	andl    $REQUIRED_MASK1,%edx
118	xorl    $REQUIRED_MASK1,%edx
119	jnz     .Lverify_cpu_no_longmode
120
121.Lverify_cpu_sse_test:
122	movl	$1,%eax
123	cpuid
124	andl	$SSE_MASK,%edx
125	cmpl	$SSE_MASK,%edx
126	je	.Lverify_cpu_sse_ok
127	test	%di,%di
128	jz	.Lverify_cpu_no_longmode	# only try to force SSE on AMD
129	movl	$MSR_K7_HWCR,%ecx
130	rdmsr
131	btr	$15,%eax		# enable SSE
132	wrmsr
133	xor	%di,%di			# don't loop
134	jmp	.Lverify_cpu_sse_test	# try again
135
136.Lverify_cpu_no_longmode:
137	popf				# Restore caller passed flags
138	movl $1,%eax
139	RET
140.Lverify_cpu_sse_ok:
141	popf				# Restore caller passed flags
142	xorl %eax, %eax
143	RET
144SYM_FUNC_END(verify_cpu)
145