/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPUID_API_H
#define _ASM_X86_CPUID_API_H

#include <asm/cpuid/types.h>

#include <linux/build_bug.h>
#include <linux/types.h>

#include <asm/string.h>

/*
 * Raw CPUID accessors:
 */

#ifdef CONFIG_X86_32
bool have_cpuid_p(void);
#else
static inline bool have_cpuid_p(void)
{
	return true;
}
#endif

static inline void native_cpuid(u32 *eax, u32 *ebx,
				u32 *ecx, u32 *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}
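
/*
 * Illustrative sketch, not part of this header: native_cpuid() takes the
 * leaf in *eax and the subleaf in *ecx and overwrites all four outputs.
 * Leaf 0 returns the highest standard leaf in EAX and the 12-byte vendor
 * string in EBX/EDX/ECX.  The helper name below is hypothetical.
 *
 *	static void example_read_vendor(char vendor[13])
 *	{
 *		u32 eax = 0, ebx, ecx = 0, edx;
 *
 *		native_cpuid(&eax, &ebx, &ecx, &edx);
 *		memcpy(vendor + 0, &ebx, 4);
 *		memcpy(vendor + 4, &edx, 4);
 *		memcpy(vendor + 8, &ecx, 4);
 *		vendor[12] = '\0';
 *	}
 */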

#define NATIVE_CPUID_REG(reg)					\
static inline u32 native_cpuid_##reg(u32 op)			\
{								\
	u32 eax = op, ebx, ecx = 0, edx;			\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum:
 */
NATIVE_CPUID_REG(eax)
NATIVE_CPUID_REG(ebx)
NATIVE_CPUID_REG(ecx)
NATIVE_CPUID_REG(edx)
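
/*
 * Illustrative sketch, not part of this header: the macro above expands to
 * native_cpuid_eax(), native_cpuid_ebx(), native_cpuid_ecx() and
 * native_cpuid_edx(), each returning a single output register for the given
 * leaf with the subleaf fixed at 0.  The helper name below is hypothetical.
 *
 *	static u32 example_max_extended_leaf(void)
 *	{
 *		return native_cpuid_eax(0x80000000);
 *	}
 */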

#ifdef CONFIG_PARAVIRT_XXL
# include <asm/paravirt.h>
#else
# define __cpuid native_cpuid
#endif

/*
 * Generic CPUID function
 *
 * Clear ECX, since some CPUs (Cyrix MII) do not set or clear ECX,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(u32 op,
			 u32 *eax, u32 *ebx,
			 u32 *ecx, u32 *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
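
/*
 * Illustrative sketch, not part of this header: cpuid() zeroes the subleaf,
 * so it is the natural helper for leaves that are not indexed.  The helper
 * name below is hypothetical; leaf 1 reports the CPU's family/model/stepping
 * signature in EAX.
 *
 *	static u32 example_cpu_signature(void)
 *	{
 *		u32 eax, ebx, ecx, edx;
 *
 *		cpuid(1, &eax, &ebx, &ecx, &edx);
 *		return eax;
 *	}
 */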

/* Some CPUID calls want 'count' to be placed in ECX */
static inline void cpuid_count(u32 op, int count,
			       u32 *eax, u32 *ebx,
			       u32 *ecx, u32 *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
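
/*
 * Illustrative sketch, not part of this header: enumerating the subleaves of
 * an indexed leaf with cpuid_count().  On CPUs implementing Intel's leaf 4
 * encoding, each subleaf describes one cache level and the list terminates
 * when the cache type in EAX[4:0] reads 0.  The helper name below is
 * hypothetical.
 *
 *	static u32 example_count_cache_levels(void)
 *	{
 *		u32 eax, ebx, ecx, edx, i;
 *
 *		for (i = 0; ; i++) {
 *			cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
 *			if (!(eax & 0x1f))
 *				break;
 *		}
 *
 *		return i;
 *	}
 */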

/*
 * CPUID functions returning a single datum:
 */

static inline u32 cpuid_eax(u32 op)
{
	u32 eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline u32 cpuid_ebx(u32 op)
{
	u32 eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline u32 cpuid_ecx(u32 op)
{
	u32 eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline u32 cpuid_edx(u32 op)
{
	u32 eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
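
/*
 * Illustrative sketch, not part of this header: the single-datum helpers are
 * convenient when only one output register matters.  The helper name below
 * is hypothetical; leaf 0 reports the highest supported standard leaf in EAX.
 *
 *	static bool example_has_std_leaf(u32 leaf)
 *	{
 *		return cpuid_eax(0) >= leaf;
 *	}
 */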

static inline void __cpuid_read(u32 leaf, u32 subleaf, u32 *regs)
{
	regs[CPUID_EAX] = leaf;
	regs[CPUID_ECX] = subleaf;
	__cpuid(regs + CPUID_EAX, regs + CPUID_EBX, regs + CPUID_ECX, regs + CPUID_EDX);
}

#define cpuid_subleaf(leaf, subleaf, regs) {		\
	static_assert(sizeof(*(regs)) == 16);		\
	__cpuid_read(leaf, subleaf, (u32 *)(regs));	\
}

#define cpuid_leaf(leaf, regs) {			\
	static_assert(sizeof(*(regs)) == 16);		\
	__cpuid_read(leaf, 0, (u32 *)(regs));		\
}
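
/*
 * Illustrative sketch, not part of this header: cpuid_leaf() and
 * cpuid_subleaf() fill a 16-byte structure of four u32 registers, e.g.
 * struct cpuid_regs from <asm/cpuid/types.h>.  The helper name below is
 * hypothetical; leaf 0x15 reports the core crystal clock frequency in ECX
 * (0 if not enumerated).
 *
 *	static u32 example_tsc_crystal_hz(void)
 *	{
 *		struct cpuid_regs regs;
 *
 *		cpuid_leaf(0x15, &regs);
 *		return regs.ecx;
 *	}
 */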

static inline void __cpuid_read_reg(u32 leaf, u32 subleaf,
				    enum cpuid_regs_idx regidx, u32 *reg)
{
	u32 regs[4];

	__cpuid_read(leaf, subleaf, regs);
	*reg = regs[regidx];
}

#define cpuid_subleaf_reg(leaf, subleaf, regidx, reg) {		\
	static_assert(sizeof(*(reg)) == 4);			\
	__cpuid_read_reg(leaf, subleaf, regidx, (u32 *)(reg));	\
}

#define cpuid_leaf_reg(leaf, regidx, reg) {			\
	static_assert(sizeof(*(reg)) == 4);			\
	__cpuid_read_reg(leaf, 0, regidx, (u32 *)(reg));	\
}
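
/*
 * Illustrative sketch, not part of this header: the *_reg() variants read a
 * single 32-bit register, selected via enum cpuid_regs_idx from
 * <asm/cpuid/types.h>.  The helper name below is hypothetical; leaf 7,
 * subleaf 0 reports structured extended feature flags in EBX.
 *
 *	static u32 example_leaf7_ebx_features(void)
 *	{
 *		u32 ebx;
 *
 *		cpuid_subleaf_reg(7, 0, CPUID_EBX, &ebx);
 *		return ebx;
 *	}
 */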

static __always_inline bool cpuid_function_is_indexed(u32 function)
{
	switch (function) {
	case 4:
	case 7:
	case 0xb:
	case 0xd:
	case 0xf:
	case 0x10:
	case 0x12:
	case 0x14:
	case 0x17:
	case 0x18:
	case 0x1d:
	case 0x1e:
	case 0x1f:
	case 0x24:
	case 0x8000001d:
		return true;
	}

	return false;
}
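
/*
 * Illustrative sketch, not part of this header: callers walking arbitrary
 * leaves can use cpuid_function_is_indexed() to decide whether the subleaf
 * in ECX is meaningful.  The helper name below is hypothetical.
 *
 *	static void example_read_any_leaf(u32 leaf, u32 subleaf,
 *					  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
 *	{
 *		if (cpuid_function_is_indexed(leaf))
 *			cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
 *		else
 *			cpuid(leaf, eax, ebx, ecx, edx);
 *	}
 */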

#define for_each_possible_hypervisor_cpuid_base(function) \
	for (function = 0x40000000; function < 0x40010000; function += 0x100)

static inline u32 hypervisor_cpuid_base(const char *sig, u32 leaves)
{
	u32 base, eax, signature[3];

	for_each_possible_hypervisor_cpuid_base(base) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		/*
		 * This must not compile to "call memcmp" because it's called
		 * from PVH early boot code before instrumentation is set up
		 * and memcmp() itself may be instrumented.
		 */
		if (!__builtin_memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
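
/*
 * Illustrative sketch, not part of this header: hypervisors advertise a
 * 12-byte signature in EBX/ECX/EDX of a base leaf in the
 * 0x40000000-0x4000ff00 range.  Both the helper name and the use of the raw
 * "KVMKVMKVM\0\0\0" signature string below are illustrative assumptions.
 *
 *	static bool example_running_on_kvm(void)
 *	{
 *		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
 *	}
 */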

#endif /* _ASM_X86_CPUID_API_H */