1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _X86_MICROCODE_INTERNAL_H
3 #define _X86_MICROCODE_INTERNAL_H
4
5 #include <linux/earlycpio.h>
6 #include <linux/initrd.h>
7
8 #include <asm/cpu.h>
9 #include <asm/microcode.h>
10
11 struct device;
12
/*
 * Status codes returned by the vendor-specific callbacks in
 * struct microcode_ops and consumed by the generic loader core.
 */
enum ucode_state {
	UCODE_OK = 0,		/* nothing to do / operation succeeded */
	UCODE_NEW,		/* newer microcode was found */
	UCODE_NEW_SAFE,		/* newer microcode, safe variant — see core for exact semantics */
	UCODE_UPDATED,		/* microcode was applied to the CPU */
	UCODE_NFOUND,		/* no matching microcode was found */
	UCODE_ERROR,		/* loading/applying failed */
	UCODE_TIMEOUT,		/* presumably a late-load rendezvous timeout — confirm in core */
	UCODE_OFFLINE,		/* presumably CPU offline during update — confirm in core */
};
23
/* Per-vendor driver callbacks, registered with the generic microcode core. */
struct microcode_ops {
	/* Request a microcode image via the firmware loader for @cpu. */
	enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
	/* Release per-CPU microcode state. */
	void (*microcode_fini_cpu)(int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on a target CPU when they are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state (*apply_microcode)(int cpu);
	void (*stage_microcode)(void);
	int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
	void (*finalize_late_load)(int result);
	unsigned int nmi_safe : 1,	/* apply_microcode() may run in NMI context */
		     use_nmi : 1,	/* drive the update through NMIs */
		     use_staging : 1;	/* use stage_microcode() before applying */
};
41
/* Microcode revisions recorded during the early (boot-time) load. */
struct early_load_data {
	u32 old_rev;	/* revision the CPU booted with */
	u32 new_rev;	/* revision after the early update */
};
46
/* Revisions captured by the early loader, defined in the microcode core. */
extern struct early_load_data early_data;
/* Per-CPU signature/revision bookkeeping. */
extern struct ucode_cpu_info ucode_cpu_info[];
extern u32 microcode_rev[NR_CPUS];
extern u32 base_rev;
extern bool hypervisor_present;

/* Locate a microcode blob named @path inside the initrd cpio archive. */
struct cpio_data find_microcode_in_initrd(const char *path);
54
/* Upper bound on cached microcode patches — TODO confirm against users. */
#define MAX_UCODE_COUNT 128

/* Pack four ASCII characters into one little-endian 32-bit register word. */
#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
/* "GenuineIntel", as CPUID leaf 0 returns it in EBX, EDX, ECX. */
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
/* "AuthenticAMD", likewise split across EBX, EDX, ECX. */
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

/*
 * Nonzero iff the vendor words (a, b, c) match the CPUID registers in
 * vendor-string order: a vs EBX, b vs EDX, c vs ECX. XOR-and-OR instead of
 * three compares: the result is zero only when all three words match.
 */
#define CPUID_IS(a, b, c, ebx, ecx, edx) \
	(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))
67
/*
 * During the early microcode-loading phase on the BSP, boot_cpu_data is not
 * set up yet, so x86_cpuid_vendor() is used to determine the BSP's vendor.
 *
 * In the 32-bit AP case, accessing boot_cpu_data would require a linear
 * address. To keep the code simple, x86_cpuid_vendor() is used for APs too.
 *
 * x86_cpuid_vendor() reads the vendor information directly from CPUID.
 */
x86_cpuid_vendor(void)77 static inline int x86_cpuid_vendor(void)
78 {
79 u32 eax = 0x00000000;
80 u32 ebx, ecx = 0, edx;
81
82 native_cpuid(&eax, &ebx, &ecx, &edx);
83
84 if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
85 return X86_VENDOR_INTEL;
86
87 if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
88 return X86_VENDOR_AMD;
89
90 return X86_VENDOR_UNKNOWN;
91 }
92
x86_cpuid_family(void)93 static inline unsigned int x86_cpuid_family(void)
94 {
95 u32 eax = 0x00000001;
96 u32 ebx, ecx = 0, edx;
97
98 native_cpuid(&eax, &ebx, &ecx, &edx);
99
100 return x86_family(eax);
101 }
102
/* Controls the minimum-revision check for late loading — NOTE(review): confirm polarity in the core. */
extern bool force_minrev;
104
#ifdef CONFIG_CPU_SUP_AMD
/* AMD driver entry points: early BSP/AP load, reload, and ops registration. */
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
/* No-op stubs so the generic core builds when AMD support is compiled out. */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */
118
#ifdef CONFIG_CPU_SUP_INTEL
/* Intel driver entry points: early BSP/AP load, reload, and ops registration. */
void load_ucode_intel_bsp(struct early_load_data *ed);
void load_ucode_intel_ap(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
/* No-op stubs so the generic core builds when Intel support is compiled out. */
static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
static inline void load_ucode_intel_ap(void) { }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */
130
/*
 * Debug logging helper: prints at info level only when CONFIG_MICROCODE_DBG
 * is enabled. Using IS_ENABLED() in a statement expression (rather than an
 * #ifdef'd empty macro) keeps the format string and arguments type-checked
 * in both configurations.
 */
#define ucode_dbg(fmt, ...) \
({ \
	if (IS_ENABLED(CONFIG_MICROCODE_DBG)) \
		pr_info(fmt, ##__VA_ARGS__); \
})
136
137 #endif /* _X86_MICROCODE_INTERNAL_H */
138