1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/cpu.h>
3
4 #include <xen/xen.h>
5
6 #include <asm/intel-family.h>
7 #include <asm/apic.h>
8 #include <asm/processor.h>
9 #include <asm/smp.h>
10
11 #include "cpu.h"
12
/* System wide topology shift/size table, filled in by cpu_init_topology() */
struct x86_topology_system x86_topo_system __ro_after_init;
EXPORT_SYMBOL_GPL(x86_topo_system);

/* AMD nodes per package, copied from the scan in cpu_init_topology() */
unsigned int __amd_nodes_per_pkg __ro_after_init;
EXPORT_SYMBOL_GPL(__amd_nodes_per_pkg);

/* CPUs which are the primary SMT threads */
struct cpumask __cpu_primary_thread_mask __read_mostly;
21
topology_set_dom(struct topo_scan * tscan,enum x86_topology_domains dom,unsigned int shift,unsigned int ncpus)22 void topology_set_dom(struct topo_scan *tscan, enum x86_topology_domains dom,
23 unsigned int shift, unsigned int ncpus)
24 {
25 topology_update_dom(tscan, dom, shift, ncpus);
26
27 /* Propagate to the upper levels */
28 for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
29 tscan->dom_shifts[dom] = tscan->dom_shifts[dom - 1];
30 tscan->dom_ncpus[dom] = tscan->dom_ncpus[dom - 1];
31 }
32 }
33
get_topology_cpu_type(struct cpuinfo_x86 * c)34 enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c)
35 {
36 if (c->x86_vendor == X86_VENDOR_INTEL) {
37 switch (c->topo.intel_type) {
38 case INTEL_CPU_TYPE_ATOM: return TOPO_CPU_TYPE_EFFICIENCY;
39 case INTEL_CPU_TYPE_CORE: return TOPO_CPU_TYPE_PERFORMANCE;
40 }
41 }
42 if (c->x86_vendor == X86_VENDOR_AMD) {
43 switch (c->topo.amd_type) {
44 case 0: return TOPO_CPU_TYPE_PERFORMANCE;
45 case 1: return TOPO_CPU_TYPE_EFFICIENCY;
46 }
47 }
48
49 return TOPO_CPU_TYPE_UNKNOWN;
50 }
51
get_topology_cpu_type_name(struct cpuinfo_x86 * c)52 const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c)
53 {
54 switch (get_topology_cpu_type(c)) {
55 case TOPO_CPU_TYPE_PERFORMANCE:
56 return "performance";
57 case TOPO_CPU_TYPE_EFFICIENCY:
58 return "efficiency";
59 default:
60 return "unknown";
61 }
62 }
63
/*
 * Determine the core count from legacy CPUID leaf 0x4. The top six
 * bits of EAX hold the core count minus one (per the SDM this is the
 * maximum addressable core ID — assumes a power-of-2 style encoding;
 * callers round up via get_count_order()).
 *
 * Returns 1 when leaf 0x4 is not available or reports no cache, i.e.
 * the CPU is treated as single core.
 */
static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
{
	/* Layout of CPUID leaf 0x4 subleaf 0 EAX */
	struct {
		u32 cache_type : 5,
		unused : 21,
		ncores : 6;
	} eax;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_subleaf_reg(4, 0, CPUID_EAX, &eax);
	/* cache_type == 0 means no more caches enumerated; leaf is unusable */
	if (!eax.cache_type)
		return 1;

	return eax.ncores + 1;
}
81
/*
 * Fallback topology parser for CPUs which do not enumerate via the
 * extended leaves 0xb/0x1f. Cores come from leaf 0x4; when
 * hyperthreading is enabled, the SMT shift is derived from the logical
 * processor count that was captured from leaf 1 EBX into
 * tscan->ebx1_nproc_shift.
 */
static void parse_legacy(struct topo_scan *tscan)
{
	unsigned int cores, core_shift, smt_shift = 0;
	struct cpuinfo_x86 *c = tscan->c;

	cores = parse_num_cores_legacy(c);
	core_shift = get_count_order(cores);

	if (cpu_has(c, X86_FEATURE_HT)) {
		/*
		 * SMT shift = total logical shift - core shift. Guard
		 * against broken firmware reporting fewer logical CPUs
		 * than cores; in that case stick with smt_shift == 0.
		 */
		if (!WARN_ON_ONCE(tscan->ebx1_nproc_shift < core_shift))
			smt_shift = tscan->ebx1_nproc_shift - core_shift;
		/*
		 * The parser expects leaf 0xb/0x1f format, which means
		 * the number of logical processors at core level is
		 * counting threads.
		 */
		core_shift += smt_shift;
		cores <<= smt_shift;
	}

	topology_set_dom(tscan, TOPO_SMT_DOMAIN, smt_shift, 1U << smt_shift);
	topology_set_dom(tscan, TOPO_CORE_DOMAIN, core_shift, cores);
}
105
fake_topology(struct topo_scan * tscan)106 static bool fake_topology(struct topo_scan *tscan)
107 {
108 /*
109 * Preset the CORE level shift for CPUID less systems and XEN_PV,
110 * which has useless CPUID information.
111 */
112 topology_set_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
113 topology_set_dom(tscan, TOPO_CORE_DOMAIN, 0, 1);
114
115 return tscan->c->cpuid_level < 1;
116 }
117
/*
 * Evaluate the topology of @tscan->c: preset defaults, read the APIC
 * ID and dispatch to the vendor specific parsers.
 *
 * @early: true for the invocation from early_identify_cpu() where the
 *	   APIC is not yet mapped/enabled; the initial APIC ID from
 *	   CPUID leaf 1 is used instead of reading the APIC register.
 */
static void parse_topology(struct topo_scan *tscan, bool early)
{
	const struct cpuinfo_topology topo_defaults = {
		.cu_id = 0xff,
		.llc_id = BAD_APICID,
		.l2c_id = BAD_APICID,
		.cpu_type = TOPO_CPU_TYPE_UNKNOWN,
	};
	struct cpuinfo_x86 *c = tscan->c;
	/* Layout of CPUID leaf 1 EBX: logical CPU count and initial APIC ID */
	struct {
		u32 unused0 : 16,
		nproc : 8,
		apicid : 8;
	} ebx;

	c->topo = topo_defaults;

	if (fake_topology(tscan))
		return;

	/* Preset Initial APIC ID from CPUID leaf 1 */
	cpuid_leaf_reg(1, CPUID_EBX, &ebx);
	c->topo.initial_apicid = ebx.apicid;

	/*
	 * The initial invocation from early_identify_cpu() happens before
	 * the APIC is mapped or X2APIC enabled. For establishing the
	 * topology, that's not required. Use the initial APIC ID.
	 */
	if (early)
		c->topo.apicid = c->topo.initial_apicid;
	else
		c->topo.apicid = read_apic_id();

	/* The above is sufficient for UP */
	if (!IS_ENABLED(CONFIG_SMP))
		return;

	/* Cached for parse_legacy() to derive the SMT shift */
	tscan->ebx1_nproc_shift = get_count_order(ebx.nproc);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		if (IS_ENABLED(CONFIG_CPU_SUP_AMD))
			cpu_parse_topology_amd(tscan);
		break;
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		parse_legacy(tscan);
		break;
	case X86_VENDOR_INTEL:
		/* Fall back to the legacy parser if 0xb/0x1f are unusable */
		if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
			parse_legacy(tscan);
		/* Hybrid core type enumeration via leaf 0x1a */
		if (c->cpuid_level >= 0x1a)
			c->topo.cpu_type = cpuid_eax(0x1a);
		break;
	case X86_VENDOR_HYGON:
		/* Hygon shares the AMD topology enumeration scheme */
		if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))
			cpu_parse_topology_amd(tscan);
		break;
	}
}
179
/*
 * Derive the package/die/core identifiers from the APIC ID using the
 * domain shifts established during parsing.
 *
 * @early: logical IDs depend on global topology state which is not
 *	   available during early boot; they are skipped then.
 */
static void topo_set_ids(struct topo_scan *tscan, bool early)
{
	struct cpuinfo_x86 *c = tscan->c;
	u32 apicid = c->topo.apicid;

	/* Physical IDs are the APIC ID shifted down to the domain level */
	c->topo.pkg_id = topo_shift_apicid(apicid, TOPO_PKG_DOMAIN);
	c->topo.die_id = topo_shift_apicid(apicid, TOPO_DIE_DOMAIN);

	if (!early) {
		c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);
		c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN);
		c->topo.logical_core_id = topology_get_logical_id(apicid, TOPO_CORE_DOMAIN);
	}

	/* Package relative core ID */
	c->topo.core_id = (apicid & topo_domain_mask(TOPO_PKG_DOMAIN)) >>
		x86_topo_system.dom_shifts[TOPO_SMT_DOMAIN];

	c->topo.amd_node_id = tscan->amd_node_id;

	/* AMD specific post processing of the IDs, see cpu_topology_fixup_amd() */
	if (c->x86_vendor == X86_VENDOR_AMD)
		cpu_topology_fixup_amd(tscan);
}
203
/*
 * Parse the topology of a booting (secondary) CPU and sanity check it
 * against the system wide state that cpu_init_topology() established
 * from the boot CPU.
 */
void cpu_parse_topology(struct cpuinfo_x86 *c)
{
	unsigned int dom, cpu = smp_processor_id();
	struct topo_scan tscan = { .c = c, };

	parse_topology(&tscan, false);

	if (IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
		/* CPUID leaf 1 and the APIC register must agree on the APIC ID */
		if (c->topo.initial_apicid != c->topo.apicid) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. CPUID: 0x%04x APIC: 0x%04x\n",
			       cpu, c->topo.initial_apicid, c->topo.apicid);
		}

		/* The firmware provided mapping must match the APIC as well */
		if (c->topo.apicid != cpuid_to_apicid[cpu]) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. Firmware: 0x%04x APIC: 0x%04x\n",
			       cpu, cpuid_to_apicid[cpu], c->topo.apicid);
		}
	}

	/* All CPUs must report the same domain shifts as the boot CPU */
	for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++) {
		if (tscan.dom_shifts[dom] == x86_topo_system.dom_shifts[dom])
			continue;
		pr_err(FW_BUG "CPU%d: Topology domain %u shift %u != %u\n", cpu, dom,
		       tscan.dom_shifts[dom], x86_topo_system.dom_shifts[dom]);
	}

	topo_set_ids(&tscan, false);
}
232
/*
 * Boot CPU invocation: parse the topology and establish the system
 * wide shift/size table in x86_topo_system, which all subsequent
 * cpu_parse_topology() calls are validated against.
 */
void __init cpu_init_topology(struct cpuinfo_x86 *c)
{
	struct topo_scan tscan = { .c = c, };
	unsigned int dom, sft;

	parse_topology(&tscan, true);

	/* Copy the shift values and calculate the unit sizes. */
	memcpy(x86_topo_system.dom_shifts, tscan.dom_shifts, sizeof(x86_topo_system.dom_shifts));

	/* SMT size is relative to shift 0, higher domains to the level below */
	dom = TOPO_SMT_DOMAIN;
	x86_topo_system.dom_size[dom] = 1U << x86_topo_system.dom_shifts[dom];

	for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
		sft = x86_topo_system.dom_shifts[dom] - x86_topo_system.dom_shifts[dom - 1];
		x86_topo_system.dom_size[dom] = 1U << sft;
	}

	topo_set_ids(&tscan, true);

	/*
	 * AMD systems have Nodes per package which cannot be mapped to
	 * APIC ID.
	 */
	__amd_nodes_per_pkg = tscan.amd_nodes_per_pkg;
}
259