// SPDX-License-Identifier: GPL-2.0+
/*
 * Hygon Processor Support for Linux
 *
 * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
 *
 * Author: Pu Wen <puwen@hygon.cn>
 */
#include <linux/io.h>

#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>
#ifdef CONFIG_X86_64
# include <asm/set_memory.h>
#endif

#include "cpu.h"

#define APICID_SOCKET_ID_BIT 6

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config, see the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

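/*
 * CPUID Fn8000_001E_EBX[15:8] encodes the number of threads per core
 * minus one on these Zen-derived parts.
 */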
static void hygon_get_topology_early(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) Hygon multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) Hygon processors supporting compute units
 */
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

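		/* Fn8000_001E: ECX[7:0] is the node ID, EBX[7:0] the core ID. */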
		node_id = ecx & 0xff;

		c->cpu_core_id = ebx & 0xff;

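		/* x86_max_cores still counts SMT threads here; reduce it to cores. */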
		if (smp_num_siblings > 1)
			c->x86_max_cores /= smp_num_siblings;

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		/* Socket ID is ApicId[6] for these processors. */
		c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;

		cacheinfo_hygon_init_llc_id(c, cpu, node_id);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	if (nodes_per_socket > 1)
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
}

/*
 * On Hygon setups the lower bits of the APIC ID distinguish the cores.
 * Assumes the number of cores is a power of two.
 */
static void hygon_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned int bits;
	int cpu = smp_processor_id();

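	/*
	 * Illustrative example: with x86_coreid_bits == 3, an initial APIC ID
	 * of 0x1a (0b11010) decodes to core 2 in socket 3.
	 */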
	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits) - 1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platforms (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through the CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

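	/*
	 * CPUID Fn8000_0008_ECX: bits [7:0] (NC) encode the enumerated core
	 * count minus one, bits [15:12] (ApicIdCoreIdSize) the number of low
	 * APIC ID bits used for the core ID.
	 */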
	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	unsigned long long tseg;

	/*
	 * Split up direct mapping around the TSEG SMM area.
	 * Don't do it for gbpages because there seems very little
	 * benefit in doing so.
	 */
	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
		unsigned long pfn = tseg >> PAGE_SHIFT;

		pr_debug("tseg: %010llx\n", tseg);
		if (pfn_range_is_mapped(pfn, pfn + 1))
			set_memory_4k((unsigned long)__va(tseg), 1);
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

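		/* HWCR bit 24 (TscFreqSel): the TSC increments at the P0 frequency. */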
		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

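		/* Bits [5:3] of the node ID MSR hold the per-socket node count minus one. */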
		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}

static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_hygon_mc(c);

	set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for Hygon APIC, so
	 * we can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	hygon_get_topology_early(c);
}

static void init_hygon(struct cpuinfo_x86 *c)
{
	early_init_hygon(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

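	/* Hygon Dhyana shares the Zen microarchitecture, hence ZEN and CPB. */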
	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	hygon_detect_cmp(c);
	hygon_get_topology(c);
	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/* Hygon processors keep the APIC timer running in deep C-states. */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

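	/*
	 * CPUID Fn8000_0006: EBX describes the L2 4K TLBs, EAX the L2 2M/4M
	 * TLBs. In each register, bits [27:16] are data-TLB entries and bits
	 * [11:0] instruction-TLB entries; the top four bits of each 16-bit
	 * half encode associativity, hence the 0xfff mask.
	 */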
	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		tlb_lli_2m[ENTRIES] = eax & 0xff;
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor	= "Hygon",
	.c_ident	= { "HygonGenuine" },
	.c_early_init	= early_init_hygon,
	.c_detect_tlb	= cpu_detect_tlb_hygon,
	.c_bsp_init	= bsp_init_hygon,
	.c_init		= init_hygon,
	.c_x86_vendor	= X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);