// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_smt.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/units.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hw_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
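
/*
 * Illustrative sketch (not taken from any in-tree driver): a counter-based
 * driver could register itself as the per-CPU frequency scale source roughly
 * as follows. The names below are hypothetical; only struct scale_freq_data,
 * SCALE_FREQ_SOURCE_ARCH and topology_set_scale_freq_source() are part of
 * this interface.
 *
 *	static void example_set_freq_scale(void)
 *	{
 *		// Read this CPU's delivered/reference cycle counters and
 *		// update per_cpu(arch_freq_scale, smp_processor_id()).
 *	}
 *
 *	static struct scale_freq_data example_sfd = {
 *		.source		= SCALE_FREQ_SOURCE_ARCH,
 *		.set_freq_scale	= example_set_freq_scale,
 *	};
 *
 *	topology_set_scale_freq_source(&example_sfd, cpu_present_mask);
 *
 * topology_scale_freq_tick() below then calls the callback from the
 * scheduler tick for every CPU in the registered mask.
 */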

void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}
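
/*
 * Worked example (illustrative numbers): with cur_freq = 1200000 kHz and
 * max_freq = 2400000 kHz, scale = (1200000 << SCHED_CAPACITY_SHIFT) / 2400000
 * = 512, i.e. the CPUs are reported as running at half of
 * SCHED_CAPACITY_SCALE (1024).
 */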

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, hw_pressure);

/**
 * topology_update_hw_pressure() - Update HW pressure for CPUs
 * @cpus : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of HW pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed maximum CPU frequency due to
 * HW capping. It might also be a boost frequency value, which is bigger
 * than the internal 'capacity_freq_ref' max frequency. In such a case the
 * pressure value should simply be removed, since this is an indication that
 * there is no HW throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_hw_pressure(const struct cpumask *cpus,
				 unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = arch_scale_freq_ref(cpu);

	/*
	 * Properly handle boost frequencies, which should simply clear
	 * the HW pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	pressure = max_capacity - capacity;

	trace_hw_pressure_update(cpu, pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
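
/*
 * Worked example (illustrative numbers): with max_capacity = 1024 and
 * max_freq = 2000000 kHz, a thermal cap of capped_freq = 1500000 kHz yields
 * capacity = mult_frac(1024, 1500000, 2000000) = 768 and thus pressure = 256.
 * A capped_freq at or above max_freq (e.g. a boost value) yields pressure = 0.
 */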

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int cpu_capacity_sysctl_add(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_create_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int cpu_capacity_sysctl_remove(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int register_cpu_capacity_sysctl(void)
{
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
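
/*
 * The attribute registered above is exposed as
 * /sys/devices/system/cpu/cpuN/cpu_capacity; reading it (for example
 * "cat /sys/devices/system/cpu/cpu0/cpu_capacity") returns the normalized
 * capacity of that CPU on the 0..SCHED_CAPACITY_SCALE (1024) scale.
 */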

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] *
			   (per_cpu(capacity_freq_ref, cpu) ?: 1);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] *
			   (per_cpu(capacity_freq_ref, cpu) ?: 1);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}
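
/*
 * Worked example (illustrative numbers): a big CPU with
 * capacity-dmips-mhz = 1024 and capacity_freq_ref = 2400000 kHz together with
 * a little CPU with capacity-dmips-mhz = 512 and capacity_freq_ref = 1800000
 * kHz give capacity_scale = 1024 * 2400000. The big CPU then normalizes to
 * 1024 and the little CPU to (512 * 1800000 << 10) / (1024 * 2400000) = 384.
 */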

bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update capacity_freq_ref for calculating early boot CPU capacities.
		 * For CPUs whose DVFS mechanism is not clk-based, there is no way to
		 * get the frequency value at this point, so assume they all run at
		 * the same frequency (by keeping the initial capacity_freq_ref value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(capacity_freq_ref, cpu) =
				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}
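
/*
 * A DT cpu node carrying the property parsed above might look roughly like
 * this (illustrative sketch; the compatible string and values are made up):
 *
 *	cpu0: cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0>;
 *		capacity-dmips-mhz = <512>;
 *	};
 */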

void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

static inline void topology_init_cpu_capacity_cppc(void)
{
	u64 capacity, capacity_scale = 0;
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);

			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);

			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	for_each_possible_cpu(cpu) {
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);

		capacity = raw_capacity[cpu];
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}

	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}

void acpi_processor_init_invariance_cppc(void)
{
	topology_init_cpu_capacity_cppc();
}
#endif

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
	}

	if (cpumask_empty(cpus_to_visit)) {
		if (raw_capacity) {
			topology_normalize_cpu_scale();
			schedule_work(&update_topology_flags_work);
			free_raw_capacity();
		}
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)

/* Used to enable the SMT control */
static unsigned int max_smt_thread_num = 1;

/*
 * This function returns the logical CPU number of the node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 * there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in the DT. We just need to ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	int cpu;
	struct device_node *cpu_node __free(device_node) =
		of_parse_phandle(node, "cpu", 0);

	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	return cpu;
}
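
/*
 * The parse_socket()/parse_cluster()/parse_core() helpers below walk a DT
 * cpu-map node shaped roughly like this (minimal illustrative sketch; the
 * labels and the number of nodes are made up):
 *
 *	cpu-map {
 *		socket0 {
 *			cluster0 {
 *				core0 {
 *					thread0 { cpu = <&cpu0>; };
 *					thread1 { cpu = <&cpu1>; };
 *				};
 *				core1 {
 *					thread0 { cpu = <&cpu2>; };
 *					thread1 { cpu = <&cpu3>; };
 *				};
 *			};
 *		};
 *	};
 */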

static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		struct device_node *t __free(device_node) =
			of_get_child_by_name(core, name);

		if (!t)
			break;

		leaf = false;
		cpu = get_cpu_for_node(t);
		if (cpu >= 0) {
			cpu_topology[cpu].package_id = package_id;
			cpu_topology[cpu].cluster_id = cluster_id;
			cpu_topology[cpu].core_id = core_id;
			cpu_topology[cpu].thread_id = i;
		} else if (cpu != -ENODEV) {
			pr_err("%pOF: Can't get CPU for thread\n", t);
			return -EINVAL;
		}
		i++;
	} while (1);

	max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(cluster, name);

		if (!c)
			break;

		leaf = false;
		ret = parse_cluster(c, package_id, i, depth + 1);
		if (depth > 0)
			pr_warn("Topology for clusters of clusters not yet supported\n");
		if (ret != 0)
			return ret;
		i++;
	} while (1);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(cluster, name);

		if (!c)
			break;

		has_cores = true;

		if (depth == 0) {
			pr_err("%pOF: cpu-map children should be clusters\n", c);
			return -EINVAL;
		}

		if (leaf) {
			ret = parse_core(c, package_id, cluster_id, core_id++);
			if (ret != 0)
				return ret;
		} else {
			pr_err("%pOF: Non-leaf cluster with core %s\n",
			       cluster, name);
			return -EINVAL;
		}

		i++;
	} while (1);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}

static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(socket, name);

		if (!c)
			break;

		has_socket = true;
		ret = parse_cluster(c, package_id, -1, 0);
		if (ret != 0)
			return ret;

		package_id++;
	} while (1);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	/*
	 * Reset max_smt_thread_num to 1 on failure: on failure we must tell
	 * the framework that SMT is not supported, but max_smt_thread_num
	 * may already have been raised to the SMT thread count of the cores
	 * that were successfully parsed.
	 */
	if (ret)
		max_smt_thread_num = 1;

	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);

	return ret;
}

static int __init parse_dt_topology(void)
{
	int ret = 0;
	int cpu;
	struct device_node *cn __free(device_node) =
		of_find_node_by_path("/cpus");

	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	struct device_node *map __free(device_node) =
		of_get_child_by_name(cn, "cpu-map");

	if (!map)
		return ret;

	ret = parse_socket(map);
	if (ret != 0)
		return ret;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0) {
			return -EINVAL;
		}

	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Forbid cpu_clustergroup_mask() from spanning more CPUs than, or the
	 * same CPUs as, cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int cpu, ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information. But do not return yet to give
		 * arch-specific early cache level detection a chance to run.
		 */
		reset_cpu_topology();
	}

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);
		if (!ret)
			continue;
		else if (ret != -ENOENT)
			pr_err("Early cacheinfo failed, ret = %d\n", ret);
		return;
	}
}

void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif
