Lines matching full:cpu in drivers/base/arch_topology.c
3 * Arch specific cpu topology information
12 #include <linux/cpu.h>
67 int cpu; in topology_set_scale_freq_source() local
78 for_each_cpu(cpu, cpus) { in topology_set_scale_freq_source()
79 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_set_scale_freq_source()
83 rcu_assign_pointer(per_cpu(sft_data, cpu), data); in topology_set_scale_freq_source()
84 cpumask_set_cpu(cpu, &scale_freq_counters_mask); in topology_set_scale_freq_source()
98 int cpu; in topology_clear_scale_freq_source() local
102 for_each_cpu(cpu, cpus) { in topology_clear_scale_freq_source()
103 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_clear_scale_freq_source()
106 rcu_assign_pointer(per_cpu(sft_data, cpu), NULL); in topology_clear_scale_freq_source()
107 cpumask_clear_cpu(cpu, &scale_freq_counters_mask); in topology_clear_scale_freq_source()
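
The pair above installs and removes per-CPU frequency-scale providers; a new source only replaces an existing one of lower or equal priority. A minimal sketch of a caller, with hypothetical my_* names, modeled on the arm64 AMU user of this interface:

#include <linux/arch_topology.h>
#include <linux/cpumask.h>

/* Hypothetical tick hook: would read hardware counters and publish
 * the current/max frequency ratio for the scheduler.
 */
static void my_freq_tick(void)
{
}

static struct scale_freq_data my_sfd = {
	.source		= SCALE_FREQ_SOURCE_ARCH,
	.set_freq_scale	= my_freq_tick,
};

static void my_counters_enable(const struct cpumask *cpus)
{
	/* Takes over from e.g. the cpufreq-based source on these CPUs. */
	topology_set_scale_freq_source(&my_sfd, cpus);
}

static void my_counters_disable(const struct cpumask *cpus)
{
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpus);
}
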
160 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) in topology_set_cpu_scale() argument
162 per_cpu(cpu_scale, cpu) = capacity; in topology_set_cpu_scale()
186 int cpu; in topology_update_hw_pressure() local
188 cpu = cpumask_first(cpus); in topology_update_hw_pressure()
189 max_capacity = arch_scale_cpu_capacity(cpu); in topology_update_hw_pressure()
190 max_freq = arch_scale_freq_ref(cpu); in topology_update_hw_pressure()
203 trace_hw_pressure_update(cpu, pressure); in topology_update_hw_pressure()
205 for_each_cpu(cpu, cpus) in topology_update_hw_pressure()
206 WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure); in topology_update_hw_pressure()
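
topology_update_hw_pressure() turns a frequency cap into lost capacity (max capacity minus the capacity available at the capped frequency) and stores it per CPU for the scheduler. A hedged sketch of a caller, e.g. a cooling driver that has just clamped a policy (my_* names hypothetical):

#include <linux/arch_topology.h>
#include <linux/cpufreq.h>

/* Hypothetical cooling hook: after limiting the policy to capped_khz,
 * account the lost capacity as HW pressure on every CPU sharing it.
 */
static void my_report_freq_cap(struct cpufreq_policy *policy,
			       unsigned long capped_khz)
{
	topology_update_hw_pressure(policy->related_cpus, capped_khz);
}
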
214 struct cpu *cpu = container_of(dev, struct cpu, dev); in cpu_capacity_show() local
216 return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); in cpu_capacity_show()
224 static int cpu_capacity_sysctl_add(unsigned int cpu) in cpu_capacity_sysctl_add() argument
226 struct device *cpu_dev = get_cpu_device(cpu); in cpu_capacity_sysctl_add()
236 static int cpu_capacity_sysctl_remove(unsigned int cpu) in cpu_capacity_sysctl_remove() argument
238 struct device *cpu_dev = get_cpu_device(cpu); in cpu_capacity_sysctl_remove()
250 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity", in register_cpu_capacity_sysctl()
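
The attribute registered above shows up as /sys/devices/system/cpu/cpuN/cpu_capacity, normalized so the biggest CPU reads SCHED_CAPACITY_SCALE (1024). A small userspace sketch that reads it:

#include <stdio.h>

int main(void)
{
	unsigned long cap;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpu_capacity", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &cap) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("cpu0 capacity: %lu (1024 = biggest CPU)\n", cap);
	return 0;
}
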
290 int cpu; in topology_normalize_cpu_scale() local
296 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
297 capacity = raw_capacity[cpu] * in topology_normalize_cpu_scale()
298 (per_cpu(capacity_freq_ref, cpu) ?: 1); in topology_normalize_cpu_scale()
303 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
304 capacity = raw_capacity[cpu] * in topology_normalize_cpu_scale()
305 (per_cpu(capacity_freq_ref, cpu) ?: 1); in topology_normalize_cpu_scale()
308 topology_set_cpu_scale(cpu, capacity); in topology_normalize_cpu_scale()
309 pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", in topology_normalize_cpu_scale()
310 cpu, topology_get_cpu_scale(cpu)); in topology_normalize_cpu_scale()
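
The two loops above compute capacity = raw_capacity[cpu] * capacity_freq_ref[cpu], record the maximum as capacity_scale, then rescale so the largest CPU lands exactly at SCHED_CAPACITY_SCALE (1 << SCHED_CAPACITY_SHIFT = 1024). Worked example with hypothetical numbers: big CPUs with capacity-dmips-mhz = 1024 at a 2000000 kHz reference give 1024 * 2000000 = 2048000000; LITTLE CPUs with 512 at 1500000 kHz give 768000000. capacity_scale becomes 2048000000, so the big CPUs normalize to 1024 and the LITTLE ones to 768000000 * 1024 / 2048000000 = 384.
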
314 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) in topology_parse_cpu_capacity() argument
336 raw_capacity[cpu] = cpu_capacity; in topology_parse_cpu_capacity()
338 cpu_node, raw_capacity[cpu]); in topology_parse_cpu_capacity()
341 * Update capacity_freq_ref for calculating early boot CPU capacities. in topology_parse_cpu_capacity()
342 * For a non-clk CPU DVFS mechanism, there's no way to get the in topology_parse_cpu_capacity()
348 per_cpu(capacity_freq_ref, cpu) = in topology_parse_cpu_capacity()
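
The raw value comes from the CPU's device-tree node; the binding property is capacity-dmips-mhz. A sketch of the read, with an example node in the comment (values hypothetical):

#include <linux/of.h>

/* Example cpu node (DTS):
 *
 *	cpu0: cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		capacity-dmips-mhz = <512>;
 *	};
 */
static int my_read_raw_capacity(struct device_node *cpu_node, u32 *cap)
{
	/* 0 on success; an -errno such as -EINVAL when the property is absent. */
	return of_property_read_u32(cpu_node, "capacity-dmips-mhz", cap);
}
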
365 void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) in freq_inv_set_max_ratio() argument
376 int cpu; in topology_init_cpu_capacity_cppc() local
386 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
387 if (!cppc_get_perf_caps(cpu, &perf_caps) && in topology_init_cpu_capacity_cppc()
390 raw_capacity[cpu] = perf_caps.highest_perf; in topology_init_cpu_capacity_cppc()
391 capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
393 per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
395 pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n", in topology_init_cpu_capacity_cppc()
396 cpu, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
400 pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu); in topology_init_cpu_capacity_cppc()
405 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
406 freq_inv_set_max_ratio(cpu, in topology_init_cpu_capacity_cppc()
407 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); in topology_init_cpu_capacity_cppc()
409 capacity = raw_capacity[cpu]; in topology_init_cpu_capacity_cppc()
412 topology_set_cpu_scale(cpu, capacity); in topology_init_cpu_capacity_cppc()
413 pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", in topology_init_cpu_capacity_cppc()
414 cpu, topology_get_cpu_scale(cpu)); in topology_init_cpu_capacity_cppc()
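
The CPPC path reuses the same normalization with highest_perf as the raw value. Hypothetical example: highest_perf = 300 on the big cores and 180 on the little ones gives capacity_scale = 300, so the big cores scale to 1024 and the little ones to 180 * 1024 / 300 = 614 (integer division truncates 614.4).
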
440 int cpu; in init_cpu_capacity_callback() local
445 pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", in init_cpu_capacity_callback()
451 for_each_cpu(cpu, policy->related_cpus) { in init_cpu_capacity_callback()
452 per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq; in init_cpu_capacity_callback()
453 freq_inv_set_max_ratio(cpu, in init_cpu_capacity_callback()
454 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); in init_cpu_capacity_callback()
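
A note on units in the loop above: cpufreq reports policy->cpuinfo.max_freq in kHz, while freq_inv_set_max_ratio() expects a rate in Hz, hence the multiplication by HZ_PER_KHZ (1000).
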
480 * information is not needed for cpu capacity initialization. in register_cpufreq_notifier()
517 * This function returns the logical CPU number of the node.
519 * (1) a logical CPU number, which is >= 0.
521 * there is no possible logical CPU in the kernel to match. This happens
523 * CPU nodes in DT. We need to just ignore this case.
528 int cpu; in get_cpu_for_node() local
530 of_parse_phandle(node, "cpu", 0); in get_cpu_for_node()
535 cpu = of_cpu_node_to_id(cpu_node); in get_cpu_for_node()
536 if (cpu >= 0) in get_cpu_for_node()
537 topology_parse_cpu_capacity(cpu_node, cpu); in get_cpu_for_node()
539 pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n", in get_cpu_for_node()
542 return cpu; in get_cpu_for_node()
551 int cpu; in parse_core() local
562 cpu = get_cpu_for_node(t); in parse_core()
563 if (cpu >= 0) { in parse_core()
564 cpu_topology[cpu].package_id = package_id; in parse_core()
565 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
566 cpu_topology[cpu].core_id = core_id; in parse_core()
567 cpu_topology[cpu].thread_id = i; in parse_core()
568 } else if (cpu != -ENODEV) { in parse_core()
569 pr_err("%pOF: Can't get CPU for thread\n", t); in parse_core()
577 cpu = get_cpu_for_node(core); in parse_core()
578 if (cpu >= 0) { in parse_core()
580 pr_err("%pOF: Core has both threads and CPU\n", in parse_core()
585 cpu_topology[cpu].package_id = package_id; in parse_core()
586 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
587 cpu_topology[cpu].core_id = core_id; in parse_core()
588 } else if (leaf && cpu != -ENODEV) { in parse_core()
589 pr_err("%pOF: Can't get CPU for leaf core\n", core); in parse_core()
641 pr_err("%pOF: cpu-map children should be clusters\n", c); in parse_cluster()
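
parse_core() and parse_cluster() walk the DT cpu-map node, assigning package/cluster/core/thread IDs as they recurse. A valid layout, sketched as a comment (hypothetical two-cluster system; the cpu phandles point at real cpu nodes, and cluster1 shows an SMT core with thread subnodes):

/* cpu-map {
 *	cluster0 {
 *		core0 {
 *			cpu = <&cpu0>;
 *		};
 *		core1 {
 *			cpu = <&cpu1>;
 *		};
 *	};
 *	cluster1 {
 *		core0 {
 *			thread0 {
 *				cpu = <&cpu2>;
 *			};
 *			thread1 {
 *				cpu = <&cpu3>;
 *			};
 *		};
 *	};
 * };
 */
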
706 int cpu; in parse_dt_topology() local
711 pr_err("No CPU information found in DT\n"); in parse_dt_topology()
716 * When topology is provided, cpu-map is essentially a root in parse_dt_topology()
720 of_get_child_by_name(cn, "cpu-map"); in parse_dt_topology()
735 for_each_possible_cpu(cpu) in parse_dt_topology()
736 if (cpu_topology[cpu].package_id < 0) { in parse_dt_topology()
745 * cpu topology table
750 const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
752 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); in cpu_coregroup_mask()
755 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { in cpu_coregroup_mask()
757 core_mask = &cpu_topology[cpu].core_sibling; in cpu_coregroup_mask()
760 if (last_level_cache_is_valid(cpu)) { in cpu_coregroup_mask()
761 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) in cpu_coregroup_mask()
762 core_mask = &cpu_topology[cpu].llc_sibling; in cpu_coregroup_mask()
766 * For systems with no shared cpu-side LLC but with clusters defined, in cpu_coregroup_mask()
771 cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling)) in cpu_coregroup_mask()
772 core_mask = &cpu_topology[cpu].cluster_sibling; in cpu_coregroup_mask()
777 const struct cpumask *cpu_clustergroup_mask(int cpu) in cpu_clustergroup_mask() argument
783 if (cpumask_subset(cpu_coregroup_mask(cpu), in cpu_clustergroup_mask()
784 &cpu_topology[cpu].cluster_sibling)) in cpu_clustergroup_mask()
785 return topology_sibling_cpumask(cpu); in cpu_clustergroup_mask()
787 return &cpu_topology[cpu].cluster_sibling; in cpu_clustergroup_mask()
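
Worked example for the two helpers above (hypothetical part): with eight CPUs sharing one LLC but split into two four-CPU clusters, cpu_coregroup_mask() returns all eight (the LLC siblings); that eight-CPU mask is not a subset of a four-CPU cluster, so cpu_clustergroup_mask() returns the four cluster siblings, and the scheduler builds a CLS level nested inside MC.
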
793 int cpu, ret; in update_siblings_masks() local
800 for_each_online_cpu(cpu) { in update_siblings_masks()
801 cpu_topo = &cpu_topology[cpu]; in update_siblings_masks()
803 if (last_level_cache_is_shared(cpu, cpuid)) { in update_siblings_masks()
804 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
812 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
818 cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling); in update_siblings_masks()
826 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
830 static void clear_cpu_topology(int cpu) in clear_cpu_topology() argument
832 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in clear_cpu_topology()
835 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
838 cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling); in clear_cpu_topology()
841 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
843 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
848 unsigned int cpu; in reset_cpu_topology() local
850 for_each_possible_cpu(cpu) { in reset_cpu_topology()
851 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in reset_cpu_topology()
858 clear_cpu_topology(cpu); in reset_cpu_topology()
862 void remove_cpu_topology(unsigned int cpu) in remove_cpu_topology() argument
866 for_each_cpu(sibling, topology_core_cpumask(cpu)) in remove_cpu_topology()
867 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_cpu_topology()
868 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) in remove_cpu_topology()
869 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_cpu_topology()
870 for_each_cpu(sibling, topology_cluster_cpumask(cpu)) in remove_cpu_topology()
871 cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); in remove_cpu_topology()
872 for_each_cpu(sibling, topology_llc_cpumask(cpu)) in remove_cpu_topology()
873 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); in remove_cpu_topology()
875 clear_cpu_topology(cpu); in remove_cpu_topology()
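
remove_cpu_topology() belongs on the hot-unplug path: it drops the departing CPU from every other CPU's sibling masks, then resets its own entry. A sketch of the call site, modeled on the arm64 __cpu_disable() path (my_* name hypothetical):

/* Hypothetical arch hot-unplug hook. */
static int my_cpu_disable(unsigned int cpu)
{
	/* ... migrate IRQs, mark the CPU offline ... */

	/* Stop the scheduler from treating this CPU as anyone's sibling. */
	remove_cpu_topology(cpu);
	return 0;
}
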
886 int cpu, ret; in init_cpu_topology() local
902 for_each_possible_cpu(cpu) { in init_cpu_topology()
903 ret = fetch_cache_info(cpu); in init_cpu_topology()
923 pr_debug("CPU%u: package %d core %d thread %d\n", in store_cpu_topology()
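
For context, a sketch of the boot flow that drives all of the above, modeled on how arm64 and riscv wire it up (my_* names hypothetical): init_cpu_topology() runs once to parse the DT and fetch cache info, then each CPU that comes online stores its own topology, which also refreshes the sibling masks.

void __init my_smp_prepare_cpus(void)
{
	init_cpu_topology();		/* parse_dt_topology() + cache info */
}

void my_secondary_start(unsigned int cpu)
{
	store_cpu_topology(cpu);	/* fill IDs, update_siblings_masks() */
}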