Lines matching refs: cpu
67 int cpu; in topology_set_scale_freq_source() local
78 for_each_cpu(cpu, cpus) { in topology_set_scale_freq_source()
79 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_set_scale_freq_source()
83 rcu_assign_pointer(per_cpu(sft_data, cpu), data); in topology_set_scale_freq_source()
84 cpumask_set_cpu(cpu, &scale_freq_counters_mask); in topology_set_scale_freq_source()
98 int cpu; in topology_clear_scale_freq_source() local
102 for_each_cpu(cpu, cpus) { in topology_clear_scale_freq_source()
103 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_clear_scale_freq_source()
106 rcu_assign_pointer(per_cpu(sft_data, cpu), NULL); in topology_clear_scale_freq_source()
107 cpumask_clear_cpu(cpu, &scale_freq_counters_mask); in topology_clear_scale_freq_source()
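
The two groups above are the registration pair for frequency-invariance sources: topology_set_scale_freq_source() publishes a per-CPU, RCU-protected struct scale_freq_data pointer for every CPU in the mask and marks those CPUs in scale_freq_counters_mask, while topology_clear_scale_freq_source() undoes both steps. A minimal caller sketch, assuming the declarations in include/linux/arch_topology.h; the example_* names and the empty tick callback are hypothetical:

        #include <linux/arch_topology.h>
        #include <linux/cpumask.h>

        /* Hypothetical tick hook: derive this CPU's frequency scale from HW counters. */
        static void example_set_freq_scale(void)
        {
                /* read activity counters and update the per-CPU frequency scale */
        }

        static struct scale_freq_data example_sfd = {
                .source         = SCALE_FREQ_SOURCE_ARCH,
                .set_freq_scale = example_set_freq_scale,
        };

        static void example_counters_enable(const struct cpumask *cpus)
        {
                /* publish the counter-based source for these CPUs */
                topology_set_scale_freq_source(&example_sfd, cpus);
        }

        static void example_counters_disable(const struct cpumask *cpus)
        {
                /* withdraw it again; those CPUs fall back to cpufreq-driven scaling */
                topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpus);
        }
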
178 int cpu; in topology_update_hw_pressure() local
180 cpu = cpumask_first(cpus); in topology_update_hw_pressure()
181 max_capacity = arch_scale_cpu_capacity(cpu); in topology_update_hw_pressure()
182 max_freq = arch_scale_freq_ref(cpu); in topology_update_hw_pressure()
195 trace_hw_pressure_update(cpu, pressure); in topology_update_hw_pressure()
197 for_each_cpu(cpu, cpus) in topology_update_hw_pressure()
198 WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure); in topology_update_hw_pressure()
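
topology_update_hw_pressure() samples the first CPU of the mask for the group's maximum capacity (arch_scale_cpu_capacity()) and reference frequency (arch_scale_freq_ref()), converts the capped frequency into lost capacity, and publishes the result to every CPU in the mask with WRITE_ONCE(). Judging by those inputs, the stored value is roughly max_capacity - max_capacity * capped_freq / max_freq, dropping to zero when the cap is at or above the reference frequency. A hedged caller sketch, assuming the signature topology_update_hw_pressure(const struct cpumask *cpus, unsigned long capped_freq); the throttle handler itself is hypothetical:

        #include <linux/arch_topology.h>
        #include <linux/cpufreq.h>

        /*
         * Hypothetical thermal/LMh throttle notification: report how much capacity
         * the hardware cap currently costs. capped_khz is in kHz, the same unit as
         * arch_scale_freq_ref().
         */
        static void example_throttle_event(struct cpufreq_policy *policy,
                                           unsigned long capped_khz)
        {
                topology_update_hw_pressure(policy->related_cpus, capped_khz);
        }
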
238 int cpu; in topology_normalize_cpu_scale() local
244 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
245 capacity = raw_capacity[cpu] * in topology_normalize_cpu_scale()
246 (per_cpu(capacity_freq_ref, cpu) ?: 1); in topology_normalize_cpu_scale()
251 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
252 capacity = raw_capacity[cpu] * in topology_normalize_cpu_scale()
253 (per_cpu(capacity_freq_ref, cpu) ?: 1); in topology_normalize_cpu_scale()
256 topology_set_cpu_scale(cpu, capacity); in topology_normalize_cpu_scale()
258 cpu, topology_get_cpu_scale(cpu)); in topology_normalize_cpu_scale()
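
topology_normalize_cpu_scale() makes two passes over the possible CPUs: the first finds the largest raw_capacity[cpu] * capacity_freq_ref product (falling back to 1 when no reference frequency is known), the second scales every CPU against that maximum and stores the result with topology_set_cpu_scale(). A self-contained userspace illustration of the same arithmetic, with made-up capacity-dmips-mhz and frequency values:

        #include <stdint.h>
        #include <stdio.h>

        #define SCHED_CAPACITY_SHIFT    10      /* 1024 == capacity of the fastest CPU */

        /* Hypothetical two-cluster system: {capacity-dmips-mhz, max freq in kHz}. */
        static const struct { uint64_t dmips_mhz, freq_khz; } cpus[] = {
                { 1024, 2400000 },      /* big */
                {  436, 1800000 },      /* LITTLE */
        };

        int main(void)
        {
                uint64_t scale = 1, cap;
                size_t i;

                /* Pass 1: find the largest dmips-mhz * reference-frequency product. */
                for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
                        cap = cpus[i].dmips_mhz * cpus[i].freq_khz;
                        if (cap > scale)
                                scale = cap;
                }

                /* Pass 2: normalise every CPU into the scheduler's 0..1024 range. */
                for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
                        cap = cpus[i].dmips_mhz * cpus[i].freq_khz;
                        printf("cpu%zu capacity = %llu\n", i,
                               (unsigned long long)((cap << SCHED_CAPACITY_SHIFT) / scale));
                }
                return 0;
        }

With these sample numbers the big CPU comes out at 1024 and the LITTLE at 327.
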
262 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) in topology_parse_cpu_capacity() argument
284 raw_capacity[cpu] = cpu_capacity; in topology_parse_cpu_capacity()
286 cpu_node, raw_capacity[cpu]); in topology_parse_cpu_capacity()
296 per_cpu(capacity_freq_ref, cpu) = in topology_parse_cpu_capacity()
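
Only the cpu-referencing lines are listed here; the value stored in raw_capacity[cpu] is the devicetree capacity-dmips-mhz property of the CPU node, and capacity_freq_ref is derived from the CPU's clock when one is described. A hedged sketch of that parsing step, assuming of_property_read_u32() and of_clk_get(); the example_ helper is made up and error handling is trimmed:

        #include <linux/clk.h>
        #include <linux/err.h>
        #include <linux/of.h>

        /* Read the relative per-MHz capacity and, if present, the CPU clock rate. */
        static u32 example_parse_capacity(struct device_node *cpu_node, int cpu)
        {
                struct clk *cpu_clk;
                u32 dmips_mhz = 0;

                of_property_read_u32(cpu_node, "capacity-dmips-mhz", &dmips_mhz);

                cpu_clk = of_clk_get(cpu_node, 0);
                if (!IS_ERR_OR_NULL(cpu_clk)) {
                        pr_debug("cpu%d reference freq %lu kHz\n", cpu,
                                 clk_get_rate(cpu_clk) / 1000);
                        clk_put(cpu_clk);
                }

                return dmips_mhz;
        }
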
313 void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) in freq_inv_set_max_ratio() argument
324 int cpu; in topology_init_cpu_capacity_cppc() local
334 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
335 if (!cppc_get_perf_caps(cpu, &perf_caps) && in topology_init_cpu_capacity_cppc()
338 raw_capacity[cpu] = perf_caps.highest_perf; in topology_init_cpu_capacity_cppc()
339 capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
341 per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
344 cpu, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
348 pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu); in topology_init_cpu_capacity_cppc()
353 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
354 freq_inv_set_max_ratio(cpu, in topology_init_cpu_capacity_cppc()
355 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); in topology_init_cpu_capacity_cppc()
357 capacity = raw_capacity[cpu]; in topology_init_cpu_capacity_cppc()
360 topology_set_cpu_scale(cpu, capacity); in topology_init_cpu_capacity_cppc()
362 cpu, topology_get_cpu_scale(cpu)); in topology_init_cpu_capacity_cppc()
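
On ACPI systems the same per-CPU inputs come from CPPC rather than the devicetree: highest_perf plays the role of capacity-dmips-mhz and cppc_perf_to_khz() provides the reference frequency. A hedged per-CPU query sketch using the cppc_acpi helpers that appear in the listing; the example_ wrapper is hypothetical:

        #include <acpi/cppc_acpi.h>

        /* Return the CPU's highest performance level, or 0 if CPPC is unusable. */
        static u64 example_cppc_capacity(int cpu, u64 *ref_khz)
        {
                struct cppc_perf_caps caps;

                if (cppc_get_perf_caps(cpu, &caps) || !caps.highest_perf)
                        return 0;

                /* Translate the abstract performance value into kHz. */
                *ref_khz = cppc_perf_to_khz(&caps, caps.highest_perf);
                return caps.highest_perf;
        }
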
388 int cpu; in init_cpu_capacity_callback() local
399 for_each_cpu(cpu, policy->related_cpus) { in init_cpu_capacity_callback()
400 per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq; in init_cpu_capacity_callback()
401 freq_inv_set_max_ratio(cpu, in init_cpu_capacity_callback()
402 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); in init_cpu_capacity_callback()
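
These lines sit in a cpufreq policy notifier: when a policy is created, every related CPU inherits policy->cpuinfo.max_freq (kHz) as its reference frequency, and freq_inv_set_max_ratio() receives the same value converted to Hz via HZ_PER_KHZ. A hedged sketch of how such a callback is wired up, assuming the standard cpufreq notifier API; the example_ names are made up:

        #include <linux/cpufreq.h>
        #include <linux/notifier.h>

        static int example_policy_callback(struct notifier_block *nb,
                                           unsigned long val, void *data)
        {
                struct cpufreq_policy *policy = data;
                int cpu;

                if (val != CPUFREQ_CREATE_POLICY)
                        return 0;

                /* One max_freq per policy, shared by all CPUs the policy covers. */
                for_each_cpu(cpu, policy->related_cpus)
                        pr_debug("cpu%d reference freq %u kHz\n",
                                 cpu, policy->cpuinfo.max_freq);

                return 0;
        }

        static struct notifier_block example_policy_nb = {
                .notifier_call = example_policy_callback,
        };

        static int __init example_register(void)
        {
                return cpufreq_register_notifier(&example_policy_nb,
                                                 CPUFREQ_POLICY_NOTIFIER);
        }
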
476 int cpu; in get_cpu_for_node() local
483 cpu = of_cpu_node_to_id(cpu_node); in get_cpu_for_node()
484 if (cpu >= 0) in get_cpu_for_node()
485 topology_parse_cpu_capacity(cpu_node, cpu); in get_cpu_for_node()
490 return cpu; in get_cpu_for_node()
499 int cpu; in parse_core() local
510 cpu = get_cpu_for_node(t); in parse_core()
511 if (cpu >= 0) { in parse_core()
512 cpu_topology[cpu].package_id = package_id; in parse_core()
513 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
514 cpu_topology[cpu].core_id = core_id; in parse_core()
515 cpu_topology[cpu].thread_id = i; in parse_core()
516 } else if (cpu != -ENODEV) { in parse_core()
525 cpu = get_cpu_for_node(core); in parse_core()
526 if (cpu >= 0) { in parse_core()
533 cpu_topology[cpu].package_id = package_id; in parse_core()
534 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
535 cpu_topology[cpu].core_id = core_id; in parse_core()
536 } else if (leaf && cpu != -ENODEV) { in parse_core()
654 int cpu; in parse_dt_topology() local
683 for_each_possible_cpu(cpu) in parse_dt_topology()
684 if (cpu_topology[cpu].package_id < 0) { in parse_dt_topology()
698 const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
700 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); in cpu_coregroup_mask()
703 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { in cpu_coregroup_mask()
705 core_mask = &cpu_topology[cpu].core_sibling; in cpu_coregroup_mask()
708 if (last_level_cache_is_valid(cpu)) { in cpu_coregroup_mask()
709 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) in cpu_coregroup_mask()
710 core_mask = &cpu_topology[cpu].llc_sibling; in cpu_coregroup_mask()
719 cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling)) in cpu_coregroup_mask()
720 core_mask = &cpu_topology[cpu].cluster_sibling; in cpu_coregroup_mask()
725 const struct cpumask *cpu_clustergroup_mask(int cpu) in cpu_clustergroup_mask() argument
731 if (cpumask_subset(cpu_coregroup_mask(cpu), in cpu_clustergroup_mask()
732 &cpu_topology[cpu].cluster_sibling)) in cpu_clustergroup_mask()
733 return topology_sibling_cpumask(cpu); in cpu_clustergroup_mask()
735 return &cpu_topology[cpu].cluster_sibling; in cpu_clustergroup_mask()
741 int cpu, ret; in update_siblings_masks() local
748 for_each_online_cpu(cpu) { in update_siblings_masks()
749 cpu_topo = &cpu_topology[cpu]; in update_siblings_masks()
751 if (last_level_cache_is_shared(cpu, cpuid)) { in update_siblings_masks()
752 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
760 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
766 cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling); in update_siblings_masks()
774 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
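
update_siblings_masks() walks every online CPU and compares its topology IDs against the CPU being brought up; the listing only shows one direction of each update, but the masks are filled in symmetrically, so both CPUs gain each other as LLC, core, cluster or thread siblings. A hedged reconstruction of the per-pair logic under that assumption, wrapped as a standalone helper; cpu_topology[] is the array from linux/arch_topology.h and last_level_cache_is_shared() is the file-local helper referenced above:

        #include <linux/arch_topology.h>
        #include <linux/cpumask.h>

        /* Sketch: pair the hot-plugged @cpuid with one already-online @cpu. */
        static void example_update_siblings(unsigned int cpuid, unsigned int cpu)
        {
                struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                /* LLC siblings depend only on the shared cache, not the package. */
                if (last_level_cache_is_shared(cpu, cpuid)) {
                        cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
                        cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
                }

                if (cpuid_topo->package_id != cpu_topo->package_id)
                        return;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
                        return;

                if (cpuid_topo->cluster_id >= 0) {
                        cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
                        cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
                }

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        return;

                /* Same core: the two CPUs are hardware threads of each other. */
                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
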
778 static void clear_cpu_topology(int cpu) in clear_cpu_topology() argument
780 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in clear_cpu_topology()
783 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
786 cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling); in clear_cpu_topology()
789 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
791 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
796 unsigned int cpu; in reset_cpu_topology() local
798 for_each_possible_cpu(cpu) { in reset_cpu_topology()
799 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in reset_cpu_topology()
806 clear_cpu_topology(cpu); in reset_cpu_topology()
810 void remove_cpu_topology(unsigned int cpu) in remove_cpu_topology() argument
814 for_each_cpu(sibling, topology_core_cpumask(cpu)) in remove_cpu_topology()
815 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_cpu_topology()
816 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) in remove_cpu_topology()
817 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_cpu_topology()
818 for_each_cpu(sibling, topology_cluster_cpumask(cpu)) in remove_cpu_topology()
819 cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); in remove_cpu_topology()
820 for_each_cpu(sibling, topology_llc_cpumask(cpu)) in remove_cpu_topology()
821 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); in remove_cpu_topology()
823 clear_cpu_topology(cpu); in remove_cpu_topology()
834 int cpu, ret; in init_cpu_topology() local
850 for_each_possible_cpu(cpu) { in init_cpu_topology()
851 ret = fetch_cache_info(cpu); in init_cpu_topology()