/linux/lib/tests/cpumask_kunit.c
   13  "%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
   19  int mask_weight = cpumask_weight(m); \
   33  weight = cpumask_weight(&mask_tmp); \
   42  int mask_weight = cpumask_weight(m); \
   67  KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));  in test_cpumask_weight()
   68  KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),  in test_cpumask_weight()
   70  KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));  in test_cpumask_weight()
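These KUnit assertions pin down the basic invariants: an all-clear mask weighs 0 and a fully set mask weighs nr_cpu_ids. A minimal sketch of the same checks, with illustrative test and variable names rather than the ones used in cpumask_kunit.c:

```c
#include <kunit/test.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Illustrative test case; mirrors the invariants asserted above. */
static void cpumask_weight_invariants(struct kunit *test)
{
	cpumask_var_t mask;

	KUNIT_ASSERT_TRUE(test, zalloc_cpumask_var(&mask, GFP_KERNEL));

	/* A zero-filled mask has no bits set. */
	KUNIT_EXPECT_EQ(test, 0u, cpumask_weight(mask));

	/* cpumask_setall() sets one bit per possible CPU. */
	cpumask_setall(mask);
	KUNIT_EXPECT_EQ(test, nr_cpu_ids, cpumask_weight(mask));

	free_cpumask_var(mask);
}
```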
|
/linux/lib/tests/test_ratelimit.c
  105  const int n_stress_kthread = cpumask_weight(cpu_online_mask);  in test_ratelimit_stress()
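Here the weight of cpu_online_mask sizes the stress load: one kthread per CPU that is online at that moment (equivalently, num_online_cpus()). A hedged sketch of the pattern; stress_fn and the thread naming are placeholders:

```c
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>

/* Sketch: spawn one worker per online CPU, as the stress test above
 * sizes its kthread pool. Teardown is omitted for brevity. */
static int spawn_stress_workers(int (*stress_fn)(void *data))
{
	const unsigned int nthreads = cpumask_weight(cpu_online_mask);
	struct task_struct *task;
	unsigned int i;

	for (i = 0; i < nthreads; i++) {
		task = kthread_run(stress_fn, NULL, "stress-%u", i);
		if (IS_ERR(task))
			return PTR_ERR(task);
	}

	return 0;
}
```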
|
/linux/arch/x86/include/asm/trace/hyperv.h
   21  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
   67  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
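Both Hyper-V events store just the weight of the target mask: a fixed-size ncpus field is much cheaper to log than a variable-length cpumask copy. A minimal trace header of the same shape (the subsystem and event names here are made up for illustration):

```c
/* Sketch of a trace header, e.g. include/trace/events/mysubsys.h. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mysubsys

#if !defined(_TRACE_MYSUBSYS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MYSUBSYS_H

#include <linux/cpumask.h>
#include <linux/tracepoint.h>

/* Record only the weight at trace time, not the mask itself. */
TRACE_EVENT(mysubsys_send_ipi,
	TP_PROTO(const struct cpumask *cpus),
	TP_ARGS(cpus),
	TP_STRUCT__entry(__field(unsigned int, ncpus)),
	TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);),
	TP_printk("ncpus %u", __entry->ncpus)
);

#endif /* _TRACE_MYSUBSYS_H */

#include <trace/define_trace.h>
```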
|
/linux/drivers/infiniband/hw/hfi1/affinity.c
  103  possible = cpumask_weight(&node_affinity.real_cpu_mask);  in init_real_cpu_mask()
  104  ht = cpumask_weight(topology_sibling_cpumask(  in init_real_cpu_mask()
  128  cpumask_weight(topology_sibling_cpumask(  in node_affinity_init()
  501  if (cpumask_weight(&entry->comp_vect_mask) == 1) {  in _dev_comp_vect_cpu_mask_init()
  507  cpumask_weight(&entry->comp_vect_mask) /  in _dev_comp_vect_cpu_mask_init()
  516  cpumask_weight(&entry->comp_vect_mask) %  in _dev_comp_vect_cpu_mask_init()
  620  possible = cpumask_weight(&entry->def_intr.mask);  in hfi1_dev_affinity_init()
 1002  } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {  in hfi1_get_proc_affinity()
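hfi1 derives its CPU layout arithmetically: the weight of a CPU's sibling mask is the number of hardware threads per core, and dividing a mask's weight by that yields physical cores. A sketch of that reasoning, assuming SMT is uniform across the node as the driver does:

```c
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Weight of the sibling mask = hardware threads per core
 * (assumes uniform SMT, as init_real_cpu_mask() above does). */
static unsigned int threads_per_core(unsigned int cpu)
{
	return cpumask_weight(topology_sibling_cpumask(cpu));
}

static unsigned int physical_cores(const struct cpumask *mask)
{
	unsigned int ht;

	if (cpumask_empty(mask))
		return 0;

	ht = threads_per_core(cpumask_first(mask));
	return ht ? cpumask_weight(mask) / ht : 0;
}
```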
|
/linux/drivers/thermal/intel/intel_hfi.c
  209  cpu_count = cpumask_weight(hfi_instance->cpus);  in update_capabilities()
  485  if (cpumask_weight(hfi_instance->cpus) == 1 && hfi_clients_nr > 0) {  in intel_hfi_online()
  529  if (!cpumask_weight(hfi_instance->cpus))  in intel_hfi_offline()
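intel_hfi treats the weight of an instance's mask as an online refcount: a weight of 1 right after setting a bit means the first CPU of the instance just came up, and weight 0 after clearing means the last one went down. A hedged sketch of the hotplug callbacks, with the instance struct reduced to the one relevant field:

```c
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Simplified stand-in for the driver's per-package instance. */
struct hfi_like_instance {
	struct cpumask *cpus;
};

static void instance_cpu_online(struct hfi_like_instance *inst, unsigned int cpu)
{
	cpumask_set_cpu(cpu, inst->cpus);
	if (cpumask_weight(inst->cpus) == 1)
		pr_info("first CPU of instance online: enable hardware\n");
}

static void instance_cpu_offline(struct hfi_like_instance *inst, unsigned int cpu)
{
	cpumask_clear_cpu(cpu, inst->cpus);
	if (!cpumask_weight(inst->cpus))
		pr_info("last CPU of instance offline: quiesce hardware\n");
}
```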
|
/linux/include/linux/cpumask.h
  849  static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)  in cpumask_weight() (definition)
 1198  #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
 1199  #define num_enabled_cpus()	cpumask_weight(cpu_enabled_mask)
 1200  #define num_present_cpus()	cpumask_weight(cpu_present_mask)
 1201  #define num_active_cpus()	cpumask_weight(cpu_active_mask)
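This is the definition site. cpumask_weight() is a population count over the mask's backing bitmap, and the num_*_cpus() helpers are just that popcount applied to the global masks. A simplified restatement (the real inline uses an optimized bit count over a possibly smaller bit range, so this is illustrative rather than a copy):

```c
#include <linux/bitmap.h>
#include <linux/cpumask.h>

/* Illustrative: the weight of a cpumask is the Hamming weight
 * (popcount) of its bitmap over the first nr_cpu_ids bits. */
static inline unsigned int cpumask_weight_sketch(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), nr_cpu_ids);
}
```

Note that num_online_cpus() is absent from this list: in current kernels it reads a counter maintained at hotplug time rather than recomputing the weight on every call.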
|
/linux/include/linux/topology.h
   40  #define nr_cpus_node(node)	cpumask_weight(cpumask_of_node(node))
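nr_cpus_node() composes two primitives: map a NUMA node to its cpumask, then weigh it. For instance, a sketch that logs the CPU count of every online node:

```c
#include <linux/nodemask.h>
#include <linux/printk.h>
#include <linux/topology.h>

static void log_cpus_per_node(void)
{
	int node;

	for_each_online_node(node)
		pr_info("node %d: %u CPUs\n", node, nr_cpus_node(node));
}
```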
|
/linux/kernel/irq/ipi.c
   40  nr_irqs = cpumask_weight(dest);  in irq_reserve_ipi()
  144  nr_irqs = cpumask_weight(dest);  in irq_destroy_ipi()
|
/linux/kernel/irq/affinity.c
  122  set_vecs = cpumask_weight(cpu_possible_mask);  in irq_calc_affinity_vectors()
|
/linux/arch/mips/kernel/crash.c
   75  while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {  in crash_kexec_prepare_cpus()
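MIPS here and LoongArch (machine_kexec.c, at the end of this list) use the same rendezvous idiom: each secondary CPU sets its bit in a shared mask from the crash IPI handler, and the crashing CPU spins until the weight reaches the expected count or a timeout expires. A sketch of the pattern:

```c
#include <linux/cpumask.h>
#include <linux/delay.h>

static struct cpumask cpus_checked_in;

/* Called by each secondary CPU from its crash/stop IPI handler. */
static void cpu_check_in(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &cpus_checked_in);
}

/* Crashing CPU: busy-wait (bounded) until the expected CPUs arrive;
 * mdelay() because sleeping is not an option in crash context. */
static bool wait_for_cpus(unsigned int ncpus, unsigned int msecs)
{
	while (cpumask_weight(&cpus_checked_in) < ncpus && msecs--)
		mdelay(1);

	return cpumask_weight(&cpus_checked_in) >= ncpus;
}
```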
|
/linux/rust/helpers/cpumask.c
   47  return cpumask_weight(srcp);  in rust_helper_cpumask_weight()
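cpumask_weight() is a static inline, so Rust's bindgen cannot call it directly; this helper exports it as a real linkable symbol for the Rust binding shown further down (rust/kernel/cpumask.rs). The whole helper is essentially just:

```c
#include <linux/cpumask.h>

/* Out-of-line trampoline: gives the inline a symbol Rust can call. */
unsigned int rust_helper_cpumask_weight(const struct cpumask *srcp)
{
	return cpumask_weight(srcp);
}
```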
|
/linux/lib/group_cpus.c
  148  ncpus = cpumask_weight(nmsk);  in alloc_nodes_groups()
  298  ncpus = cpumask_weight(nmsk);  in __group_cpus_evenly()
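group_cpus_evenly() weighs each per-node mask and then splits with div/mod so remainders land one per group, the same shape as the hfi1 arithmetic above. A hypothetical helper showing the split (assumes ngroups > 0):

```c
#include <linux/cpumask.h>

/* Hypothetical: how many CPUs of @mask should land in group @grp of
 * @ngroups, spreading the remainder over the first groups. */
static unsigned int cpus_for_group(const struct cpumask *mask,
				   unsigned int grp, unsigned int ngroups)
{
	unsigned int ncpus = cpumask_weight(mask);

	return ncpus / ngroups + (grp < ncpus % ngroups ? 1 : 0);
}
```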
|
/linux/arch/x86/kernel/smpboot.c
  557  threads = cpumask_weight(topology_sibling_cpumask(cpu));  in set_cpu_sibling_map()
 1130  int threads = cpumask_weight(topology_sibling_cpumask(cpu));  in recompute_smt_state()
 1148  if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)  in remove_siblinginfo()
 1157  if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1)  in remove_siblinginfo()
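x86 SMT bookkeeping hinges on the same measurement: a sibling-mask weight of 1 means the core runs a single thread, and the maximum weight over all CPUs is the current SMT width. A sketch of the recompute step:

```c
#include <linux/cpumask.h>
#include <linux/minmax.h>
#include <linux/topology.h>

/* Sketch: scan online CPUs for the widest sibling group, roughly
 * what recompute_smt_state() above does on CPU add/remove. */
static unsigned int widest_smt(void)
{
	unsigned int cpu, threads, max_threads = 0;

	for_each_online_cpu(cpu) {
		threads = cpumask_weight(topology_sibling_cpumask(cpu));
		max_threads = max(max_threads, threads);
	}

	return max_threads;	/* 1 => SMT effectively off */
}
```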
|
/linux/block/blk-mq-cpumap.c
   25  num = cpumask_weight(mask);  in blk_mq_num_queues()
|
/linux/arch/x86/platform/uv/uv_nmi.c
  629  k = n - cpumask_weight(uv_nmi_cpu_mask);  in uv_nmi_wait_cpus()
  687  cpumask_weight(uv_nmi_cpu_mask),  in uv_nmi_wait()
  697  cpumask_weight(uv_nmi_cpu_mask),  in uv_nmi_wait()
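uv_nmi appears to count progress by complement: the mask starts holding every CPU still expected, each CPU clears its own bit as it arrives in the handler, so n minus the weight is the number that have responded. A hedged one-liner of that reading:

```c
#include <linux/cpumask.h>

/* Sketch of the complement idiom: @pending holds CPUs not yet
 * arrived; each CPU clears its own bit when it checks in. */
static unsigned int cpus_responded(const struct cpumask *pending,
				   unsigned int n_signalled)
{
	return n_signalled - cpumask_weight(pending);
}
```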
|
/linux/kernel/sched/topology.c
  172  if (cpumask_weight(sched_domain_span(sd)) == 1)  in sd_degenerate()
  681  size = cpumask_weight(sched_domain_span(sd));  in update_top_cache_domain()
  994  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);  in init_overlap_sched_group()
 1221  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));  in get_group()
 1291  sg->group_weight = cpumask_weight(sched_group_span(sg));  in init_sched_groups_capacity()
 1634  sd_weight = cpumask_weight(tl->mask(tl, cpu));  in sd_init()
 2114  if (cpumask_weight(cpumask_of_node(node)) != 1)  in sched_update_numa()
 2522  sd->span_weight = cpumask_weight(sched_domain_span(sd));  in build_sched_domains()
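The scheduler caches weights aggressively: span_weight and group_weight are computed once at domain build time so hot paths never re-popcount, initial group capacity is SCHED_CAPACITY_SCALE per CPU in the span, and a domain spanning a single CPU is degenerate and gets folded away. A sketch of the capacity seed and the degeneracy test; the real sched_domain internals are private to kernel/sched/, so the struct here is a stand-in:

```c
#include <linux/cpumask.h>
#include <linux/sched/topology.h>	/* SCHED_CAPACITY_SCALE */

/* Stand-in for the private scheduler structures. */
struct toy_domain {
	struct cpumask span;
	unsigned int span_weight;
	unsigned long capacity;
};

static void toy_domain_init(struct toy_domain *sd)
{
	/* Cache the popcount once; hot paths read span_weight. */
	sd->span_weight = cpumask_weight(&sd->span);

	/* Seed capacity: one SCHED_CAPACITY_SCALE unit per CPU. */
	sd->capacity = (unsigned long)SCHED_CAPACITY_SCALE * sd->span_weight;
}

static bool toy_domain_degenerate(const struct toy_domain *sd)
{
	/* A domain covering a single CPU balances nothing. */
	return cpumask_weight(&sd->span) == 1;
}
```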
|
/linux/drivers/thermal/cpufreq_cooling.c
  285  num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);  in cpufreq_state2power()
  372  unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);  in allocate_idle_time()
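Note the two different masks: policy->cpus (the currently online members of the frequency domain) scales the power estimate, while policy->related_cpus (all possible members) sizes the per-CPU idle-time array so it survives hotplug. A sketch of the power side; per_cpu_power_mw is an illustrative input, not a driver field:

```c
#include <linux/cpufreq.h>
#include <linux/cpumask.h>

/* Sketch: total power of a frequency domain scales linearly with
 * the number of online CPUs sharing the policy. */
static u32 policy_power_mw(struct cpufreq_policy *policy, u32 per_cpu_power_mw)
{
	return per_cpu_power_mw * cpumask_weight(policy->cpus);
}
```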
|
/linux/arch/x86/kernel/cpu/resctrl/monitor.c
  394  cpus_per_node = cpumask_weight(node0_cpumask);  in snc_get_config()
  395  cpus_per_l3 = cpumask_weight(&ci->shared_cpu_map);  in snc_get_config()
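Sub-NUMA clustering is detected by pure ratio: if an L3 cache is shared by more CPUs than belong to one NUMA node, the L3 must span several SNC nodes, and the quotient is the SNC degree. A sketch of that arithmetic:

```c
#include <linux/cpumask.h>

/* Sketch of snc_get_config()'s arithmetic: SNC splits one L3 across
 * several NUMA nodes, so cpus_per_l3 / cpus_per_node > 1 under SNC. */
static unsigned int snc_ways(const struct cpumask *l3_cpus,
			     const struct cpumask *node_cpus)
{
	unsigned int per_node = cpumask_weight(node_cpus);

	return per_node ? cpumask_weight(l3_cpus) / per_node : 1;
}
```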
|
/linux/drivers/infiniband/sw/siw/siw_main.c
  170  num_cpus = cpumask_weight(tx_cpumask);  in siw_get_tx_cpu()
  174  num_cpus = cpumask_weight(tx_cpumask);  in siw_get_tx_cpu()
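siw picks a TX CPU by rotating through the mask modulo its weight; the two weighings in siw_get_tx_cpu() suggest a retry with a fallback mask when the first choice comes up empty. A sketch of the selection step, with an illustrative rotating counter:

```c
#include <linux/atomic.h>
#include <linux/cpumask.h>

static atomic_t tx_rr = ATOMIC_INIT(-1);	/* illustrative counter */

/* Sketch: round-robin over the CPUs of @mask, one pick per call;
 * returns -1 if the mask is empty. */
static int pick_tx_cpu(const struct cpumask *mask)
{
	unsigned int num_cpus = cpumask_weight(mask);
	unsigned int i, cpu;

	if (!num_cpus)
		return -1;

	i = atomic_inc_return(&tx_rr) % num_cpus;

	for_each_cpu(cpu, mask)
		if (i-- == 0)
			return cpu;

	return -1;
}
```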
|
/linux/rust/kernel/cpumask.rs
  151  unsafe { bindings::cpumask_weight(self.as_raw()) }  in weight()
|
/linux/drivers/base/cacheinfo.c
  949  return cpumask_weight(*map);  in cpu_map_shared_cache()
  958  return cpumask_weight(*map);  in cpu_map_shared_cache()
  990  nr_shared = cpumask_weight(&llc->shared_cpu_map);  in update_per_cpu_data_slice_size_cpu()
|
/linux/arch/x86/kernel/cpu/microcode/core.c
  496  unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);  in load_primary()
  629  nr_offl = cpumask_weight(&cpu_offline_mask);  in load_late_stop_cpus()
|
/linux/kernel/stop_machine.c
  429  cpu_stop_init_done(&done, cpumask_weight(cpumask));  in __stop_cpus()
  643  .num_threads = cpumask_weight(smt_mask),  in stop_core_cpuslocked()
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
   66  if (cpumask_weight(&af_desc->mask) > 1)  in irq_pool_request_irq()
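A weight test can also distinguish intent: a requested affinity mask with more than one bit set means "spread me", while a single bit pins the IRQ to that exact CPU. A sketch of the branch; both helper names are illustrative, not mlx5 API:

```c
#include <linux/cpumask.h>

/* Placeholder for the pool's load-balancing pick. */
static unsigned int least_loaded_cpu(const struct cpumask *mask)
{
	return cpumask_first(mask);
}

/* Sketch: one set bit = hard pin, several = let the allocator pick
 * a CPU inside the mask. */
static unsigned int choose_irq_cpu(const struct cpumask *requested)
{
	if (cpumask_weight(requested) > 1)
		return least_loaded_cpu(requested);

	return cpumask_first(requested);
}
```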
|
/linux/arch/loongarch/kernel/machine_kexec.c
  207  while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {  in crash_smp_send_stop()
|