/linux/tools/sched_ext/

scx_central.c
     61  skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();  in main()
     64  assert(skel->rodata->nr_cpu_ids > 0);  in main()
     65  assert(skel->rodata->nr_cpu_ids <= INT32_MAX);  in main()
     74  if (central_cpu >= skel->rodata->nr_cpu_ids) {  in main()
     75  …tderr, "invalid central CPU id value, %u given (%u max)\n", central_cpu, skel->rodata->nr_cpu_ids);  in main()
     91  RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids);  in main()
     92  RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids);  in main()
    107  cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids);  in main()
    109  CPU_ZERO_S(CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids), cpuset);  in main()
    113  skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);  in main()
|
scx_central.bpf.c
     59  const volatile u32 nr_cpu_ids = 1; /* !0 for veristat, set during init */  variable
    188  bpf_for(cpu, 0, nr_cpu_ids) {  in BPF_STRUCT_OPS()
    195  gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);  in BPF_STRUCT_OPS()
    226  gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);  in BPF_STRUCT_OPS()
    241  u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);  in BPF_STRUCT_OPS()
    249  u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);  in BPF_STRUCT_OPS()
    267  bpf_for(i, 0, nr_cpu_ids) {  in central_timerfn()
    268  s32 cpu = (nr_timers + i) % nr_cpu_ids;  in central_timerfn()
    275  started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);  in central_timerfn()
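Both halves rely on the same contract: user space seeds skel->rodata->nr_cpu_ids from libbpf before the skeleton is loaded, and every CPU-indexed access on the BPF side stays below that bound. A minimal user-space sketch of the sizing step, using only libbpf's libbpf_num_possible_cpus(); the per-CPU array here is hypothetical, standing in for the RESIZE_ARRAY() datasec arrays above:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* possible-CPU count as libbpf sees it; negative means failure */
	int nr_cpu_ids = libbpf_num_possible_cpus();

	assert(nr_cpu_ids > 0 && nr_cpu_ids <= INT32_MAX);

	/* hypothetical per-CPU bookkeeping, sized once before any access */
	uint64_t *cpu_started_at = calloc(nr_cpu_ids, sizeof(*cpu_started_at));
	if (!cpu_started_at)
		return 1;

	for (int cpu = 0; cpu < nr_cpu_ids; cpu++)
		cpu_started_at[cpu] = 0;	/* indices always stay below nr_cpu_ids */

	free(cpu_started_at);
	return 0;
}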
|
/linux/rust/kernel/

cpu.rs
     11  pub fn nr_cpu_ids() -> u32 {  in nr_cpu_ids()  function
     20  bindings::nr_cpu_ids  in nr_cpu_ids()
     58  debug_assert!((id as u32) < nr_cpu_ids());  in from_i32_unchecked()
     66  if id < 0 || id as u32 >= nr_cpu_ids() {  in from_i32()
     81  debug_assert!(id < nr_cpu_ids());  in from_u32_unchecked()
     92  if id >= nr_cpu_ids() {  in from_u32()
|
/linux/lib/tests/

cpumask_kunit.c
     44  for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
     68  KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),  in test_cpumask_weight()
     70  KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));  in test_cpumask_weight()
     75  KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));  in test_cpumask_first()
     79  KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),  in test_cpumask_first()
     87  KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),  in test_cpumask_last()
     94  KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),  in test_cpumask_next()
     97  KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),  in test_cpumask_next()
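These tests pin down the library-wide convention that a failed search returns a value of at least nr_cpu_ids, which is why an empty mask is checked with KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), ...). A small user-space analogue of that convention (not the kernel cpumask API):

#include <assert.h>
#include <stdint.h>

/* Return the first set bit, or nbits when none is set -- mirroring how
 * cpumask_first()/cpumask_next() return >= nr_cpu_ids on an empty mask.
 */
static unsigned int first_set(uint64_t mask, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (mask & (UINT64_C(1) << i))
			return i;
	return nbits;
}

int main(void)
{
	unsigned int nr_cpu_ids = 8;	/* assumed CPU count for the sketch */

	assert(first_set(0x00, nr_cpu_ids) >= nr_cpu_ids);	/* empty mask */
	assert(first_set(0x10, nr_cpu_ids) == 4);		/* CPU 4 is the first set bit */
	return 0;
}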
|
/linux/kernel/

watchdog_buddy.c
     17  return nr_cpu_ids;  in watchdog_next_cpu()
     48  if (next_cpu < nr_cpu_ids)  in watchdog_hardlockup_enable()
     72  if (next_cpu < nr_cpu_ids)  in watchdog_hardlockup_disable()
     99  if (next_cpu >= nr_cpu_ids)  in watchdog_buddy_check_hardlockup()
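watchdog_next_cpu() uses nr_cpu_ids as its "no buddy" sentinel, so each caller tests next_cpu < nr_cpu_ids before using the result. A user-space analogue of that wrap-around lookup over a simple online bitmask (assumed values, not the kernel helper):

#include <assert.h>
#include <stdint.h>

/* Return the next online CPU after `cpu`, wrapping around, or nr_cpu_ids
 * when `cpu` is the only online CPU (no buddy exists).
 */
static unsigned int next_online(uint64_t online, unsigned int cpu,
				unsigned int nr_cpu_ids)
{
	for (unsigned int i = 1; i < nr_cpu_ids; i++) {
		unsigned int next = (cpu + i) % nr_cpu_ids;

		if (online & (UINT64_C(1) << next))
			return next;
	}
	return nr_cpu_ids;	/* sentinel: no other online CPU */
}

int main(void)
{
	assert(next_online(0x0F, 3, 8) == 0);	/* wraps around to CPU 0 */
	assert(next_online(0x08, 3, 8) == 8);	/* alone: returns nr_cpu_ids */
	return 0;
}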
|
scftorture.c
    372  cpu = torture_random(trsp) % nr_cpu_ids;  in scftorture_invoke_one()
    379  cpu = torture_random(trsp) % nr_cpu_ids;  in scftorture_invoke_one()
    402  cpu = torture_random(trsp) % nr_cpu_ids;  in scftorture_invoke_one()
    485  cpu = scfp->cpu % nr_cpu_ids;  in scftorture_invoker()
    497  __func__, scfp->cpu, curcpu, nr_cpu_ids);  in scftorture_invoker()
    563  for (i = 0; i < nr_cpu_ids; i++)  in scf_torture_cleanup()
    599  weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids;  in scf_torture_init()
    600  weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids;  in scf_torture_init()
    601  weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids;  in scf_torture_init()
    602  weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids;  in scf_torture_init()
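Two distinct uses show up here: torture_random() modulo nr_cpu_ids picks an arbitrary, possibly offline target CPU, and the per-operation weights are scaled against 2 * nr_cpu_ids. A trivial sketch of the same arithmetic, with rand() standing in for torture_random() and an assumed CPU count:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int nr_cpu_ids = 8;				/* assumed for the sketch */
	unsigned int cpu = (unsigned int)rand() % nr_cpu_ids;	/* may be offline; caller copes */
	unsigned int weight_single = 2 * nr_cpu_ids;		/* weight scales with CPU count */

	printf("target CPU %u, weight_single %u\n", cpu, weight_single);
	return 0;
}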
|
/linux/arch/powerpc/kernel/

paca.c
     62  size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);  in alloc_shared_lppaca()
    245  paca_nr_cpu_ids = nr_cpu_ids;  in allocate_paca_ptrs()
    247  paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;  in allocate_paca_ptrs()
    291  new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;  in free_unused_pacas()
    296  paca_nr_cpu_ids = nr_cpu_ids;  in free_unused_pacas()
    309  paca_ptrs_size + paca_struct_size, nr_cpu_ids);  in free_unused_pacas()
|
setup-common.c
    326  if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)  in show_cpuinfo()
    338  if ((*pos) < nr_cpu_ids)  in c_start()
    419  for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) {  in assign_threads()
    461  cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32),  in smp_setup_cpu_maps()
    507  } else if (cpu >= nr_cpu_ids) {  in smp_setup_cpu_maps()
    512  if (cpu < nr_cpu_ids)  in smp_setup_cpu_maps()
    546  if (maxcpus > nr_cpu_ids) {  in smp_setup_cpu_maps()
    550  maxcpus, nr_cpu_ids);  in smp_setup_cpu_maps()
    551  maxcpus = nr_cpu_ids;  in smp_setup_cpu_maps()
    898  memblock_free(cpu_to_phys_id, nr_cpu_ids * sizeof(u32));  in smp_setup_pacas()
|
/linux/arch/x86/kernel/cpu/

topology.c
    289  if (apic_id != topo_info.boot_cpu_apic_id && topo_info.nr_assigned_cpus >= nr_cpu_ids) {  in topology_register_apic()
    290  pr_warn_once("CPU limit of %d reached. Ignoring further CPUs\n", nr_cpu_ids);  in topology_register_apic()
    443  unsigned int possible = nr_cpu_ids;  in topology_apply_cmdline_limits_early()
    452  if (possible < nr_cpu_ids) {  in topology_apply_cmdline_limits_early()
    494  if (WARN_ON_ONCE(assigned > nr_cpu_ids)) {  in topology_init_possible_cpus()
    495  disabled += assigned - nr_cpu_ids;  in topology_init_possible_cpus()
    496  assigned = nr_cpu_ids;  in topology_init_possible_cpus()
    498  allowed = min_t(unsigned int, total, nr_cpu_ids);  in topology_init_possible_cpus()
|
/linux/arch/arm/mach-spear/

platsmp.c
    102  if (ncores > nr_cpu_ids) {  in spear13xx_smp_init_cpus()
    104  ncores, nr_cpu_ids);  in spear13xx_smp_init_cpus()
    105  ncores = nr_cpu_ids;  in spear13xx_smp_init_cpus()
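This is the stock ARM SMP-init idiom (the same clamp recurs in bcm63xx_smp.c and omap-smp.c below): the core count reported by hardware is clipped to nr_cpu_ids with a warning. A stand-alone sketch of the clamp, with assumed values:

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpu_ids = 4;	/* e.g. limited by CONFIG_NR_CPUS or nr_cpus= */
	unsigned int ncores = 8;	/* e.g. read back from the SCU */

	if (ncores > nr_cpu_ids) {
		printf("SMP: %u cores greater than maximum (%u), clipping\n",
		       ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}
	printf("bringing up %u cores\n", ncores);
	return 0;
}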
|
/linux/include/linux/

cpumask.h
     25  #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
     28  #define nr_cpu_ids ((unsigned int)NR_CPUS)  macro
     30  extern unsigned int nr_cpu_ids;
     36  WARN_ON(nr != nr_cpu_ids);  in set_nr_cpu_ids()
     38  nr_cpu_ids = nr;  in set_nr_cpu_ids()
     70  #define small_cpumask_bits nr_cpu_ids
     73  #define small_cpumask_bits nr_cpu_ids
     74  #define large_cpumask_bits nr_cpu_ids
     76  #define nr_cpumask_bits nr_cpu_ids
    366  return find_random_bit(cpumask_bits(src), nr_cpu_ids);  in cpumask_random()
    [all …]
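The macro at line 28 and the extern at line 30 are the two faces of nr_cpu_ids: with a single possible CPU it collapses to a compile-time constant, otherwise it is a runtime variable that set_nr_cpu_ids() fixes during early boot. A compressed sketch of that split (NR_CPUS is assumed here; the real header keys the choice off CONFIG_* options):

#include <stdio.h>

#define NR_CPUS 1

#if NR_CPUS == 1
#define nr_cpu_ids ((unsigned int)NR_CPUS)	/* constant, no storage needed */
#else
unsigned int nr_cpu_ids = NR_CPUS;		/* trimmed later by set_nr_cpu_ids() */
#endif

int main(void)
{
	printf("nr_cpu_ids = %u\n", nr_cpu_ids);
	return 0;
}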
|
/linux/lib/

cpumask.c
    117  WARN_ON(cpu >= nr_cpu_ids);  in cpumask_local_spread()
    143  if (next < nr_cpu_ids)  in cpumask_any_and_distribute()
    163  if (next < nr_cpu_ids)  in cpumask_any_distribute()
|
objpool.c
     53  for (i = 0; i < nr_cpu_ids; i++) {  in objpool_init_percpu_slots()
    112  for (i = 0; i < nr_cpu_ids; i++)  in objpool_fini_percpu_slots()
    145  slot_size = nr_cpu_ids * sizeof(struct objpool_slot);  in objpool_init()
|
/linux/arch/arm/mach-bcm/

bcm63xx_smp.c
     64  if (ncores > nr_cpu_ids) {  in scu_a9_enable()
     66  ncores, nr_cpu_ids);  in scu_a9_enable()
     67  ncores = nr_cpu_ids;  in scu_a9_enable()
|
/linux/arch/arm/kernel/

devtree.c
    129  if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than "  in arm_dt_init_cpu_maps()
    131  cpuidx, nr_cpu_ids)) {  in arm_dt_init_cpu_maps()
    132  cpuidx = nr_cpu_ids;  in arm_dt_init_cpu_maps()
|
/linux/arch/riscv/kernel/

smpboot.c
    153  if (cpuid > nr_cpu_ids)  in of_parse_and_init_cpus()
    155  cpuid, nr_cpu_ids);  in of_parse_and_init_cpus()
    169  for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)  in setup_smp()
|
acpi_numa.c
     42  for (cpu = 0; cpu < nr_cpu_ids; cpu++)  in get_cpu_for_acpi_id()
    100  for (i = 0; i < nr_cpu_ids; i++)  in acpi_map_cpus_to_nodes()
|
/linux/kernel/irq/

ipi.c
     70  if (next < nr_cpu_ids)  in irq_reserve_ipi()
     72  if (next < nr_cpu_ids) {  in irq_reserve_ipi()
    168  if (!data || cpu >= nr_cpu_ids)  in ipi_get_hwirq()
    199  if (cpu >= nr_cpu_ids)  in ipi_send_verify()
|
/linux/scripts/gdb/linux/

timerlist.py
    150  nr_cpu_ids = 1
    152  nr_cpu_ids = gdb.parse_and_eval("nr_cpu_ids")
    156  num_bytes = (nr_cpu_ids + 7) / 8
    172  extra = nr_cpu_ids % 8
|
/linux/net/netfilter/

nf_flow_table_procfs.c
     14  for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {  in nf_flow_table_cpu_seq_start()
     29  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {  in nf_flow_table_cpu_seq_next()
|
/linux/arch/arm64/kernel/

topology.c
    168  if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))  in freq_counters_valid()
    325  ref_cpu = nr_cpu_ids;  in arch_freq_get_on_cpu()
    334  if (ref_cpu >= nr_cpu_ids)  in arch_freq_get_on_cpu()
    476  if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))  in cpc_ffh_supported()
|
/linux/drivers/infiniband/hw/hfi1/

affinity.c
     75  if (cpu >= nr_cpu_ids) /* empty */  in cpu_mask_set_get_first()
    114  cpumask_clear_cpus(&node_affinity.real_cpu_mask, curr_cpu, nr_cpu_ids - curr_cpu);  in init_real_cpu_mask()
    253  if (ret_cpu >= nr_cpu_ids) {  in per_cpu_affinity_get()
    289  if (max_cpu >= nr_cpu_ids)  in per_cpu_affinity_put_max()
    343  if (cpu >= nr_cpu_ids)  in _dev_comp_vect_cpu_get()
    346  if (cpu >= nr_cpu_ids) { /* empty */  in _dev_comp_vect_cpu_get()
    591  if (cpumask_first(local_mask) >= nr_cpu_ids)  in hfi1_dev_affinity_init()
    652  if (curr_cpu >= nr_cpu_ids)  in hfi1_dev_affinity_init()
    971  cpumask_clear_cpus(hw_thread_mask, curr_cpu, nr_cpu_ids - curr_cpu);  in find_hw_thread_mask()
   1139  if (cpu >= nr_cpu_ids) /* empty */  in hfi1_get_proc_affinity()
|
/linux/arch/arm/mach-omap2/

omap-smp.c
    278  if (ncores > nr_cpu_ids) {  in omap4_smp_init_cpus()
    280  ncores, nr_cpu_ids);  in omap4_smp_init_cpus()
    281  ncores = nr_cpu_ids;  in omap4_smp_init_cpus()
|
/linux/kernel/sched/

isolation.c
     42  if (cpu < nr_cpu_ids)  in housekeeping_any_cpu()
     46  if (likely(cpu < nr_cpu_ids))  in housekeeping_any_cpu()
    139  if (first_cpu >= nr_cpu_ids || first_cpu >= setup_max_cpus) {  in housekeeping_setup()
|
/linux/drivers/perf/

arm_pmu_platform.c
     87  cpu = nr_cpu_ids;  in pmu_parse_irq_affinity()
    123  if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))  in pmu_parse_irqs()
    141  if (cpu >= nr_cpu_ids)  in pmu_parse_irqs()
|