| /linux/arch/sparc/include/asm/ |
| H A D | smp_32.h |
  68  sparc32_ipi_ops->cross_call(func, *cpu_online_mask, 0, 0, 0, 0);  in xc0()
  73  sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, 0, 0, 0);  in xc1()
  77  sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0);  in xc2()
  83  sparc32_ipi_ops->cross_call(func, *cpu_online_mask,  in xc3()
  90  sparc32_ipi_ops->cross_call(func, *cpu_online_mask,  in xc4()
|
| /linux/kernel/irq/ |
| H A D | cpuhotplug.c |
  40   !cpumask_intersects(m, cpu_online_mask)) {  in irq_needs_fixup()
  113  if (!cpumask_intersects(affinity, cpu_online_mask)) {  in migrate_one_irq()
  123  affinity = cpu_online_mask;  in migrate_one_irq()
  139  if (err == -ENOSPC && !irqd_affinity_is_managed(d) && affinity != cpu_online_mask) {  in migrate_one_irq()
  143  affinity = cpu_online_mask;  in migrate_one_irq()
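The migrate_one_irq() hits show the usual hotplug fallback: when an IRQ's affinity no longer intersects the online CPUs, the online mask itself becomes the target. A minimal sketch of that check, with pick_effective_affinity() as an invented helper name:

#include <linux/cpumask.h>

/* Hypothetical helper mirroring the fallback seen in migrate_one_irq(). */
static const struct cpumask *pick_effective_affinity(const struct cpumask *affinity)
{
	/* No online CPU left in the requested affinity: fall back. */
	if (!cpumask_intersects(affinity, cpu_online_mask))
		return cpu_online_mask;
	return affinity;
}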
|
| H A D | migration.c |
  29  if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {  in irq_fixup_move_pending()
  87  if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {  in irq_move_masked_irq()
|
| /linux/include/asm-generic/ |
| H A D | topology.h |
  49  #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
  51  #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
|
| /linux/arch/x86/xen/ |
| H A D | smp.c |
  149  for_each_cpu_and(cpu, mask, cpu_online_mask)  in __xen_send_IPI_mask()
  220  __xen_send_IPI_mask(cpu_online_mask, xen_vector);  in xen_send_IPI_all()
  241  for_each_cpu_and(cpu, mask, cpu_online_mask) {  in xen_send_IPI_mask_allbutself()
  251  xen_send_IPI_mask_allbutself(cpu_online_mask, vector);  in xen_send_IPI_allbutself()
|
| /linux/arch/powerpc/kernel/ |
| H A D | irq.c |
  364  if (cpumask_equal(mask, cpu_online_mask)) {  in irq_choose_cpu()
  373  irq_rover = cpumask_next(irq_rover, cpu_online_mask);  in irq_choose_cpu()
  375  irq_rover = cpumask_first(cpu_online_mask);  in irq_choose_cpu()
  381  cpuid = cpumask_first_and(mask, cpu_online_mask);  in irq_choose_cpu()
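irq_choose_cpu() combines two recurring idioms from this listing: a round-robin "rover" over cpu_online_mask when the caller asked for all CPUs, and cpumask_first_and() otherwise. A rough sketch, assuming a file-local irq_rover protected by whatever locking the real code uses:

#include <linux/cpumask.h>

static unsigned int irq_rover;	/* assumed file-local state, locking omitted */

static unsigned int choose_irq_cpu(const struct cpumask *mask)
{
	if (cpumask_equal(mask, cpu_online_mask)) {
		/* Spread interrupts across all online CPUs, wrapping at the end. */
		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);
		return irq_rover;
	}
	/* Otherwise pick the first CPU that is both requested and online. */
	return cpumask_first_and(mask, cpu_online_mask);
}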
|
| H A D | rtasd.c |
  437  cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in rtas_event_scan()
  439  cpu = cpumask_first(cpu_online_mask);  in rtas_event_scan()
  492  schedule_delayed_work_on(cpumask_first(cpu_online_mask),  in start_event_scan()
|
| /linux/arch/x86/kernel/apic/ |
| H A D | ipi.c |
  60   __apic_send_IPI_mask_allbutself(cpu_online_mask, vector);  in apic_send_IPI_allbutself()
  87   if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))  in native_send_call_func_ipi()
  287  WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);  in default_send_IPI_mask_logical()
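The native_send_call_func_ipi() hit tests whether the target mask plus the sending CPU already covers every online CPU; only then can the cheaper "all-but-self" shorthand be used instead of addressing CPUs individually. A sketch of just that test (can_use_allbutself() is an invented name):

#include <linux/cpumask.h>

static bool can_use_allbutself(const struct cpumask *mask, unsigned int this_cpu)
{
	/* mask | {this_cpu} == cpu_online_mask ? */
	return cpumask_or_equal(mask, cpumask_of(this_cpu), cpu_online_mask);
}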
|
| /linux/arch/riscv/kernel/ |
| H A D | sys_hwprobe.c |
  356  cpumask_copy(&cpus, cpu_online_mask);  in hwprobe_get_values()
  369  cpumask_and(&cpus, &cpus, cpu_online_mask);  in hwprobe_get_values()
  417  cpumask_copy(&cpus, cpu_online_mask);  in hwprobe_get_cpus()
  419  cpumask_and(&cpus, &cpus, cpu_online_mask);  in hwprobe_get_cpus()
  498  hwprobe_one_pair(&pair, cpu_online_mask);  in complete_hwprobe_vdso_data()
|
| H A D | unaligned_access_speed.c |
  153  for_each_cpu(cpu, cpu_online_mask) {  in check_unaligned_access_speed_all_cpus()
  168  for_each_cpu(cpu, cpu_online_mask) {  in check_unaligned_access_speed_all_cpus()
  202  cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask);  in set_unaligned_access_static_branches_except_cpu()
  221  cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask);  in set_unaligned_access_static_branches()
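The static-branch updates above only count CPUs that are both marked as having fast misaligned access and currently online, hence the cpumask_and() with cpu_online_mask. A simplified sketch of that decision, with fast_mask standing in for fast_misaligned_access:

#include <linux/cpumask.h>

static bool all_online_cpus_are_fast(const struct cpumask *fast_mask)
{
	struct cpumask fast_and_online;

	/* Restrict the "fast" set to CPUs that are actually online. */
	cpumask_and(&fast_and_online, fast_mask, cpu_online_mask);
	return cpumask_equal(&fast_and_online, cpu_online_mask);
}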
|
| H A D | smp.c |
  261  cpumask_copy(&mask, cpu_online_mask);  in smp_send_stop()
  276  cpumask_pr_args(cpu_online_mask));  in smp_send_stop()
  313  cpumask_copy(&mask, cpu_online_mask);  in crash_smp_send_stop()
|
| /linux/drivers/net/wireguard/ |
| H A D | queueing.h |
  110  cpu = *stored_cpu = cpumask_nth(id % num_online_cpus(), cpu_online_mask);  in wg_cpumask_choose_online()
  122  int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);  in wg_cpumask_next_online()
  124  cpu = cpumask_first(cpu_online_mask);  in wg_cpumask_next_online()
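wg_cpumask_next_online() is the clearest instance of the wrap-around iteration that also appears in rtasd.c, tsc_sync.c and the s390 /proc/cpuinfo code: take the next online CPU after the last one used, and wrap back to the first online CPU when the mask runs out. A sketch under that reading:

#include <linux/compiler.h>
#include <linux/cpumask.h>

static int next_online_cpu(int *last_cpu)
{
	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);

	/* Past the last online CPU: wrap around to the first one. */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	WRITE_ONCE(*last_cpu, cpu);
	return cpu;
}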
|
| /linux/arch/arm/mach-omap2/ |
| H A D | cpuidle44xx.c |
  133  if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {  in omap_enter_idle_coupled()
  191  if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {  in omap_enter_idle_coupled()
  329  return cpuidle_register(idle_driver, cpu_online_mask);  in omap4_idle_init()
|
| /linux/include/linux/ |
| H A D | nmi.h |
  160  arch_trigger_cpumask_backtrace(cpu_online_mask, -1);  in trigger_all_cpu_backtrace()
  166  arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);  in trigger_allbutcpu_cpu_backtrace()
|
| H A D | smp.h |
  71   on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);  in on_each_cpu()
  105  on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);  in on_each_cpu_cond()
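Both smp.h hits are thin wrappers that forward to on_each_cpu_cond_mask() with cpu_online_mask, so "each CPU" always means "each online CPU". A usage sketch; do_sync() is an invented callback:

#include <linux/smp.h>

static void do_sync(void *info)
{
	/* Runs on every online CPU, via IPI on the remote ones. */
}

static void sync_all_cpus(void)
{
	/* wait=1: return only after every online CPU has run do_sync(). */
	on_each_cpu(do_sync, NULL, 1);
}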
|
| H A D | topology.h |
  274  return cpumask_nth_and(cpu, cpus, cpu_online_mask);  in sched_numa_find_nth_cpu()
  328  cpu_online_mask, \
|
| /linux/Documentation/translations/zh_TW/admin-guide/ |
| H A D | cputopology.rst |
  64  ...CPUs that as a result are not online. [~cpu_online_mask + cpus >= NR_CPUS]
  66  online: CPUs that are online and available for scheduling. [cpu_online_mask]
|
| /linux/Documentation/translations/zh_CN/admin-guide/ |
| H A D | cputopology.rst |
  64  ...CPUs that as a result are not online. [~cpu_online_mask + cpus >= NR_CPUS]
  66  online: CPUs that are online and available for scheduling. [cpu_online_mask]
|
| /linux/drivers/powercap/ |
| H A D | dtpm_cpu.c |
  50   nr_cpus = cpumask_weight_and(cpu_online_mask, to_cpumask(pd->cpus));  in set_pd_power_limit()
  82   for_each_cpu_and(cpu, pd_mask, cpu_online_mask)  in scale_pd_power_uw()
  128  nr_cpus = cpumask_weight_and(cpu_online_mask, to_cpumask(em->cpus));  in update_pd_power_uw()
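The dtpm hits scale per-domain power figures by how many of the domain's CPUs are currently online, via cpumask_weight_and(). A sketch of that scaling step, with pd_cpus as a stand-in for the domain's mask:

#include <linux/cpumask.h>
#include <linux/types.h>

static u64 scale_power_by_online_cpus(u64 power_per_cpu_uw, const struct cpumask *pd_cpus)
{
	/* Only CPUs that are both in the domain and online consume power. */
	unsigned int nr_cpus = cpumask_weight_and(cpu_online_mask, pd_cpus);

	return power_per_cpu_uw * nr_cpus;
}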
|
| /linux/arch/x86/kernel/ |
| H A D | tsc_sync.c |
  102  next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in tsc_sync_check_timer_fn()
  104  next_cpu = cpumask_first(cpu_online_mask);  in tsc_sync_check_timer_fn()
  456  smp_call_function_single(cpumask_first(cpu_online_mask), check_tsc_sync_source,  in check_tsc_sync_target()
|
| /linux/arch/s390/kernel/ |
| H A D | processor.c |
  345  unsigned long first = cpumask_first(cpu_online_mask);  in show_cpuinfo()
  359  *pos = cpumask_next(*pos - 1, cpu_online_mask);  in c_update()
  361  *pos = cpumask_first(cpu_online_mask);  in c_update()
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| H A D | irq_affinity.c |
  25  for_each_cpu_and(cpu, req_mask, cpu_online_mask) {  in cpu_get_least_loaded()
  40  best_cpu = cpumask_first(cpu_online_mask);  in cpu_get_least_loaded()
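cpu_get_least_loaded() walks only the CPUs that are both requested and online, and falls back to the first online CPU when the intersection is empty. A sketch with load_of() as an invented stand-in for the driver's per-CPU usage counter:

#include <linux/cpumask.h>

static unsigned int least_loaded_online_cpu(const struct cpumask *req_mask,
					    unsigned int (*load_of)(unsigned int cpu))
{
	unsigned int cpu, best_cpu = nr_cpu_ids;

	for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
		if (best_cpu >= nr_cpu_ids || load_of(cpu) < load_of(best_cpu))
			best_cpu = cpu;
	}
	/* Nothing in the request is online: fall back to any online CPU. */
	if (best_cpu >= nr_cpu_ids)
		best_cpu = cpumask_first(cpu_online_mask);
	return best_cpu;
}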
|
| /linux/drivers/cpufreq/ |
| H A D | speedstep-ich.c |
  263  policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);  in speedstep_target()
  298  policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);  in speedstep_cpu_init()
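Both speedstep hits use cpumask_any_and() to pick some online CPU from the policy's mask before running the frequency transition on it. A minimal sketch; pick_policy_cpu() is an invented name:

#include <linux/cpumask.h>
#include <linux/errno.h>

static int pick_policy_cpu(const struct cpumask *policy_cpus)
{
	/* Any CPU that is both in the policy and online will do. */
	unsigned int cpu = cpumask_any_and(policy_cpus, cpu_online_mask);

	return cpu < nr_cpu_ids ? (int)cpu : -ENODEV;
}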
|
| /linux/arch/mips/include/asm/mach-loongson64/ |
| H A D | topology.h |
  15  #define cpumask_of_pcibus(bus) (cpu_online_mask)
|
| /linux/kernel/power/ |
| H A D | poweroff.c |
  29  schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);  in handle_poweroff()
|