/linux/arch/x86/kernel/

smpboot.c
  1012  zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node);  in smp_prepare_cpus_common()
  1013  zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node);  in smp_prepare_cpus_common()
  1014  zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node);  in smp_prepare_cpus_common()
  1015  zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node);  in smp_prepare_cpus_common()
  1016  zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node);  in smp_prepare_cpus_common()
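
These per-CPU topology masks are allocated once per possible CPU, with the node argument asking for storage local to that CPU. A minimal sketch of the pattern, assuming a hypothetical per-CPU variable demo_sibling_map (not the kernel's cpu_sibling_map):

    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/percpu.h>
    #include <linux/topology.h>

    static DEFINE_PER_CPU(cpumask_var_t, demo_sibling_map);

    static void __init demo_alloc_topology_masks(void)
    {
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
            /* Returns a zeroed mask; with CONFIG_CPUMASK_OFFSTACK=y the
             * backing storage is heap-allocated, preferably on this node. */
            zalloc_cpumask_var_node(&per_cpu(demo_sibling_map, cpu),
                                    GFP_KERNEL, cpu_to_node(cpu));
        }
    }

As in the smp_prepare_cpus_common() hits above, the return value is ignored here: without CONFIG_CPUMASK_OFFSTACK the call cannot fail at all.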

kvm.c
   691  zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),  in kvm_alloc_cpumask()

/linux/arch/powerpc/kernel/

smp.c
   927  zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));  in update_mask_from_threadgroup()
  1072  zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),  in init_big_cores()
  1110  zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),  in smp_prepare_cpus()
  1112  zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),  in smp_prepare_cpus()
  1114  zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),  in smp_prepare_cpus()
  1117  zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),  in smp_prepare_cpus()

/linux/kernel/irq/

irqdesc.c
    57  if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,  in alloc_masks()
    62  if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,  in alloc_masks()
    70  if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {  in alloc_masks()
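
alloc_masks() checks each allocation and frees the masks that already succeeded before reporting failure. A sketch of that unwind shape, with an illustrative struct and function name:

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    struct demo_masks {
        cpumask_var_t affinity;
        cpumask_var_t pending;
    };

    static int demo_alloc_masks(struct demo_masks *m, gfp_t gfp, int node)
    {
        if (!zalloc_cpumask_var_node(&m->affinity, gfp, node))
            return -ENOMEM;

        if (!zalloc_cpumask_var_node(&m->pending, gfp, node)) {
            /* Unwind the mask that did succeed before failing. */
            free_cpumask_var(m->affinity);
            return -ENOMEM;
        }
        return 0;
    }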

/linux/arch/x86/kernel/apic/

x2apic_cluster.c
   188  if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))  in x2apic_prepare_cpu()

/linux/include/linux/

cpumask.h
  1012  bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)  in zalloc_cpumask_var_node() function
  1072  static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,  in zalloc_cpumask_var_node() function
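
These two hits are the function's build variants, and they explain every caller in this listing: with CONFIG_CPUMASK_OFFSTACK=y the out-of-line version (line 1012) heap-allocates the mask and can fail; otherwise cpumask_var_t is a fixed-size array, the inline version (line 1072) merely zeroes it and always returns true, and free_cpumask_var() is a no-op. Callers write the same code either way; a minimal lifecycle sketch:

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static int demo_use_mask(int node)
    {
        cpumask_var_t mask;

        if (!zalloc_cpumask_var_node(&mask, GFP_KERNEL, node))
            return -ENOMEM;        /* only reachable with OFFSTACK=y */

        cpumask_set_cpu(0, mask);  /* the mask starts out all-zero */

        free_cpumask_var(mask);    /* safe in both configurations */
        return 0;
    }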

/linux/kernel/

smp.c
    58  if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,  in smpcfd_prepare_cpu()
    61  if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,  in smpcfd_prepare_cpu()
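
smpcfd_prepare_cpu() runs as a CPU-hotplug "prepare" callback: the masks are allocated on the incoming CPU's node before it is brought up, and returning an error aborts the bring-up. A sketch of the prepare/teardown pairing, using a hypothetical per-CPU mask:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/topology.h>

    static DEFINE_PER_CPU(cpumask_var_t, demo_mask);

    static int demo_prepare_cpu(unsigned int cpu)
    {
        if (!zalloc_cpumask_var_node(&per_cpu(demo_mask, cpu),
                                     GFP_KERNEL, cpu_to_node(cpu)))
            return -ENOMEM;  /* hotplug core aborts this CPU's bring-up */
        return 0;
    }

    static int demo_dead_cpu(unsigned int cpu)
    {
        free_cpumask_var(per_cpu(demo_mask, cpu));
        return 0;
    }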

workqueue.c
  7763  BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));  in workqueue_init_early()
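
Here, and in init_sched_ext_class() below, the allocation happens once during early boot where there is no reasonable way to recover from failure, so the call is wrapped in BUG_ON(); NUMA_NO_NODE means "no node preference". A minimal sketch with a hypothetical mask:

    #include <linux/bug.h>
    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/numa.h>

    static cpumask_var_t demo_pod_cpus;

    static void __init demo_init_early(void)
    {
        /* Must not fail this early in boot; panic via BUG_ON() if it does. */
        BUG_ON(!zalloc_cpumask_var_node(&demo_pod_cpus, GFP_KERNEL,
                                        NUMA_NO_NODE));
    }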

/linux/drivers/cpufreq/

acpi-cpufreq.c
   563  if (!zalloc_cpumask_var_node(  in acpi_cpufreq_early_init()

/linux/kernel/sched/

ext.c
  6307  BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));  in init_sched_ext_class()
  6308  BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));  in init_sched_ext_class()
  6309  BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));  in init_sched_ext_class()
  6310  BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));  in init_sched_ext_class()

rt.c
  2401  zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),  in init_sched_rt_class()

deadline.c
  2901  zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),  in init_sched_dl_class()

fair.c
 13614  zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));  in init_sched_fair_class()
 13615  zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));  in init_sched_fair_class()
 13616  zalloc_cpumask_var_node(&per_cpu(should_we_balance_tmpmask, i),  in init_sched_fair_class()

/linux/block/

blk-mq.c
  4005  if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))  in blk_mq_alloc_hctx()

/linux/drivers/net/ethernet/pensando/ionic/

ionic_lif.c
  3197  if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL,  in ionic_affinity_masks_alloc()
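
ionic_affinity_masks_alloc() fills an array of masks, one per queue interrupt. A sketch of that shape, assuming the usual unwind-on-failure and using illustrative names throughout:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    static cpumask_var_t *demo_alloc_affinity_masks(unsigned int nmasks, int node)
    {
        cpumask_var_t *masks;
        unsigned int i;

        masks = kcalloc(nmasks, sizeof(*masks), GFP_KERNEL);
        if (!masks)
            return NULL;

        for (i = 0; i < nmasks; i++) {
            if (!zalloc_cpumask_var_node(&masks[i], GFP_KERNEL, node)) {
                while (i--)  /* free the masks allocated so far */
                    free_cpumask_var(masks[i]);
                kfree(masks);
                return NULL;
            }
        }
        return masks;
    }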