Lines Matching full:smt
35 cpumask_var_t smt; member
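
These matches appear to come from the sched_ext built-in idle tracking code (kernel/sched/ext_idle.c in recent kernels). The member at 35 is one of two cpumasks kept per idle-tracking domain; a minimal sketch of the surrounding layout follows, where the struct name is an assumption and only the two members plus the scx_idle_global_masks/scx_idle_node_masks instances below are confirmed by this listing:

/*
 * Assumed container for the 'smt' member at 35: 'cpu' tracks CPUs
 * whose idle bit is set, 'smt' tracks CPUs belonging to a wholly
 * idle SMT core, so a fully idle core is one mask lookup away.
 */
struct scx_idle_cpus {
        cpumask_var_t cpu;
        cpumask_var_t smt;
};
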
78 * SMT mask should be cleared whether we can claim @cpu or not. The SMT in scx_idle_test_and_clear_cpu()
83 const struct cpumask *smt = cpu_smt_mask(cpu); in scx_idle_test_and_clear_cpu() local
84 struct cpumask *idle_smts = idle_cpumask(node)->smt; in scx_idle_test_and_clear_cpu()
89 * @cpu is never cleared from the idle SMT mask. Ensure that in scx_idle_test_and_clear_cpu()
96 if (cpumask_intersects(smt, idle_smts)) in scx_idle_test_and_clear_cpu()
97 cpumask_andnot(idle_smts, idle_smts, smt); in scx_idle_test_and_clear_cpu()
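
The pair at 96-97 enforces the central invariant of scx_idle_test_and_clear_cpu(): once @cpu is probed, its core can no longer be wholly idle, so the entire sibling set is dropped from the idle-core mask whether or not the claim succeeds. A minimal userspace model of that step, with uint64_t bitmasks standing in for cpumask_t and a hypothetical 2-way smt_mask() helper, not the kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sibling set of @cpu, assuming 2-way SMT paired as (0,1), (2,3), ... */
static uint64_t smt_mask(int cpu)
{
        return 3ULL << (cpu & ~1);
}

/* Model of 96-97 plus the final claim: clear the sibling set from the
 * idle-core mask first, then test-and-clear the per-CPU idle bit. */
static bool test_and_clear_idle(int cpu, uint64_t *idle_cpus,
                                uint64_t *idle_smts)
{
        if (*idle_smts & smt_mask(cpu))
                *idle_smts &= ~smt_mask(cpu);

        if (!(*idle_cpus & (1ULL << cpu)))
                return false;
        *idle_cpus &= ~(1ULL << cpu);
        return true;
}

int main(void)
{
        uint64_t idle_cpus = 0xf, idle_smts = 0xf;      /* CPUs 0-3 fully idle */

        test_and_clear_idle(1, &idle_cpus, &idle_smts);
        /* Prints cpu=d smt=c: CPU 1 is claimed and core {0,1} is no
         * longer wholly idle, but CPU 0 stays individually idle. */
        printf("cpu=%llx smt=%llx\n",
               (unsigned long long)idle_cpus, (unsigned long long)idle_smts);
        return 0;
}

Clearing unconditionally is what the comments at 78 and 89 are about: if a stale bit lingered in the SMT mask, a picker could keep selecting a CPU it can never actually claim.
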
115 cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed); in pick_idle_cpu_in_node()
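
Line 115 is the first pass of the node-local pick: scan the idle-core mask before falling back to the plain idle mask. Continuing the bitmask model above, a rough sketch of that two-pass order; note the real cpumask_any_and_distribute() rotates its starting CPU to spread picks, while this sketch simply takes the lowest set bit:

/* Prefer a CPU inside a wholly idle core; optionally bail out instead
 * of falling back, which models the SCX_PICK_IDLE_CORE behavior. */
static int pick_idle(uint64_t idle_cpus, uint64_t idle_smts,
                     uint64_t allowed, bool core_only)
{
        uint64_t m = idle_smts & allowed;

        if (m)
                return __builtin_ctzll(m);      /* CPU in a fully idle core */
        if (core_only)
                return -1;
        m = idle_cpus & allowed;
        return m ? __builtin_ctzll(m) : -1;     /* any idle CPU */
}
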
399 * idle) to avoid interference caused by SMT.
499 * If CPU has SMT, any wholly idle CPU is likely a better pick than in scx_select_cpu_dfl()
506 if (cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) && in scx_select_cpu_dfl()
543 * Give up if we're strictly looking for a full-idle SMT in scx_select_cpu_dfl()
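
Lines 499, 506 and 543 sketch the SMT-aware priority order of the default selection: keep a prev_cpu whose whole core is idle, otherwise prefer any wholly idle core, and only then settle for partially idle CPUs, unless the caller strictly demanded a full-idle core. A condensed model reusing the pick_idle() helper above (simplified: prev_cpu is assumed to be in the allowed set):

static int select_cpu_model(int prev_cpu, uint64_t allowed, bool core_only,
                            uint64_t idle_cpus, uint64_t idle_smts)
{
        int cpu;

        /* 506: prev_cpu's whole core is idle, so it is cache-warm and
         * free of SMT interference; keep it. */
        if ((idle_smts >> prev_cpu) & 1)
                return prev_cpu;

        /* 499: any wholly idle core likely beats a half-busy prev_cpu. */
        cpu = pick_idle(idle_cpus, idle_smts, allowed, true);
        if (cpu >= 0)
                return cpu;

        /* 543: give up here when strictly asked for a full-idle core. */
        if (core_only)
                return -1;

        /* Otherwise settle for an idle prev_cpu, then any idle CPU. */
        if ((idle_cpus >> prev_cpu) & 1)
                return prev_cpu;
        return pick_idle(idle_cpus, idle_smts, allowed, false);
}
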
603 BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL)); in scx_idle_init_masks()
616 BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->smt, GFP_KERNEL, node)); in scx_idle_init_masks()
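
The BUG_ON()s at 603/616 show the allocation pattern: one global pair of masks plus a NUMA-local pair per node. A hedged reconstruction of that init path; the function name is illustrative and the allocation of the per-node container itself is elided:

static void __init init_masks_sketch(void)
{
        int node;

        /* Global masks, used when per-node idle tracking is disabled. */
        BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
        BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));

        /* Per-node masks, allocated on their home node so that idle
         * scans touch node-local memory. */
        for_each_node(node) {
                BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->cpu,
                                               GFP_KERNEL, node));
                BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->smt,
                                               GFP_KERNEL, node));
        }
}
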
629 const struct cpumask *smt = cpu_smt_mask(cpu); in update_builtin_idle() local
630 struct cpumask *idle_smts = idle_cpumask(node)->smt; in update_builtin_idle()
637 if (!cpumask_subset(smt, idle_cpus)) in update_builtin_idle()
639 cpumask_or(idle_smts, idle_smts, smt); in update_builtin_idle()
641 cpumask_andnot(idle_smts, idle_smts, smt); in update_builtin_idle()
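
Lines 637-641 are the write side of the invariant: when a CPU goes idle, its core joins the SMT mask only if every sibling is already idle (the subset test at 637 includes the CPU just marked); when it leaves idle, the whole core is removed immediately. The same logic in the bitmask model, with smt_mask() as sketched earlier:

/* Model of update_builtin_idle(): keep the per-CPU bit and the
 * derived whole-core bit consistent on every idle transition. */
static void update_idle(int cpu, bool idle,
                        uint64_t *idle_cpus, uint64_t *idle_smts)
{
        uint64_t smt = smt_mask(cpu);

        if (idle) {
                *idle_cpus |= 1ULL << cpu;
                /* 637/639: mark the core only once all siblings are idle. */
                if ((*idle_cpus & smt) == smt)
                        *idle_smts |= smt;
        } else {
                *idle_cpus &= ~(1ULL << cpu);
                /* 641: any busy sibling makes the core non-idle. */
                *idle_smts &= ~smt;
        }
}

Together with the clearing at 96-97, this keeps the SMT mask a subset of the per-CPU mask, which is what the readers at 115 and 506 rely on.
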
708 cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask); in reset_idle_masks()
716 cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask); in reset_idle_masks()
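
Lines 708 and 716 are the two reset flavors: with per-node tracking off, the single NUMA_NO_NODE masks start as all online CPUs; with it on, each node's masks start as that node's online CPUs. A sketch of that path, assuming a static key (called scx_builtin_idle_per_node here) selects between the modes:

static void reset_masks_sketch(void)
{
        int node;

        if (!static_branch_likely(&scx_builtin_idle_per_node)) {
                /* 708: one global domain covering every online CPU. */
                cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);
                cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);
                return;
        }

        for_each_node(node) {
                const struct cpumask *node_mask = cpumask_of_node(node);

                /* 716: restrict each node's masks to its online CPUs. */
                cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
                cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
        }
}
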
908 return idle_cpumask(node)->smt; in scx_bpf_get_idle_smtmask_node()
936 return idle_cpumask(NUMA_NO_NODE)->smt; in scx_bpf_get_idle_smtmask()
946 * either the percpu, or SMT idle-tracking cpumask.
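
Lines 908/936 back the scx_bpf_get_idle_smtmask*() kfuncs and 946 documents the shared release path. A usage sketch of how a scheduler's select_cpu callback might consume the SMT mask; the kfunc names are the real sched_ext ones, while the program scaffolding is illustrative, assumes the scx tooling headers, and omits the struct_ops registration:

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
                   s32 prev_cpu, u64 wake_flags)
{
        const struct cpumask *idle_smts = scx_bpf_get_idle_smtmask();
        s32 cpu;

        /* Keep prev_cpu if its whole SMT core is idle and it can
         * actually be claimed (mirrors the in-kernel check at 506). */
        if (bpf_cpumask_test_cpu(prev_cpu, idle_smts) &&
            scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
                cpu = prev_cpu;
        } else {
                /* Otherwise ask for a wholly idle core; fall back to
                 * prev_cpu if none is available. */
                cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
                if (cpu < 0)
                        cpu = prev_cpu;
        }

        /* The get/put pair brackets the RCU-protected view (see 946). */
        scx_bpf_put_idle_cpumask(idle_smts);
        return cpu;
}
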