Lines matching defs:prev_cpu

The matches below come from the sched_ext built-in idle-CPU selection code, scx_select_cpu_dfl() and the kfuncs that wrap it; the leading number on each line is its line number in the source file.

443  * begin in @prev_cpu's node and proceed to other nodes in order of
451 s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
456 int node = scx_cpu_node_if_enabled(prev_cpu);
463 * Check whether @prev_cpu is still within the allowed set. If not,
466 is_prev_allowed = cpumask_test_cpu(prev_cpu, allowed);
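The matches at 451-466 are the entry of scx_select_cpu_dfl(), the built-in idle-CPU selection policy. A condensed sketch of that entry; the allowed = cpus_allowed ?: p->cpus_ptr fallback is an assumption inferred from the same pattern in the match at 902:

	s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
			       const struct cpumask *cpus_allowed, u64 flags)
	{
		/* Fall back to the task's own affinity when no mask is given (assumed). */
		const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr;
		int node = scx_cpu_node_if_enabled(prev_cpu);
		bool is_prev_allowed;
		s32 cpu;

		/*
		 * Check whether @prev_cpu is still within the allowed set. If
		 * not, every branch below that would reuse @prev_cpu is skipped
		 * and the search falls through to a nearby idle CPU instead.
		 */
		is_prev_allowed = cpumask_test_cpu(prev_cpu, allowed);

		/* topology scoping and the idle searches follow */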
498 const struct cpumask *cpus = numa_span(prev_cpu);
508 const struct cpumask *cpus = llc_span(prev_cpu);
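Lines 498 and 508 scope the search by topology: numa_span() and llc_span() return the cpumasks of @prev_cpu's NUMA node and last-level-cache domain. A sketch of how those spans feed the later searches; the static-branch guards and the numa_cpus/llc_cpus locals are assumptions from context:

	const struct cpumask *numa_cpus = NULL, *llc_cpus = NULL;

	/* Only consult node topology when the system has multiple NUMA nodes (assumed guard). */
	if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
		numa_cpus = numa_span(prev_cpu);

	/* Only consult cache topology when there are multiple LLC domains (assumed guard). */
	if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
		llc_cpus = llc_span(prev_cpu);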
523 * If the waker's CPU is cache affine and prev_cpu is idle,
527 if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
528 scx_idle_test_and_clear_cpu(prev_cpu)) {
529 cpu = prev_cpu;
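Lines 523-529 are the synchronous-wakeup fast path: if the waker's CPU shares cache with @prev_cpu and @prev_cpu is idle, the task stays put rather than migrating toward the waker. A sketch of that branch; the SCX_WAKE_SYNC gate and the out_unlock label are assumed context:

	if (wake_flags & SCX_WAKE_SYNC) {
		cpu = smp_processor_id();	/* the waker's CPU */

		/*
		 * If the waker's CPU is cache affine and prev_cpu is idle,
		 * avoid the migration. scx_idle_test_and_clear_cpu() both
		 * tests the idle bit and claims the CPU in a single step.
		 */
		if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
		    scx_idle_test_and_clear_cpu(prev_cpu)) {
			cpu = prev_cpu;
			goto out_unlock;
		}
	}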
558 * partially idle @prev_cpu.
562 * Keep using @prev_cpu if it's part of a fully idle core.
565 cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
566 scx_idle_test_and_clear_cpu(prev_cpu)) {
567 cpu = prev_cpu;
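Lines 558-567 apply the SMT policy: a CPU whose sibling is busy is only partially idle, so @prev_cpu is kept at this stage only when its whole core is idle. Sketch, with the sched_smt_active() gate assumed:

	if (sched_smt_active()) {
		/*
		 * Keep using @prev_cpu if it's part of a fully idle core.
		 * idle_cpumask(node)->smt tracks CPUs whose siblings are all
		 * idle, so reusing one avoids SMT interference.
		 */
		if (is_prev_allowed &&
		    cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
		    scx_idle_test_and_clear_cpu(prev_cpu)) {
			cpu = prev_cpu;
			goto out_unlock;
		}
	}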
594 * begin in prev_cpu's node and proceed to other nodes in
612 * Use @prev_cpu if it's idle.
614 if (is_prev_allowed && scx_idle_test_and_clear_cpu(prev_cpu)) {
615 cpu = prev_cpu;
642 * in prev_cpu's node and proceed to other nodes in order of
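Lines 594-642 are the fallbacks: reuse @prev_cpu if it is idle at all, otherwise search for any usable idle CPU, beginning in @prev_cpu's node and widening to other nodes in order of increasing distance when per-node idle tracking is enabled. Sketch; scx_pick_idle_cpu() as the search helper is an assumption from context:

	/* Use @prev_cpu if it's idle. */
	if (is_prev_allowed && scx_idle_test_and_clear_cpu(prev_cpu)) {
		cpu = prev_cpu;
		goto out_unlock;
	}

	/*
	 * Search for any idle CPU usable by the task, starting from
	 * @prev_cpu's node and proceeding outward by NUMA distance.
	 */
	cpu = scx_pick_idle_cpu(allowed, node, flags);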
859 static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
866 if (!kf_cpu_valid(prev_cpu, NULL))
902 if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
903 scx_idle_test_and_clear_cpu(prev_cpu))
904 cpu = prev_cpu;
908 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
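Lines 859-908 show select_cpu_from_kfunc(), the wrapper behind both kfuncs below: it validates @prev_cpu, short-circuits when a full search is pointless, and otherwise delegates to scx_select_cpu_dfl(). Sketch; the nr_cpus_allowed fast path and the -EBUSY error value are assumptions from context:

	static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
					 const struct cpumask *allowed, u64 flags)
	{
		s32 cpu;

		if (!kf_cpu_valid(prev_cpu, NULL))
			return -EINVAL;

		if (p->nr_cpus_allowed == 1) {	/* assumed fast-path condition */
			/* Only one candidate: take @prev_cpu iff it's idle. */
			if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
			    scx_idle_test_and_clear_cpu(prev_cpu))
				cpu = prev_cpu;
			else
				cpu = -EBUSY;
		} else {
			cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
						 allowed ?: p->cpus_ptr, flags);
		}

		return cpu;
	}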
934 * @prev_cpu: CPU @p was on previously
946 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
951 cpu = select_cpu_from_kfunc(p, prev_cpu, wake_flags, NULL, 0);
958 return prev_cpu;
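Lines 934-958 belong to scx_bpf_select_cpu_dfl(): on success it returns an idle CPU, and on failure it falls back to returning @prev_cpu (line 958) with the is_idle out-parameter cleared. A minimal BPF-side usage sketch in the style of the scx example schedulers; the ops name, the direct dispatch to SCX_DSQ_LOCAL, and the scx_bpf_dsq_insert() kfunc name are illustrative assumptions:

	#include <scx/common.bpf.h>

	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
			   s32 prev_cpu, u64 wake_flags)
	{
		bool is_idle = false;
		s32 cpu;

		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
		if (is_idle)
			/* Idle CPU found and reserved: dispatch directly,
			 * skipping ops.enqueue(). */
			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

		return cpu;
	}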
965 * @prev_cpu: CPU @p was on previously
975 * @p, @prev_cpu and @wake_flags match ops.select_cpu().
981 __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
984 return select_cpu_from_kfunc(p, prev_cpu, wake_flags, cpus_allowed, flags);
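scx_bpf_select_cpu_and() (lines 965-984) additionally takes an explicit @cpus_allowed and %SCX_PICK_IDLE* flags, and returns a negative value instead of @prev_cpu when nothing idle matches, leaving the fallback to the caller. A hedged sketch; pref_mask is a hypothetical BPF cpumask assumed to be created and populated elsewhere (e.g. in ops.init()), and cast_mask()/private() follow the conventions of the sched_ext BPF headers:

	#include <scx/common.bpf.h>

	/* Hypothetical set of preferred CPUs, filled in ops.init(). */
	private(PREF) struct bpf_cpumask __kptr *pref_mask;

	s32 BPF_STRUCT_OPS(example_select_cpu_and, struct task_struct *p,
			   s32 prev_cpu, u64 wake_flags)
	{
		const struct cpumask *pref = cast_mask(pref_mask);
		s32 cpu;

		if (!pref)
			return prev_cpu;

		/* Prefer a fully idle core among the preferred CPUs. */
		cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, pref,
					     SCX_PICK_IDLE_CORE);
		if (cpu >= 0) {
			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
			return cpu;
		}

		return prev_cpu;
	}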