Home
last modified time | relevance | path

Searched refs:nr_cpus_allowed (Results 1 – 14 of 14) sorted by relevance

/linux/tools/testing/selftests/sched_ext/
dsp_local_on.bpf.c:46 if (p->nr_cpus_allowed == nr_cpus && !is_migration_disabled(p)) in BPF_STRUCT_OPS()
/linux/include/linux/
mm_types.h:998 * @nr_cpus_allowed: Number of CPUs allowed for mm.
1003 unsigned int nr_cpus_allowed; member
1016 * mm nr_cpus_allowed updates.
1387 mm->nr_cpus_allowed = p->nr_cpus_allowed; in mm_init_cid()
1424 WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed)); in mm_set_cpus_allowed()
sched.h:915 int nr_cpus_allowed; member
1792 (current->nr_cpus_allowed == 1); in is_percpu_thread()
/linux/kernel/sched/
deadline.c:2127 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1) in enqueue_task_dl()
2211 (curr->nr_cpus_allowed < 2 || in select_task_rq_dl()
2213 p->nr_cpus_allowed > 1; in select_task_rq_dl()
2273 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_dl()
2281 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_dl()
2425 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
2501 if (task->nr_cpus_allowed == 1) in find_later_rq()
2643 WARN_ON_ONCE(p->nr_cpus_allowed <= 1); in pick_next_pushable_dl_task()
2674 rq->curr->nr_cpus_allowed > 1) { in push_dl_task()
2834 p->nr_cpus_allowed > in task_woken_dl()
[all...]
rt.c:1446 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_rt()
1543 (curr->nr_cpus_allowed < 2 || donor->prio <= p->prio); in select_task_rq_rt()
1573 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_prio()
1581 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_prio()
1728 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) in put_prev_task_rt()
1769 if (task->nr_cpus_allowed == 1) in find_lowest_rq()
1861 BUG_ON(p->nr_cpus_allowed <= 1); in pick_next_pushable_task()
2346 p->nr_cpus_allowed > 1 && in task_woken_rt()
2348 (rq->curr->nr_cpus_allowed < 2 || in task_woken_rt()
2428 if (p->nr_cpus_allowed > in switched_to_rt()
[all...]
idle.c:394 WARN_ON_ONCE(current->nr_cpus_allowed != 1); in play_idle_precise()
ext_idle.c:413 return p->nr_cpus_allowed >= num_possible_cpus(); in task_affinity_all()
874 * so that we can safely access p->cpus_ptr and p->nr_cpus_allowed. in select_cpu_from_kfunc()
889 * p->nr_cpus_allowed: if we're holding an rq lock, we're safe; in select_cpu_from_kfunc()
901 if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) { in select_cpu_from_kfunc()
sched.h:2600 if (p->nr_cpus_allowed == 1) in get_push_task()
3492 if (p->nr_cpus_allowed != 1) in is_per_cpu_kthread()
3597 while ((allowed_max_nr_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed), in __mm_cid_try_get()
3618 while (cid < READ_ONCE(mm->nr_cpus_allowed) && cid < atomic_read(&mm->mm_users)) { in __mm_cid_try_get()
3633 if (cid < READ_ONCE(mm->nr_cpus_allowed)) in __mm_cid_try_get()
fair.c:5096 if (!p || (p->nr_cpus_allowed == 1) || in update_misfit_status()
10825 if (p->nr_cpus_allowed != NR_CPUS) { in sched_balance_find_dst_group()
ext.c:136 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
6792 if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1) in scx_bpf_reenqueue_local()
/linux/tools/sched_ext/
scx_central.bpf.c:114 if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) { in BPF_STRUCT_OPS()
scx_flatcg.bpf.c:362 if (p->nr_cpus_allowed != nr_cpus) { in BPF_STRUCT_OPS()
373 if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) { in BPF_STRUCT_OPS()
scx_qmap.bpf.c:138 if (p->nr_cpus_allowed == 1 || in pick_direct_dispatch_cpu()
/linux/drivers/infiniband/hw/hfi1/
sdma.c:839 if (current->nr_cpus_allowed != 1) in sdma_select_user_engine()