Searched refs:nr_cpus_allowed (Results 1 – 17 of 17) sorted by relevance
46 if (p->nr_cpus_allowed == nr_cpus && !is_migration_disabled(p)) in BPF_STRUCT_OPS()
2110 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1) in enqueue_task_dl()
2194 (curr->nr_cpus_allowed < 2 || in select_task_rq_dl()
2196 p->nr_cpus_allowed > 1; in select_task_rq_dl()
2256 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_dl()
2264 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_dl()
2405 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
2481 if (task->nr_cpus_allowed == 1) in find_later_rq()
2568 WARN_ON_ONCE(p->nr_cpus_allowed <= 1); in pick_next_pushable_dl_task()
2679 rq->curr->nr_cpus_allowed > 1) { in push_dl_task()
2839 p->nr_cpus_allowed > 1 && in task_woken_dl()
[all …]
1446 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_rt()
1543 (curr->nr_cpus_allowed < 2 || donor->prio <= p->prio); in select_task_rq_rt()
1573 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_prio()
1581 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_prio()
1728 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) in put_prev_task_rt()
1769 if (task->nr_cpus_allowed == 1) in find_lowest_rq()
1861 BUG_ON(p->nr_cpus_allowed <= 1); in pick_next_pushable_task()
2346 p->nr_cpus_allowed > 1 && in task_woken_rt()
2348 (rq->curr->nr_cpus_allowed < 2 || in task_woken_rt()
2428 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) in switched_to_rt()
394 WARN_ON_ONCE(current->nr_cpus_allowed != 1); in play_idle_precise()
413 return p->nr_cpus_allowed >= num_possible_cpus(); in task_affinity_all()
928 if (p->nr_cpus_allowed == 1 || is_bpf_migration_disabled(p)) { in select_cpu_from_kfunc()
2629 if (p->nr_cpus_allowed == 1) in get_push_task()
3523 if (p->nr_cpus_allowed != 1) in is_per_cpu_kthread()
3628 while ((allowed_max_nr_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed), in __mm_cid_try_get()
3649 while (cid < READ_ONCE(mm->nr_cpus_allowed) && cid < atomic_read(&mm->mm_users)) { in __mm_cid_try_get()
3664 if (cid < READ_ONCE(mm->nr_cpus_allowed)) in __mm_cid_try_get()
2686 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); in set_cpus_allowed_common()
3571 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) { in select_task_rq()
10629 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed)) in sched_mm_cid_migrate_to()
5093 if (!p || (p->nr_cpus_allowed == 1) || in update_misfit_status()
10918 if (p->nr_cpus_allowed != NR_CPUS) { in sched_balance_find_dst_group()
1015 unsigned int nr_cpus_allowed; member
1438 mm->nr_cpus_allowed = p->nr_cpus_allowed; in mm_init_cid()
1475 WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed)); in mm_set_cpus_allowed()
921 int nr_cpus_allowed; member
1798 (current->nr_cpus_allowed == 1); in is_percpu_thread()
114 if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) { in BPF_STRUCT_OPS()
362 if (p->nr_cpus_allowed != nr_cpus) { in BPF_STRUCT_OPS()
373 if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) { in BPF_STRUCT_OPS()
140 if (p->nr_cpus_allowed == 1 || in pick_direct_dispatch_cpu()
86 .nr_cpus_allowed= NR_CPUS,
991 if (current->nr_cpus_allowed == 1) { in hfi1_get_proc_affinity()
1002 } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) { in hfi1_get_proc_affinity()
839 if (current->nr_cpus_allowed != 1) in sdma_select_user_engine()
2410 if (current->nr_cpus_allowed > 1 || cpu != smp_processor_id()) { in timerlat_fd_open()