/linux/Documentation/translations/zh_CN/scheduler/
  sched-capacity.rst
    302  task_util(p) < capacity(task_cpu(p))
    358  then the task may become CPU-bound, that is ``task_util(p) > capacity(task_cpu(p))``; CPU capacity
    374  task_uclamp_min(p) <= capacity(task_cpu(cpu))
    387  task_bandwidth(p) < capacity(task_cpu(p))
/linux/kernel/sched/
  stop_task.c
    15    return task_cpu(p); /* stop tasks as never migrate */  in select_task_rq_stop()
  deadline.c
    451   struct dl_bw *dl_b = dl_bw_of(task_cpu(p));  in task_non_contending()
    456   __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));  in task_non_contending()
    1767  struct dl_bw *dl_b = dl_bw_of(task_cpu(p));  in inactive_task_timer()
    1776  __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in inactive_task_timer()
    2495  int cpu = task_cpu(task);  in find_later_rq()
    2641  WARN_ON_ONCE(rq->cpu != task_cpu(p));  in pick_next_pushable_dl_task()
    2868  __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in set_cpus_allowed_dl()
    3228  int cpus, err = -1, cpu = task_cpu(p);  in sched_dl_overflow()
  psi.c
    900   task->pid, task->comm, task_cpu(task),  in psi_flags_change()
    911   int cpu = task_cpu(task);  in psi_task_change()
    930   int cpu = task_cpu(prev);  in psi_task_switch()
    1008  int cpu = task_cpu(curr);  in psi_account_irqtime()
  idle.c
    438   return task_cpu(p); /* IDLE tasks as never migrated */  in select_task_rq_idle()
  cpuacct.c
    338   unsigned int cpu = task_cpu(tsk);  in cpuacct_charge()
  fair.c
    2493   .src_cpu = task_cpu(p),  in task_numa_migrate()
    3627   int src_nid = cpu_to_node(task_cpu(p));  in update_scan_period()
    7138   if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_load_without()
    7161   if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_runnable_without()
    7914   if (p && task_cpu(p) == cpu && dst_cpu != cpu)  in cpu_util()
    7916   else if (p && task_cpu(p) != cpu && dst_cpu == cpu)  in cpu_util()
    7987   if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_util_without()
    8544   * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
    10533  if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in task_running_on_cpu()
    13245  set_task_rq(p, task_cpu(  in task_change_group_fair()
    [all...]
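The cpu_util() and cpu_util_without() hits above share one pattern: when a move of p to dst_cpu is being evaluated, p's own contribution is removed from the CPU it is currently accounted on and added to the candidate CPU. A minimal sketch of that idea, with raw_util_of() and util_of() as hypothetical stand-ins for the real utilization accessors (this is not fair.c code):

    #include <linux/sched.h>
    #include <linux/minmax.h>

    /* Hypothetical accessors, declared only so the sketch is self-contained. */
    static unsigned long raw_util_of(int cpu);
    static unsigned long util_of(struct task_struct *p);

    /* Estimated utilization of @cpu if @p were placed on @dst_cpu. */
    static unsigned long util_if_migrated(struct task_struct *p, int cpu, int dst_cpu)
    {
            unsigned long util = raw_util_of(cpu);

            if (task_cpu(p) == cpu && dst_cpu != cpu)
                    util -= min(util, util_of(p));  /* p would leave this CPU */
            else if (task_cpu(p) != cpu && dst_cpu == cpu)
                    util += util_of(p);             /* p would land here */

            return util;
    }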
  rt.c
    1505  /* For anything but wake ups, just return the task_cpu */  in select_task_rq_rt()
    1762  int cpu = task_cpu(task);  in find_lowest_rq()
    1858  BUG_ON(rq->cpu != task_cpu(p));  in pick_next_pushable_task()
  debug.c
    791   if (task_cpu(p) != rq_cpu)  in print_rq()
  sched.h
    1327  #define task_rq(p) cpu_rq(task_cpu(p))
    2398  int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
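These two sched.h hits show how task_cpu() feeds the core scheduler: task_rq() simply composes it with cpu_rq(), and each scheduling class provides a ->select_task_rq() hook that may do nothing more than return it. A sketch of such a hook, in the spirit of select_task_rq_stop() and select_task_rq_idle() above (example_select_task_rq is a made-up name, not a class in the tree):

    #include <linux/sched.h>

    /* Classes whose tasks can never migrate just report the current CPU. */
    static int example_select_task_rq(struct task_struct *p, int prev_cpu, int wake_flags)
    {
            return task_cpu(p);
    }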
  ext.c
    1239  return sch->global_dsqs[cpu_to_node(task_cpu(p))];  in find_global_dsq()
    2618  WARN_ON_ONCE(task_cpu(p) == cpu);  in task_can_run_on_remote_rq()
    2635  p->comm, p->pid, task_cpu(p), cpu);  in task_can_run_on_remote_rq()
    7414  return task_cpu(p);  in scx_bpf_task_cpu()
/linux/kernel/rcu/
  tasks.h
    1006  cpu = task_cpu(t);  in rcu_tasks_is_holdout()
    1116  cpu = task_cpu(t);  in check_holdout_task()
    1669  int cpu = task_cpu(t);  in trc_inspect_reader()
    1690  WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));  in trc_inspect_reader()
    1749  cpu = task_cpu(t);  in trc_wait_for_one_reader()
    1884  if (task_curr(t) && cpu_online(task_cpu(t)))  in trc_check_slow_task()
    1903  cpu = task_cpu(t);  in show_stalled_task_trace()
  tree_stall.h
    467   cpu = task_cpu(rcuc);  in rcu_is_rcuc_kthread_starving()
    576   cpu = gpk ? task_cpu(gpk) : -1;  in rcu_check_gp_kthread_starvation()
    619   cpu = task_cpu(gpk);  in rcu_check_gp_kthread_expired_fqs_timer()
  tree_nocb.h
    1546  rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,  in show_rcu_nocb_gp_state()
    1603  rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,  in show_rcu_nocb_state()
/linux/include/linux/
  kdb.h
    190   unsigned int cpu = task_cpu(p);  in kdb_process_cpu()
/linux/kernel/trace/
  trace_sched_wakeup.c
    412   entry->next_cpu = task_cpu(next);  in tracing_sched_switch_trace()
    438   entry->next_cpu = task_cpu(wakee);  in tracing_sched_wakeup_trace()
    583   wakeup_cpu = task_cpu(p);  in probe_wakeup()
/linux/include/linux/sched/
  topology.h
    250   return cpu_to_node(task_cpu(p));  in task_node()
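task_node() above is nothing more than cpu_to_node(task_cpu(p)), which makes NUMA-aware placement cheap. A hedged usage sketch (the allocation pattern is illustrative, not taken from the files listed here):

    #include <linux/sched/topology.h>
    #include <linux/slab.h>

    /* Place a buffer on the NUMA node of the CPU @p last ran on. */
    static void *alloc_near_task(struct task_struct *p, size_t size)
    {
            return kmalloc_node(size, GFP_KERNEL, task_node(p));
    }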
/linux/Documentation/scheduler/
  sched-capacity.rst
    342  task_util(p) < capacity(task_cpu(p))
    405  then it might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``;
    424  task_uclamp_min(p) <= capacity(task_cpu(cpu))
    438  task_bandwidth(p) < capacity(task_cpu(p))
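The sched-capacity.rst lines compare a task's utilization, uclamp minimum, or bandwidth against the capacity of the CPU it sits on. A rough sketch of the basic fitness check, assuming simplified task_util() and capacity() stand-ins and ignoring the headroom margin the fair class applies in practice:

    #include <linux/sched.h>

    /* Hypothetical stand-ins for the quantities named in the document. */
    static unsigned long task_util(struct task_struct *p);
    static unsigned long capacity(int cpu);

    /* "task_util(p) < capacity(task_cpu(p))": does p fit where it currently is? */
    static bool example_task_fits(struct task_struct *p)
    {
            return task_util(p) < capacity(task_cpu(p));
    }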
/linux/kernel/
  stop_machine.c
    55   * If @task is a stopper task, it cannot migrate and task_cpu() is  in print_stop_info()
    58   struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));  in print_stop_info()
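print_stop_info() can index per-CPU data with task_cpu() and no extra synchronization precisely because a stopper thread is bound to a single CPU and never migrates. A sketch of the same reasoning with a made-up per-CPU structure:

    #include <linux/percpu.h>
    #include <linux/sched.h>

    struct example_state {
            unsigned long counter;
    };
    static DEFINE_PER_CPU(struct example_state, example_state);

    /*
     * Only safe because @kthread is affine to exactly one CPU, so task_cpu()
     * cannot change underneath us (the argument print_stop_info() relies on).
     */
    static struct example_state *state_of_bound_kthread(struct task_struct *kthread)
    {
            return per_cpu_ptr(&example_state, task_cpu(kthread));
    }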
/linux/arch/powerpc/kernel/
  process.c
    2162  unsigned long cpu = task_cpu(p);  in valid_irq_stack()
    2183  unsigned long cpu = task_cpu(p);  in valid_emergency_stack()
    2217  unsigned long cpu = task_cpu(p);  in valid_emergency_stack()
/linux/arch/parisc/kernel/
  traps.c
    153  level, task_cpu(current), cr30, cr31);  in show_regs()
/linux/arch/mips/kernel/
  process.c
    852  cpumask_set_cpu(task_cpu(t), &process_cpus);  in mips_set_process_fp_mode()
/linux/fs/proc/
  array.c
    642  seq_put_decimal_ll(m, " ", task_cpu(task));  in do_task_stat()
/linux/fs/fuse/
  dev_uring.c
    1236  qid = task_cpu(current);  in fuse_uring_task_to_queue()
/linux/fs/resctrl/
  rdtgroup.c
    613   smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);  in update_task_closid_rmid()
    2812  cpumask_set_cpu(task_cpu(t), mask);  in rdt_move_group_tasks()
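update_task_closid_rmid() shows the other recurring pattern in these hits: target an IPI at the CPU a task is currently running on. A minimal sketch with a hypothetical callback (a task can still migrate between the task_cpu() read and the IPI, which real callers have to account for):

    #include <linux/smp.h>
    #include <linux/sched.h>

    static void example_on_task_cpu(void *info)
    {
            /* Runs on the CPU the task was on when the IPI was targeted. */
    }

    static void poke_cpu_of(struct task_struct *t)
    {
            smp_call_function_single(task_cpu(t), example_on_task_cpu, t, 1);
    }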