Searched defs:cfs_rq (Results 1 – 5 of 5) sorted by relevance

/linux/kernel/sched/
fair.c
310 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
378 list_del_leaf_cfs_rq(struct cfs_rq * cfs_rq) list_del_leaf_cfs_rq() argument
404 for_each_leaf_cfs_rq_safe(rq,cfs_rq,pos) global() argument
460 cfs_rq_is_idle(struct cfs_rq * cfs_rq) cfs_rq_is_idle() argument
477 list_add_leaf_cfs_rq(struct cfs_rq * cfs_rq) list_add_leaf_cfs_rq() argument
482 list_del_leaf_cfs_rq(struct cfs_rq * cfs_rq) list_del_leaf_cfs_rq() argument
490 for_each_leaf_cfs_rq_safe(rq,cfs_rq,pos) global() argument
508 cfs_rq_is_idle(struct cfs_rq * cfs_rq) cfs_rq_is_idle() argument
555 entity_key(struct cfs_rq * cfs_rq,struct sched_entity * se) entity_key() argument
622 avg_vruntime_add(struct cfs_rq * cfs_rq,struct sched_entity * se) avg_vruntime_add() argument
632 avg_vruntime_sub(struct cfs_rq * cfs_rq,struct sched_entity * se) avg_vruntime_sub() argument
642 avg_vruntime_update(struct cfs_rq * cfs_rq,s64 delta) avg_vruntime_update() argument
654 avg_vruntime(struct cfs_rq * cfs_rq) avg_vruntime() argument
693 update_entity_lag(struct cfs_rq * cfs_rq,struct sched_entity * se) update_entity_lag() argument
722 vruntime_eligible(struct cfs_rq * cfs_rq,u64 vruntime) vruntime_eligible() argument
738 entity_eligible(struct cfs_rq * cfs_rq,struct sched_entity * se) entity_eligible() argument
743 __update_min_vruntime(struct cfs_rq * cfs_rq,u64 vruntime) __update_min_vruntime() argument
757 update_min_vruntime(struct cfs_rq * cfs_rq) update_min_vruntime() argument
781 cfs_rq_min_slice(struct cfs_rq * cfs_rq) cfs_rq_min_slice() argument
848 __enqueue_entity(struct cfs_rq * cfs_rq,struct sched_entity * se) __enqueue_entity() argument
857 __dequeue_entity(struct cfs_rq * cfs_rq,struct sched_entity * se) __dequeue_entity() argument
864 __pick_root_entity(struct cfs_rq * cfs_rq) __pick_root_entity() argument
874 __pick_first_entity(struct cfs_rq * cfs_rq) __pick_first_entity() argument
892 set_protect_slice(struct cfs_rq * cfs_rq,struct sched_entity * se) set_protect_slice() argument
907 update_protect_slice(struct cfs_rq * cfs_rq,struct sched_entity * se) update_protect_slice() argument
944 __pick_eevdf(struct cfs_rq * cfs_rq,bool protect) __pick_eevdf() argument
1005 pick_eevdf(struct cfs_rq * cfs_rq) pick_eevdf() argument
1010 __pick_last_entity(struct cfs_rq * cfs_rq) __pick_last_entity() argument
1041 update_deadline(struct cfs_rq * cfs_rq,struct sched_entity * se) update_deadline() argument
1120 struct cfs_rq *cfs_rq = cfs_rq_of(se); post_init_entity_util_avg() local
1207 update_curr(struct cfs_rq * cfs_rq) update_curr() argument
1263 update_stats_wait_start_fair(struct cfs_rq * cfs_rq,struct sched_entity * se) update_stats_wait_start_fair() argument
1280 update_stats_wait_end_fair(struct cfs_rq * cfs_rq,struct sched_entity * se) update_stats_wait_end_fair() argument
1306 update_stats_enqueue_sleeper_fair(struct cfs_rq * cfs_rq,struct sched_entity * se) update_stats_enqueue_sleeper_fair() argument
1326 update_stats_enqueue_fair(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags) update_stats_enqueue_fair() argument
1343 update_stats_dequeue_fair(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags) update_stats_dequeue_fair() argument
1375 update_stats_curr_start(struct cfs_rq * cfs_rq,struct sched_entity * se) update_stats_curr_start() argument
3680 account_entity_enqueue(struct cfs_rq * cfs_rq,struct sched_entity * se) account_entity_enqueue() argument
3693 account_entity_dequeue(struct cfs_rq * cfs_rq,struct sched_entity * se) account_entity_dequeue() argument
3752 enqueue_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se) enqueue_load_avg() argument
3759 dequeue_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se) dequeue_load_avg() argument
3770 reweight_entity(struct cfs_rq * cfs_rq,struct sched_entity * se,unsigned long weight) reweight_entity() argument
3827 struct cfs_rq *cfs_rq = cfs_rq_of(se); reweight_task_fair() local
3910 calc_group_shares(struct cfs_rq * cfs_rq) calc_group_shares() argument
3974 cfs_rq_util_change(struct cfs_rq * cfs_rq,int flags) cfs_rq_util_change() argument
4020 cfs_rq_last_update_time(struct cfs_rq * cfs_rq) cfs_rq_last_update_time() argument
4034 child_cfs_rq_on_list(struct cfs_rq * cfs_rq) child_cfs_rq_on_list() argument
4054 cfs_rq_is_decayed(struct cfs_rq * cfs_rq) cfs_rq_is_decayed() argument
4082 update_tg_load_avg(struct cfs_rq * cfs_rq) update_tg_load_avg() argument
4113 clear_tg_load_avg(struct cfs_rq * cfs_rq) clear_tg_load_avg() argument
4147 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; clear_tg_offline_cfs_rqs() local
4255 update_tg_cfs_util(struct cfs_rq * cfs_rq,struct sched_entity * se,struct cfs_rq * gcfs_rq) update_tg_cfs_util() argument
4287 update_tg_cfs_runnable(struct cfs_rq * cfs_rq,struct sched_entity * se,struct cfs_rq * gcfs_rq) update_tg_cfs_runnable() argument
4317 update_tg_cfs_load(struct cfs_rq * cfs_rq,struct sched_entity * se,struct cfs_rq * gcfs_rq) update_tg_cfs_load() argument
4384 add_tg_cfs_propagate(struct cfs_rq * cfs_rq,long runnable_sum) add_tg_cfs_propagate() argument
4393 struct cfs_rq *cfs_rq, *gcfs_rq; propagate_entity_load_avg() local
4450 update_tg_load_avg(struct cfs_rq * cfs_rq) update_tg_load_avg() argument
4459 add_tg_cfs_propagate(struct cfs_rq * cfs_rq,long runnable_sum) add_tg_cfs_propagate() argument
4467 struct cfs_rq *cfs_rq; migrate_se_pelt_lag() local
4562 update_cfs_rq_load_avg(u64 now,struct cfs_rq * cfs_rq) update_cfs_rq_load_avg() argument
4633 attach_entity_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se) attach_entity_load_avg() argument
4688 detach_entity_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se) detach_entity_load_avg() argument
4719 update_load_avg(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags) update_load_avg() argument
4767 struct cfs_rq *cfs_rq = cfs_rq_of(se); sync_entity_load_avg() local
4780 struct cfs_rq *cfs_rq = cfs_rq_of(se); remove_entity_load_avg() local
4799 cfs_rq_runnable_avg(struct cfs_rq * cfs_rq) cfs_rq_runnable_avg() argument
4804 cfs_rq_load_avg(struct cfs_rq * cfs_rq) cfs_rq_load_avg() argument
4831 util_est_enqueue(struct cfs_rq * cfs_rq,struct task_struct * p) util_est_enqueue() argument
4847 util_est_dequeue(struct cfs_rq * cfs_rq,struct task_struct * p) util_est_dequeue() argument
4865 util_est_update(struct cfs_rq * cfs_rq,struct task_struct * p,bool task_sleep) util_est_update() argument
5128 place_entity(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags) place_entity() argument
5242 enqueue_entity(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags) enqueue_entity() argument
5312 struct cfs_rq *cfs_rq = cfs_rq_of(se); __clear_buddies_next() local
5320 clear_buddies(struct cfs_rq * cfs_rq,struct sched_entity * se) clear_buddies() argument
5341 struct cfs_rq *cfs_rq = cfs_rq_of(se); set_delayed() local
5363 struct cfs_rq *cfs_rq = cfs_rq_of(se); clear_delayed() local
5379 dequeue_entity(struct cfs_rq * cfs_rq,struct sched_entity * se,int flags) dequeue_entity() argument
5460 set_next_entity(struct cfs_rq * cfs_rq,struct sched_entity * se) set_next_entity() argument
5510 pick_next_entity(struct rq * rq,struct cfs_rq * cfs_rq) pick_next_entity() argument
5537 put_prev_entity(struct cfs_rq * cfs_rq,struct sched_entity * prev) put_prev_entity() argument
5561 entity_tick(struct cfs_rq * cfs_rq,struct sched_entity * curr,int queued) entity_tick() argument
5657 __assign_cfs_rq_runtime(struct cfs_bandwidth * cfs_b,struct cfs_rq * cfs_rq,u64 target_runtime) __assign_cfs_rq_runtime() argument
5684 assign_cfs_rq_runtime(struct cfs_rq * cfs_rq) assign_cfs_rq_runtime() argument
5696 __account_cfs_rq_runtime(struct cfs_rq * cfs_rq,u64 delta_exec) __account_cfs_rq_runtime() argument
5715 account_cfs_rq_runtime(struct cfs_rq * cfs_rq,u64 delta_exec) account_cfs_rq_runtime() argument
5723 cfs_rq_throttled(struct cfs_rq * cfs_rq) cfs_rq_throttled() argument
5729 throttled_hierarchy(struct cfs_rq * cfs_rq) throttled_hierarchy() argument
5754 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_unthrottle_up() local
5783 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_throttle_down() local
5799 throttle_cfs_rq(struct cfs_rq * cfs_rq) throttle_cfs_rq() argument
5900 unthrottle_cfs_rq(struct cfs_rq * cfs_rq) unthrottle_cfs_rq() argument
6038 __unthrottle_cfs_rq_async(struct cfs_rq * cfs_rq) __unthrottle_cfs_rq_async() argument
6058 unthrottle_cfs_rq_async(struct cfs_rq * cfs_rq) unthrottle_cfs_rq_async() argument
6074 struct cfs_rq *cfs_rq, *tmp; distribute_cfs_runtime() local
6261 __return_cfs_rq_runtime(struct cfs_rq * cfs_rq) __return_cfs_rq_runtime() argument
6284 return_cfs_rq_runtime(struct cfs_rq * cfs_rq) return_cfs_rq_runtime() argument
6329 check_enqueue_throttle(struct cfs_rq * cfs_rq) check_enqueue_throttle() argument
6350 struct cfs_rq *pcfs_rq, *cfs_rq; sync_throttle() local
6366 check_cfs_rq_runtime(struct cfs_rq * cfs_rq) check_cfs_rq_runtime() argument
6470 init_cfs_rq_runtime(struct cfs_rq * cfs_rq) init_cfs_rq_runtime() argument
6540 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; update_runtime_enabled() local
6569 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; unthrottle_offline_cfs_rqs() local
6597 struct cfs_rq *cfs_rq = task_cfs_rq(p); cfs_task_bw_constrained() local
6637 account_cfs_rq_runtime(struct cfs_rq * cfs_rq,u64 delta_exec) account_cfs_rq_runtime() argument
6638 check_cfs_rq_runtime(struct cfs_rq * cfs_rq) check_cfs_rq_runtime() argument
6639 check_enqueue_throttle(struct cfs_rq * cfs_rq) check_enqueue_throttle() argument
6641 return_cfs_rq_runtime(struct cfs_rq * cfs_rq) return_cfs_rq_runtime() argument
6643 cfs_rq_throttled(struct cfs_rq * cfs_rq) cfs_rq_throttled() argument
6648 throttled_hierarchy(struct cfs_rq * cfs_rq) throttled_hierarchy() argument
6661 init_cfs_rq_runtime(struct cfs_rq * cfs_rq) init_cfs_rq_runtime() argument
6790 struct cfs_rq *cfs_rq = cfs_rq_of(se); requeue_delayed_entity() local
6826 struct cfs_rq *cfs_rq; enqueue_task_fair() local
6970 struct cfs_rq *cfs_rq; dequeue_entities() local
7134 struct cfs_rq *cfs_rq; cpu_load_without() local
7157 struct cfs_rq *cfs_rq; cpu_runnable_without() local
7899 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; cpu_util() local
8650 struct cfs_rq *cfs_rq = task_cfs_rq(donor); check_preempt_wakeup_fair() local
8743 struct cfs_rq *cfs_rq; pick_task_fair() local
8802 struct cfs_rq *cfs_rq; pick_next_task_fair() local
8887 struct cfs_rq *cfs_rq; put_prev_task_fair() local
8901 struct cfs_rq *cfs_rq = task_cfs_rq(curr); yield_task_fair() local
9629 cfs_rq_has_blocked(struct cfs_rq * cfs_rq) cfs_rq_has_blocked() argument
9668 cfs_rq_has_blocked(struct cfs_rq * cfs_rq) cfs_rq_has_blocked() argument
9694 struct cfs_rq *cfs_rq, *pos; __update_blocked_fair() local
9740 update_cfs_rq_h_load(struct cfs_rq * cfs_rq) update_cfs_rq_h_load() argument
9775 struct cfs_rq *cfs_rq = task_cfs_rq(p); task_h_load() local
9784 struct cfs_rq *cfs_rq = &rq->cfs; __update_blocked_fair() local
12928 struct cfs_rq *cfs_rq = cfs_rq_of(se); se_fi_update() local
13000 struct cfs_rq *cfs_rq; task_is_throttled_fair() local
13023 struct cfs_rq *cfs_rq; task_tick_fair() local
13082 struct cfs_rq *cfs_rq = cfs_rq_of(se); propagate_entity_cfs_rq() local
13111 struct cfs_rq *cfs_rq = cfs_rq_of(se); detach_entity_cfs_rq() local
13131 struct cfs_rq *cfs_rq = cfs_rq_of(se); attach_entity_cfs_rq() local
13214 struct cfs_rq *cfs_rq = cfs_rq_of(se); set_next_task_fair() local
13224 init_cfs_rq(struct cfs_rq * cfs_rq) init_cfs_rq() argument
13267 struct cfs_rq *cfs_rq; alloc_fair_sched_group() local
13330 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu]; unregister_fair_sched_group() local
13357 init_tg_cfs_entry(struct task_group * tg,struct cfs_rq * cfs_rq,struct sched_entity * se,int cpu,struct sched_entity * parent) init_tg_cfs_entry() argument
13479 struct cfs_rq *cfs_rq = cfs_rq_of(se); sched_group_set_idle() local
13577 struct cfs_rq *cfs_rq, *pos; print_cfs_stats() local
[all...]
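Many of the fair.c hits above cluster around the EEVDF pick path (avg_vruntime(), vruntime_eligible(), entity_eligible(), pick_eevdf()): an entity is considered eligible when its vruntime is no later than the load-weighted average vruntime of the queue. The sketch below shows that check in its plain form; it is not the kernel's code, which keeps the average in fixed point relative to min_vruntime, but the comparison it performs is the same.

    /*
     * Simplified sketch, not kernel code: EEVDF treats an entity as
     * "eligible" when its lag is non-negative, i.e. its vruntime does not
     * exceed the load-weighted average vruntime of the runqueue. Using
     * v0 = se->vruntime as the reference point turns the comparison into a
     * sign test, so no division is needed.
     */
    struct entity {
        unsigned long       weight;
        unsigned long long  vruntime;
    };

    static int entity_eligible_sketch(const struct entity *q, int nr,
                                      const struct entity *se)
    {
        long long num = 0;      /* sum over queue of weight * (vruntime - v0) */
        int i;

        for (i = 0; i < nr; i++)
            num += (long long)(q[i].vruntime - se->vruntime) *
                   (long long)q[i].weight;

        /* weighted average vruntime >= se->vruntime  <=>  num >= 0 */
        return num >= 0;
    }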
pelt.h
161 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in update_idle_cfs_rq_clock_pelt() argument
174 cfs_rq_clock_pelt(struct cfs_rq * cfs_rq) cfs_rq_clock_pelt() argument
182 update_idle_cfs_rq_clock_pelt(struct cfs_rq * cfs_rq) update_idle_cfs_rq_clock_pelt() argument
183 cfs_rq_clock_pelt(struct cfs_rq * cfs_rq) cfs_rq_clock_pelt() argument
[all...]
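Both pelt.h helpers listed above exist to give each cfs_rq a PELT clock that excludes time the queue spent throttled by CFS bandwidth control, so load neither accrues nor decays while nothing was allowed to run. A minimal sketch of that stopped-clock pattern, with made-up names (the real state lives in struct cfs_rq fields such as throttled_clock_pelt_time):

    /* Illustrative sketch only; types and names are hypothetical. */
    struct throttle_clock {
        unsigned long long  throttled_time;  /* total time spent throttled   */
        unsigned long long  freeze_stamp;    /* clock value when last frozen */
        int                 throttled;
    };

    static void throttle_clock_freeze(struct throttle_clock *tc,
                                      unsigned long long rq_clock)
    {
        tc->freeze_stamp = rq_clock;
        tc->throttled = 1;
    }

    static void throttle_clock_unfreeze(struct throttle_clock *tc,
                                        unsigned long long rq_clock)
    {
        tc->throttled_time += rq_clock - tc->freeze_stamp;
        tc->throttled = 0;
    }

    static unsigned long long throttle_clock_read(const struct throttle_clock *tc,
                                                  unsigned long long rq_clock)
    {
        /* frozen: report the value taken at freeze time; running: skip all
         * previously accumulated throttled time */
        if (tc->throttled)
            return tc->freeze_stamp - tc->throttled_time;
        return rq_clock - tc->throttled_time;
    }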
sched.h
458 struct cfs_rq **cfs_rq; member
650 struct cfs_rq { definition
669 tasks_timeline cfs_rq member
672 curr cfs_rq member
673 next cfs_rq member
678 avg cfs_rq member
680 last_update_time_copy cfs_rq member
682 __anon2a8854610108 cfs_rq member
708 rq cfs_rq member
719 leaf_cfs_rq_list cfs_rq member
720 tg cfs_rq member
723 idle cfs_rq member
726 runtime_enabled cfs_rq member
727 runtime_remaining cfs_rq member
729 throttled_pelt_idle cfs_rq member
731 throttled_pelt_idle_copy cfs_rq member
733 throttled_clock cfs_rq member
734 throttled_clock_pelt cfs_rq member
735 throttled_clock_pelt_time cfs_rq member
736 throttled_clock_self cfs_rq member
737 throttled_clock_self_time cfs_rq member
738 throttled cfs_rq member
739 throttle_count cfs_rq member
740 throttled_list cfs_rq member
741 throttled_csd_list cfs_rq member
1298 rq_of(struct cfs_rq * cfs_rq) rq_of() argument
1305 rq_of(struct cfs_rq * cfs_rq) rq_of() argument
[all...]
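Pieced together from the member names and line numbers above, struct cfs_rq can be sketched roughly as follows. The types are reconstructed from general knowledge of the scheduler, several members only exist under CONFIG_FAIR_GROUP_SCHED / CONFIG_CFS_BANDWIDTH, and fields that were not hits for this search (load, min_vruntime, and others) are omitted, so treat this as an orientation aid rather than the actual sched.h definition.

    /* Sketch of struct cfs_rq assembled from the hits above; kernel types
     * (u64, s64, struct rb_root_cached, ...) come from the usual headers. */
    struct cfs_rq {
        struct rb_root_cached    tasks_timeline;            /* runnable entities             (669) */
        struct sched_entity     *curr;                      /* currently running entity      (672) */
        struct sched_entity     *next;                      /* wakeup buddy hint             (673) */
        struct sched_avg         avg;                       /* PELT load/util averages       (678) */
        u64                      last_update_time_copy;     /* lockless read copy            (680) */
        /* 682: anonymous struct used to propagate load changes up the group hierarchy */
    #ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq               *rq;                        /* owning CPU runqueue           (708) */
        struct list_head         leaf_cfs_rq_list;          /* per-rq list of active cfs_rqs (719) */
        struct task_group       *tg;                        /* owning task group             (720) */
        int                      idle;                      /* SCHED_IDLE group              (723) */
    #ifdef CONFIG_CFS_BANDWIDTH
        int                      runtime_enabled;           /* bandwidth limit configured    (726) */
        s64                      runtime_remaining;         /* runtime left this period      (727) */
        u64                      throttled_pelt_idle;       /* (729) */
        u64                      throttled_pelt_idle_copy;  /* (731) */
        u64                      throttled_clock;           /* (733) */
        u64                      throttled_clock_pelt;      /* (734) */
        u64                      throttled_clock_pelt_time; /* (735) */
        u64                      throttled_clock_self;      /* (736) */
        u64                      throttled_clock_self_time; /* (737) */
        int                      throttled;                 /* (738) */
        int                      throttle_count;            /* (739) */
        struct list_head         throttled_list;            /* (740) */
        struct list_head         throttled_csd_list;        /* (741) */
    #endif
    #endif
    };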
pelt.c
307 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se() argument
321 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) in __update_load_avg_cfs_rq() argument
[all...]
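__update_load_avg_se() and __update_load_avg_cfs_rq() feed the same PELT arithmetic: running time is accumulated in 1024 us segments and decayed geometrically with a factor y chosen so that y^32 = 1/2, i.e. a half-life of about 32 ms. The toy program below redoes that arithmetic in floating point purely for illustration; it assumes only those published constants, while the kernel itself uses precomputed fixed-point tables.

    /* Toy illustration of PELT's geometric accumulation. */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double y = pow(0.5, 1.0 / 32.0); /* decay per 1024 us segment */
        double load_sum = 0.0;
        int p;

        /* an always-running entity: each elapsed segment decays the old sum
         * and contributes a full 1024 us of new running time */
        for (p = 0; p < 128; p++)
            load_sum = load_sum * y + 1024.0;

        /* the series converges to 1024 / (1 - y), which is where the
         * kernel's LOAD_AVG_MAX constant (about 47742) comes from */
        printf("load_sum after 128 segments: %.0f (limit ~%.0f)\n",
               load_sum, 1024.0 / (1.0 - y));
        return 0;
    }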
debug.c
799 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) in print_cfs_rq() argument
[all...]