Searched refs:cfs_rq (Results 1 – 8 of 8) sorted by relevance

/linux/kernel/sched/

fair.c (all matches in list_add_leaf_cfs_rq()):

   310  static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
   312          struct rq *rq = rq_of(cfs_rq);
   315          if (cfs_rq->on_list)
   318          cfs_rq->on_list = 1;
   329          if (cfs_rq->tg->parent &&
   330              cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
   337                  list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
   338                          &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
   348          if (!cfs_rq->tg->parent) {
   353                  list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
   [all …]
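
A note on the insertion semantic above, since it is what keeps rq->leaf_cfs_rq_list ordered children-before-parents: on a circular list, "add tail at node N" links the new node in immediately before N, so adding the child at the parent's own list entry guarantees the child is walked first. A minimal non-RCU sketch of that behavior (struct node and toy_list_add_tail are illustrative stand-ins, not the kernel's <linux/list.h>):

  #include <stdio.h>

  struct node { struct node *prev, *next; const char *name; };

  /* Link 'new' in immediately before 'at' (the kernel list_add_tail() semantic). */
  static void toy_list_add_tail(struct node *new, struct node *at)
  {
          new->prev = at->prev;
          new->next = at;
          at->prev->next = new;
          at->prev = new;
  }

  int main(void)
  {
          struct node head = { &head, &head, "head" };
          struct node parent = { .name = "parent" }, child = { .name = "child" };

          toy_list_add_tail(&parent, &head);  /* list: parent */
          toy_list_add_tail(&child, &parent); /* child lands before parent */

          for (struct node *n = head.next; n != &head; n = n->next)
                  printf("%s\n", n->name);    /* prints "child", then "parent" */
          return 0;
  }
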
pelt.h:

     9  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
    10  int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
   161  static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
   165          if (unlikely(cfs_rq->pelt_clock_throttled))
   168          throttled = cfs_rq->throttled_clock_pelt_time;
   170          u64_u32_store(cfs_rq->throttled_pelt_idle, throttled);
   174  static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
   176          if (unlikely(cfs_rq->pelt_clock_throttled))
   177                  return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
   179          return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
   [all …]
pelt.c:

   307  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
   310                          cfs_rq->curr == se)) {
   321  int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
   323          if (___update_load_sum(now, &cfs_rq->avg,
   324                         scale_load_down(cfs_rq->load.weight),
   325                         cfs_rq->h_nr_runnable,
   326                         cfs_rq->curr != NULL)) {
   328                  ___update_load_avg(&cfs_rq->avg, 1);
   329                  trace_pelt_cfs_tp(cfs_rq);
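
These are the fair-class entry points into the PELT core: ___update_load_sum() decays and accumulates the busy-time history, ___update_load_avg() normalizes it, and the pelt_cfs_tp tracepoint fires whenever the average is refreshed. The kernel does this in 32.32 fixed point with precomputed decay tables; the sketch below is a deliberately simplified floating-point illustration of the same geometric series, where history 32 periods (32 ms) old weighs half as much as the newest period, i.e. y^32 = 1/2 (toy_avg and toy_update are made-up names, and partial-period splitting is omitted):

  #include <math.h>
  #include <stdio.h>

  #define PELT_PERIOD_US 1024.0
  static const double y = 0.97857206208770013;  /* pow(0.5, 1.0 / 32) */

  struct toy_avg {
          double load_sum;  /* decayed sum of busy time, in microseconds */
          double load_avg;  /* load_sum normalized to the series maximum */
  };

  /* Credit 'busy_us' of busy time to one newly elapsed PELT period. */
  static void toy_update(struct toy_avg *a, double busy_us)
  {
          a->load_sum = a->load_sum * y + busy_us;
          /* the series maximum is PELT_PERIOD_US / (1 - y) */
          a->load_avg = a->load_sum * (1.0 - y) / PELT_PERIOD_US;
  }

  int main(void)
  {
          struct toy_avg a = { 0 };
          for (int i = 0; i < 200; i++)
                  toy_update(&a, PELT_PERIOD_US);  /* 100% busy */
          /* approaches 1.0 as more fully-busy periods accumulate */
          printf("fully-busy load_avg ~= %.3f\n", a.load_avg);
          return 0;
  }
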
debug.c (all matches in print_cfs_rq()):

   797  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
   806          SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
   813          root = __pick_root_entity(cfs_rq);
   816          first = __pick_first_entity(cfs_rq);
   819          last = __pick_last_entity(cfs_rq);
   822          min_vruntime = cfs_rq->min_vruntime;
   832                          SPLIT_NS(avg_vruntime(cfs_rq)));
   837          SEQ_printf(m, " .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
   838          SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
   839          SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
   [all …]
sched.h:

    80  struct cfs_rq;
   483          struct cfs_rq   **cfs_rq;                (member of struct task_group)
   579  extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
   586  extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
   612                  struct cfs_rq *prev, struct cfs_rq *next);
   675  struct cfs_rq {
  1147          struct cfs_rq           cfs;
  1326  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
  1328          return cfs_rq->rq;
  1333  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
  [all …]
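
The two rq_of() definitions at lines 1326 and 1333 are the two build variants: with CONFIG_FAIR_GROUP_SCHED each cfs_rq carries a back-pointer to its rq, while without it the only cfs_rq is the embedded `cfs` field of struct rq (line 1147) and the kernel recovers the enclosing rq with container_of(). A self-contained sketch of both strategies, using toy stand-in types rather than the kernel's:

  #include <stddef.h>
  #include <stdio.h>

  struct toy_rq;

  struct toy_cfs_rq {
          int nr_queued;
          struct toy_rq *rq;      /* back-pointer: group-scheduling variant */
  };

  struct toy_rq {
          struct toy_cfs_rq cfs;  /* embedded root cfs_rq */
  };

  /* Variant 1 (like line 1328): follow the stored back-pointer. */
  static struct toy_rq *rq_of_ptr(struct toy_cfs_rq *cfs_rq)
  {
          return cfs_rq->rq;
  }

  /* Variant 2 (like line 1333 onward): container_of()-style arithmetic,
   * subtracting the field offset to recover the enclosing struct. */
  static struct toy_rq *rq_of_embedded(struct toy_cfs_rq *cfs_rq)
  {
          return (struct toy_rq *)((char *)cfs_rq - offsetof(struct toy_rq, cfs));
  }

  int main(void)
  {
          struct toy_rq rq = { .cfs = { .nr_queued = 3 } };
          rq.cfs.rq = &rq;

          printf("%d %d\n", rq_of_ptr(&rq.cfs) == &rq,
                 rq_of_embedded(&rq.cfs) == &rq);  /* prints "1 1" */
          return 0;
  }
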
core.c:

  4457          p->se.cfs_rq = NULL;                              in __sched_fork()
  5472          struct sched_entity *curr = p->se.cfs_rq->curr;   in prefetch_curr_exec_start()
  8673          root_task_group.cfs_rq = (struct cfs_rq **)ptr;   in sched_init()
  9604          struct cfs_rq *cfs_rq = tg->cfs_rq[i];            in tg_set_cfs_bandwidth()
  9605          struct rq *rq = cfs_rq->rq;
  9608          cfs_rq->runtime_enabled = runtime_enabled;
  9609          cfs_rq->runtime_remaining = 1;
  9611          if (cfs_rq->throttled)
  9612                  unthrottle_cfs_rq(cfs_rq);
  9772          total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);   in throttled_time_self()

/linux/include/trace/events/

sched.h (TP_PROTO/TP_ARGS of two cfs_rq tracepoints):

   834          TP_PROTO(struct cfs_rq *cfs_rq),
   835          TP_ARGS(cfs_rq));
   866          TP_PROTO(struct cfs_rq *cfs_rq),
   867          TP_ARGS(cfs_rq));
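
These TP_PROTO/TP_ARGS pairs close with ");" directly after TP_ARGS, the shape of bare DECLARE_TRACE tracepoints rather than full TRACE_EVENTs; pelt_cfs_tp, called from pelt.c above, is one of the cfs_rq-typed ones, though which two sit at lines 834 and 866 is not visible in this excerpt. Bare tracepoints have no tracefs entry, so consumers attach a probe directly. A hedged module sketch, assuming pelt_cfs_tp and its DECLARE_TRACE-generated register/unregister helpers:

  #include <linux/module.h>
  #include <linux/tracepoint.h>
  #include <trace/events/sched.h>

  /* Probe signature: the opaque registration cookie comes first,
   * then the TP_PROTO arguments. */
  static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
  {
          trace_printk("pelt_cfs_tp fired\n");
  }

  static int __init pelt_probe_init(void)
  {
          return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
  }

  static void __exit pelt_probe_exit(void)
  {
          unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
          tracepoint_synchronize_unregister();
  }

  module_init(pelt_probe_init);
  module_exit(pelt_probe_exit);
  MODULE_LICENSE("GPL");
  MODULE_DESCRIPTION("toy pelt_cfs_tp probe");
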

/linux/include/linux/

sched.h:

    64  struct cfs_rq;
   606          struct cfs_rq                   *cfs_rq;   (member of struct sched_entity)
   608          struct cfs_rq                   *my_q;