Searched refs:sched_group (Results 1 – 7 of 7) sorted by relevance
  64  struct sched_group;
  77  struct sched_group *groups;   /* the balancing groups of the domain */
 172  struct sched_group *__percpu *sg;
  46  struct sched_group *group = sd->groups;   in sched_domain_debug_one()
 599  static void free_sched_groups(struct sched_group *sg, int free_sgc)   in free_sched_groups()
 601  struct sched_group *tmp, *first;   in free_sched_groups()
 752  struct sched_group *sg = sd->groups;   in cpu_attach_domain()
 799  int group_balance_cpu(struct sched_group *sg)   in group_balance_cpu()
 911  build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)   in build_balance_mask()
 947  static struct sched_group *
 950  struct sched_group *sg;   in build_group_from_child_sched_domain()
 953  sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),   in build_group_from_child_sched_domain()
 972  struct sched_group *sg)   in init_overlap_sched_group()
[all …]
  82  struct sched_group;
1372  static inline struct cpumask *sched_group_span(struct sched_group *sg);
1451  struct sched_group *group)   in sched_group_cookie_match()
1511  struct sched_group *group)   in sched_group_cookie_match()
2095  struct sched_group {   struct
2096  struct sched_group *next;   /* Must be a circular list */   argument
2115  static inline struct cpumask *sched_group_span(struct sched_group *sg)   in sched_group_span()   argument
2123  static inline struct cpumask *group_balance_mask(struct sched_group *sg)   in group_balance_mask()
2128  extern int group_balance_cpu(struct sched_group *sg);
 7423  static struct sched_group *
 7430  sched_balance_find_dst_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)   in sched_balance_find_dst_group_cpu()
 7502  struct sched_group *group;   in sched_balance_find_dst_cpu()
 7705  struct sched_group *sg = sd->groups;   in select_idle_cpu()
 9941  struct sched_group *busiest;   /* Busiest group in this sd */
 9942  struct sched_group *local;   /* Local group in this sd */
10003  struct sched_group *sdg = sd->groups;   in update_cpu_capacity()
10019  struct sched_group *group, *sdg = sd->groups;   in update_group_capacity()
10118  static inline int sg_imbalanced(struct sched_group *group)   in sg_imbalanced()
10179  struct sched_group *group,   in group_classify()
[all …]
 264  struct sched_group *sg;   in numa_weight()
 283  struct sched_group *sg;   in numa_span()
  28  Each scheduling domain must have one or more CPU scheduling groups (struct sched_group), organized as a singly-linked circular list
19 Each scheduling domain must have one or more CPU groups (struct sched_group)
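The documentation hits above, together with the struct definition hit ("struct sched_group *next; /* Must be a circular list */"), describe groups strung into a circular singly-linked list hanging off a domain's groups pointer. The following is a minimal plain-C sketch of that layout and the usual walk-until-it-wraps iteration idiom; the field set, the print_groups() helper, and the hand-built ring are simplified stand-ins for illustration, not the kernel's actual definitions.

/*
 * Minimal sketch: sched_group objects hang off a sched_domain as a
 * circular singly-linked list, walked by following ->next until it
 * wraps back to sd->groups.
 */
#include <stdio.h>

struct sched_group {
	struct sched_group *next;   /* must form a circular list */
	int id;                     /* stand-in for span/capacity data */
};

struct sched_domain {
	struct sched_group *groups; /* the balancing groups of the domain */
};

/* Visit every group exactly once: stop when the circular list wraps. */
static void print_groups(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	do {
		printf("group %d\n", sg->id);
		sg = sg->next;
	} while (sg != sd->groups);
}

int main(void)
{
	struct sched_group g0 = { .id = 0 }, g1 = { .id = 1 }, g2 = { .id = 2 };
	struct sched_domain sd = { .groups = &g0 };

	/* Close the ring: g0 -> g1 -> g2 -> g0. */
	g0.next = &g1;
	g1.next = &g2;
	g2.next = &g0;

	print_groups(&sd);
	return 0;
}

The do/while form mirrors the pattern visible in the hits above (e.g. the loops starting from sd->groups in cpu_attach_domain() and select_idle_cpu()): because the list is circular, the first group is also the termination sentinel.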