Lines Matching +full:3 +full:rd

228 pr_info("rd %*pbl: Checking EAS, CPUs do not have asymmetric capacities\n", in sched_is_eas_possible()
237 pr_info("rd %*pbl: Checking EAS, SMT is not supported\n", in sched_is_eas_possible()
245 pr_info("rd %*pbl: Checking EAS: frequency-invariant load tracking not yet supported", in sched_is_eas_possible()
256 pr_info("rd %*pbl: Checking EAS, cpufreq policy not set for CPU: %d", in sched_is_eas_possible()
265 pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n", in sched_is_eas_possible()
418 * 3. no SMT is detected.
419 * 4. schedutil is driving the frequency of all CPUs of the rd;
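
The hits above come from sched_is_eas_possible() and the comment over build_perf_domains(): Energy Aware Scheduling is only enabled for a root domain when the CPUs have asymmetric capacities, SMT is not active, load tracking is frequency-invariant, and schedutil drives the frequency of every CPU of the rd. A minimal user-space sketch of that check order, assuming the four conditions have already been gathered into booleans (the real function queries scheduler and cpufreq state directly):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical inputs: the four conditions are assumed precomputed. */
struct eas_inputs {
	bool asym_capacity;	/* CPUs have asymmetric capacities */
	bool smt_active;	/* SMT detected */
	bool freq_invariant;	/* frequency-invariant load tracking available */
	bool all_schedutil;	/* schedutil governs every CPU of the rd */
};

static bool eas_possible(const struct eas_inputs *in)
{
	if (!in->asym_capacity) {
		puts("EAS off: CPUs do not have asymmetric capacities");
		return false;
	}
	if (in->smt_active) {
		puts("EAS off: SMT is not supported");
		return false;
	}
	if (!in->freq_invariant) {
		puts("EAS off: frequency-invariant load tracking not yet supported");
		return false;
	}
	if (!in->all_schedutil) {
		puts("EAS off: schedutil is mandatory");
		return false;
	}
	return true;
}

int main(void)
{
	struct eas_inputs in = { true, false, true, true };

	printf("EAS possible: %d\n", eas_possible(&in));
	return 0;
}

When all four checks pass, build_perf_domains() goes on to build the perf-domain list and attach it to rd->pd, as the next group of hits shows.
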
427 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains() local
451 tmp = rd->pd; in build_perf_domains()
452 rcu_assign_pointer(rd->pd, pd); in build_perf_domains()
460 tmp = rd->pd; in build_perf_domains()
461 rcu_assign_pointer(rd->pd, NULL); in build_perf_domains()
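
The build_perf_domains() hits show how the perf-domain list is swapped: the old list is saved in tmp, the new one is published with rcu_assign_pointer(), and the destroy path publishes NULL the same way before retiring the old list. A toy sketch of that save-publish-retire ordering; the C11 atomic store and immediate free below only stand in for rcu_assign_pointer() and the RCU-deferred free, and are not safe against concurrent readers the way the kernel code is:

#include <stdatomic.h>
#include <stdlib.h>

struct pd_model { int dummy; };			/* stands in for struct perf_domain */

static _Atomic(struct pd_model *) pd_ptr;	/* stands in for rd->pd */

static void publish_pd(struct pd_model *new_pd)
{
	struct pd_model *old = atomic_load(&pd_ptr);	/* tmp = rd->pd */

	atomic_store(&pd_ptr, new_pd);	/* rcu_assign_pointer(rd->pd, pd) */
	free(old);			/* the kernel defers this until readers are done */
}

int main(void)
{
	publish_pd(malloc(sizeof(struct pd_model)));	/* build path: publish a new list */
	publish_pd(NULL);				/* destroy path: publish NULL */
	return 0;
}
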
473 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); in free_rootdomain() local
475 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
476 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
477 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
478 free_cpumask_var(rd->rto_mask); in free_rootdomain()
479 free_cpumask_var(rd->online); in free_rootdomain()
480 free_cpumask_var(rd->span); in free_rootdomain()
481 free_pd(rd->pd); in free_rootdomain()
482 kfree(rd); in free_rootdomain()
485 void rq_attach_root(struct rq *rq, struct root_domain *rd) in rq_attach_root() argument
492 if (rq->rd) { in rq_attach_root()
493 old_rd = rq->rd; in rq_attach_root()
509 atomic_inc(&rd->refcount); in rq_attach_root()
510 rq->rd = rd; in rq_attach_root()
512 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
518 * move the fair server bw to the rd if it already started. in rq_attach_root()
530 void sched_get_rd(struct root_domain *rd) in sched_get_rd() argument
532 atomic_inc(&rd->refcount); in sched_get_rd()
535 void sched_put_rd(struct root_domain *rd) in sched_put_rd() argument
537 if (!atomic_dec_and_test(&rd->refcount)) in sched_put_rd()
540 call_rcu(&rd->rcu, free_rootdomain); in sched_put_rd()
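
sched_get_rd()/sched_put_rd() give the root_domain lifetime rules in miniature: references are counted with atomic_inc()/atomic_dec_and_test(), and the final put hands the object to call_rcu() so free_rootdomain() only runs after a grace period. A self-contained sketch of the same get/put shape, with a C11 atomic counter and an immediate free standing in for the RCU-deferred one:

#include <stdatomic.h>
#include <stdlib.h>

struct rd_model {
	atomic_int refcount;
	/* span, online, dlo_mask, rto_mask, cpudl, cpupri ... in the real struct */
};

static void get_rd(struct rd_model *rd)
{
	atomic_fetch_add(&rd->refcount, 1);		/* sched_get_rd() */
}

static void put_rd(struct rd_model *rd)
{
	if (atomic_fetch_sub(&rd->refcount, 1) != 1)	/* atomic_dec_and_test() */
		return;
	free(rd);		/* kernel: call_rcu(&rd->rcu, free_rootdomain) */
}

int main(void)
{
	struct rd_model *rd = calloc(1, sizeof(*rd));

	get_rd(rd);		/* e.g. rq_attach_root() taking its reference */
	get_rd(rd);
	put_rd(rd);
	put_rd(rd);		/* last put frees the object */
	return 0;
}

This matches the rq_attach_root() hits above, where the new rd's refcount is bumped before rq->rd is repointed at it.
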
543 static int init_rootdomain(struct root_domain *rd) in init_rootdomain() argument
545 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) in init_rootdomain()
547 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) in init_rootdomain()
549 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) in init_rootdomain()
551 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) in init_rootdomain()
555 rd->rto_cpu = -1; in init_rootdomain()
556 raw_spin_lock_init(&rd->rto_lock); in init_rootdomain()
557 rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func); in init_rootdomain()
560 rd->visit_cookie = 0; in init_rootdomain()
561 init_dl_bw(&rd->dl_bw); in init_rootdomain()
562 if (cpudl_init(&rd->cpudl) != 0) in init_rootdomain()
565 if (cpupri_init(&rd->cpupri) != 0) in init_rootdomain()
570 cpudl_cleanup(&rd->cpudl); in init_rootdomain()
572 free_cpumask_var(rd->rto_mask); in init_rootdomain()
574 free_cpumask_var(rd->dlo_mask); in init_rootdomain()
576 free_cpumask_var(rd->online); in init_rootdomain()
578 free_cpumask_var(rd->span); in init_rootdomain()
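
init_rootdomain() is a textbook goto-unwind constructor: each allocation or init step that fails jumps to a label that releases only what was already set up, in reverse order (cpudl, rto_mask, dlo_mask, online, span), and alloc_rootdomain() below wraps it with a kzalloc() that is kfree()d if initialization fails. A small compile-and-run sketch of the same ladder using plain calloc()/free() and hypothetical names:

#include <stdlib.h>

struct masks { void *span, *online, *dlo_mask, *rto_mask; };

static int init_masks(struct masks *m)
{
	if (!(m->span = calloc(1, 128)))
		goto out;
	if (!(m->online = calloc(1, 128)))
		goto free_span;
	if (!(m->dlo_mask = calloc(1, 128)))
		goto free_online;
	if (!(m->rto_mask = calloc(1, 128)))
		goto free_dlo_mask;
	return 0;			/* success: caller owns all four masks */

free_dlo_mask:				/* labels unwind in reverse allocation order */
	free(m->dlo_mask);
free_online:
	free(m->online);
free_span:
	free(m->span);
out:
	return -1;
}

int main(void)
{
	struct masks m;

	return init_masks(&m) ? 1 : 0;
}

free_rootdomain() in the earlier hits is the matching destructor, tearing the same members down once the last reference is gone.
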
598 struct root_domain *rd; in alloc_rootdomain() local
600 rd = kzalloc(sizeof(*rd), GFP_KERNEL); in alloc_rootdomain()
601 if (!rd) in alloc_rootdomain()
604 if (init_rootdomain(rd) != 0) { in alloc_rootdomain()
605 kfree(rd); in alloc_rootdomain()
609 return rd; in alloc_rootdomain()
729 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
782 rq_attach_root(rq, rd); in cpu_attach_domain()
793 struct root_domain *rd; member
823 * node 0 1 2 3
827 * 3: 20 30 20 10
835 * 3 ----- 2
841 * For the above NUMA topology that gives 3 levels:
843 * NUMA-2 0-3 0-3 0-3 0-3
844 * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2}
846 * NUMA-1 0-1,3 0-2 1-3 0,2-3
847 * groups: {0},{1},{3} {0},{1},{2} {1},{2},{3} {0},{2},{3}
849 * NUMA-0 0 1 2 3
858 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
863 * gets us the first 0-1,3
864 * - the only uncovered node is 2, who's child domain is 1-3.
869 * end up at those groups (they would end up in group: 0-1,3).
884 * node 0 1 2 3
888 * 3: 30 20 20 10
896 * 2 ----- 3
898 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
904 * NUMA-2 0-3 0-3
905 * groups: {0-2},{1-3} {1-3},{0-2}
907 * NUMA-1 0-2 0-3 0-3 1-3
909 * NUMA-0 0 1 2 3
1072 * But for machines whose NUMA diameter are 3 or above, we move in build_overlap_sched_groups()
1077 * Smallest diameter=3 topology is: in build_overlap_sched_groups()
1079 * node 0 1 2 3 in build_overlap_sched_groups()
1083 * 3: 40 30 20 10 in build_overlap_sched_groups()
1085 * 0 --- 1 --- 2 --- 3 in build_overlap_sched_groups()
1087 * NUMA-3 0-3 N/A N/A 0-3 in build_overlap_sched_groups()
1088 * groups: {0-2},{1-3} {1-3},{0-2} in build_overlap_sched_groups()
1090 * NUMA-2 0-2 0-3 0-3 1-3 in build_overlap_sched_groups()
1091 * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2} in build_overlap_sched_groups()
1093 * NUMA-1 0-1 0-2 1-3 2-3 in build_overlap_sched_groups()
1094 * groups: {0},{1} {1},{2},{0} {2},{3},{1} {3},{2} in build_overlap_sched_groups()
1096 * NUMA-0 0 1 2 3 in build_overlap_sched_groups()
1098 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the in build_overlap_sched_groups()
1144 * The tree consists of 3 primary data structures:
1160 * CPU 0 1 2 3 4 5 6 7
1169 * MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1170 * SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1172 * CPU 0 1 2 3 4 5 6 7
1496 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
1497 free_rootdomain(&d->rd->rcu); in __free_domain_allocs()
1520 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
1521 if (!d->rd) in __visit_domain_allocation_hell()
2479 imb = sd->span_weight >> 3; in build_sched_domains()
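
The imb value above is one eighth of the domain's span_weight: the >> 3 divides by 8, so a 16-CPU span yields imb = 2 and a 32-CPU span yields imb = 4, about 12.5% of the span.
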
2516 cpu_attach_domain(sd, d.rd, i); in build_sched_domains()
2745 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
2750 /* No match - add perf domains for a new rd */ in partition_sched_domains_locked()