Lines Matching +full:in +full:- +full:masks
1 // SPDX-License-Identifier: GPL-2.0
44 struct sched_group *group = sd->groups; in sched_domain_debug_one()
45 unsigned long flags = sd->flags; in sched_domain_debug_one()
50 printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); in sched_domain_debug_one()
52 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
55 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
58 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
65 if ((meta_flags & SDF_SHARED_CHILD) && sd->child && in sched_domain_debug_one()
66 !(sd->child->flags & flag)) in sched_domain_debug_one()
67 printk(KERN_ERR "ERROR: flag %s set here but not in child\n", in sched_domain_debug_one()
70 if ((meta_flags & SDF_SHARED_PARENT) && sd->parent && in sched_domain_debug_one()
71 !(sd->parent->flags & flag)) in sched_domain_debug_one()
72 printk(KERN_ERR "ERROR: flag %s set here but not in parent\n", in sched_domain_debug_one()
90 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
100 group->sgc->id, in sched_domain_debug_one()
103 if ((sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
109 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) in sched_domain_debug_one()
110 printk(KERN_CONT " cap=%lu", group->sgc->capacity); in sched_domain_debug_one()
112 if (group == sd->groups && sd->child && in sched_domain_debug_one()
113 !cpumask_equal(sched_domain_span(sd->child), in sched_domain_debug_one()
115 printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n"); in sched_domain_debug_one()
120 group = group->next; in sched_domain_debug_one()
122 if (group != sd->groups) in sched_domain_debug_one()
125 } while (group != sd->groups); in sched_domain_debug_one()
129 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); in sched_domain_debug_one()
131 if (sd->parent && in sched_domain_debug_one()
132 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) in sched_domain_debug_one()
133 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); in sched_domain_debug_one()
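The flag checks above are driven by a table of per-flag metadata: a flag tagged SDF_SHARED_CHILD must also be set on the child domain, and one tagged SDF_SHARED_PARENT must also be set on the parent. Below is a minimal userspace sketch of that metadata-driven consistency check; the flag values, metadata bits and toy_domain type are invented for illustration (the real definitions live in the kernel's SD flags header) and are not the kernel's own.

/* Illustrative userspace sketch of the SDF_SHARED_CHILD/SDF_SHARED_PARENT
 * consistency check done by sched_domain_debug_one(); values are made up. */
#include <stdio.h>

#define SD_FLAG_A         0x1   /* hypothetical flag, shared with children */
#define SD_FLAG_B         0x2   /* hypothetical flag, shared with parents  */

#define SDF_SHARED_CHILD  0x1
#define SDF_SHARED_PARENT 0x2

#define ARRAY_SIZE(a)     (sizeof(a) / sizeof((a)[0]))

struct toy_domain {
        unsigned long flags;
        struct toy_domain *parent, *child;
};

static const struct { unsigned long flag, meta; const char *name; } flag_table[] = {
        { SD_FLAG_A, SDF_SHARED_CHILD,  "SD_FLAG_A" },
        { SD_FLAG_B, SDF_SHARED_PARENT, "SD_FLAG_B" },
};

static void check_flags(const struct toy_domain *sd)
{
        for (unsigned int i = 0; i < ARRAY_SIZE(flag_table); i++) {
                unsigned long flag = flag_table[i].flag;

                if (!(sd->flags & flag))
                        continue;
                if ((flag_table[i].meta & SDF_SHARED_CHILD) && sd->child &&
                    !(sd->child->flags & flag))
                        printf("ERROR: flag %s set here but not in child\n",
                               flag_table[i].name);
                if ((flag_table[i].meta & SDF_SHARED_PARENT) && sd->parent &&
                    !(sd->parent->flags & flag))
                        printf("ERROR: flag %s set here but not in parent\n",
                               flag_table[i].name);
        }
}

int main(void)
{
        struct toy_domain child = { .flags = 0 };
        struct toy_domain sd = { .flags = SD_FLAG_A, .child = &child };

        child.parent = &sd;
        check_flags(&sd);       /* reports SD_FLAG_A missing in the child */
        return 0;
}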
145 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
149 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
155 sd = sd->parent; in sched_domain_debug()
174 if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) && in sd_degenerate()
175 (sd->groups != sd->groups->next)) in sd_degenerate()
179 if (sd->flags & (SD_WAKE_AFFINE)) in sd_degenerate()
188 unsigned long cflags = sd->flags, pflags = parent->flags; in sd_parent_degenerate()
196 /* Flags needing groups don't count if only 1 group in parent */ in sd_parent_degenerate()
197 if (parent->groups == parent->groups->next) in sd_parent_degenerate()
245 pr_info("rd %*pbl: Checking EAS: frequency-invariant load tracking not yet supported", in sched_is_eas_possible()
261 gov = policy->governor; in sched_is_eas_possible()
291 return -EPERM; in sched_energy_aware_handler()
295 return -EOPNOTSUPP; in sched_energy_aware_handler()
338 tmp = pd->next; in free_pd()
349 pd = pd->next; in find_pd()
369 pd->em_pd = obj; in pd_init()
386 em_pd_nr_perf_states(pd->em_pd)); in perf_domain_debug()
387 pd = pd->next; in perf_domain_debug()
417 * 2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy.
427 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
444 tmp->next = pd; in build_perf_domains()
451 tmp = rd->pd; in build_perf_domains()
452 rcu_assign_pointer(rd->pd, pd); in build_perf_domains()
454 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); in build_perf_domains()
460 tmp = rd->pd; in build_perf_domains()
461 rcu_assign_pointer(rd->pd, NULL); in build_perf_domains()
463 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); in build_perf_domains()
475 cpupri_cleanup(&rd->cpupri); in free_rootdomain()
476 cpudl_cleanup(&rd->cpudl); in free_rootdomain()
477 free_cpumask_var(rd->dlo_mask); in free_rootdomain()
478 free_cpumask_var(rd->rto_mask); in free_rootdomain()
479 free_cpumask_var(rd->online); in free_rootdomain()
480 free_cpumask_var(rd->span); in free_rootdomain()
481 free_pd(rd->pd); in free_rootdomain()
492 if (rq->rd) { in rq_attach_root()
493 old_rd = rq->rd; in rq_attach_root()
495 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
498 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
503 * in this function: in rq_attach_root()
505 if (!atomic_dec_and_test(&old_rd->refcount)) in rq_attach_root()
509 atomic_inc(&rd->refcount); in rq_attach_root()
510 rq->rd = rd; in rq_attach_root()
512 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
513 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
521 if (rq->fair_server.dl_server) in rq_attach_root()
522 __dl_server_attach_root(&rq->fair_server, rq); in rq_attach_root()
527 call_rcu(&old_rd->rcu, free_rootdomain); in rq_attach_root()
532 atomic_inc(&rd->refcount); in sched_get_rd()
537 if (!atomic_dec_and_test(&rd->refcount)) in sched_put_rd()
540 call_rcu(&rd->rcu, free_rootdomain); in sched_put_rd()
545 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) in init_rootdomain()
547 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) in init_rootdomain()
549 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) in init_rootdomain()
551 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) in init_rootdomain()
555 rd->rto_cpu = -1; in init_rootdomain()
556 raw_spin_lock_init(&rd->rto_lock); in init_rootdomain()
557 rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func); in init_rootdomain()
560 rd->visit_cookie = 0; in init_rootdomain()
561 init_dl_bw(&rd->dl_bw); in init_rootdomain()
562 if (cpudl_init(&rd->cpudl) != 0) in init_rootdomain()
565 if (cpupri_init(&rd->cpupri) != 0) in init_rootdomain()
570 cpudl_cleanup(&rd->cpudl); in init_rootdomain()
572 free_cpumask_var(rd->rto_mask); in init_rootdomain()
574 free_cpumask_var(rd->dlo_mask); in init_rootdomain()
576 free_cpumask_var(rd->online); in init_rootdomain()
578 free_cpumask_var(rd->span); in init_rootdomain()
580 return -ENOMEM; in init_rootdomain()
584 * By default the system creates a single root-domain with all CPUs as
621 tmp = sg->next; in free_sched_groups()
623 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
624 kfree(sg->sgc); in free_sched_groups()
626 if (atomic_dec_and_test(&sg->ref)) in free_sched_groups()
639 free_sched_groups(sd->groups, 1); in destroy_sched_domain()
641 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) in destroy_sched_domain()
642 kfree(sd->shared); in destroy_sched_domain()
651 struct sched_domain *parent = sd->parent; in destroy_sched_domains_rcu()
660 call_rcu(&sd->rcu, destroy_sched_domains_rcu); in destroy_sched_domains()
668 * Also keep a unique ID per domain (we use the first CPU number in the cpumask
669 * of the domain), this allows us to quickly tell if two CPUs are in the same
695 sds = sd->shared; in update_top_cache_domain()
710 * but equals the LLC id on non-Cluster machines. in update_top_cache_domain()
736 struct sched_domain *parent = tmp->parent; in cpu_attach_domain()
741 tmp->parent = parent->parent; in cpu_attach_domain()
743 if (parent->parent) { in cpu_attach_domain()
744 parent->parent->child = tmp; in cpu_attach_domain()
745 parent->parent->groups->flags = tmp->flags; in cpu_attach_domain()
749 * Transfer SD_PREFER_SIBLING down in case of a in cpu_attach_domain()
753 if (parent->flags & SD_PREFER_SIBLING) in cpu_attach_domain()
754 tmp->flags |= SD_PREFER_SIBLING; in cpu_attach_domain()
757 tmp = tmp->parent; in cpu_attach_domain()
762 sd = sd->parent; in cpu_attach_domain()
765 struct sched_group *sg = sd->groups; in cpu_attach_domain()
773 sg->flags = 0; in cpu_attach_domain()
774 } while (sg != sd->groups); in cpu_attach_domain()
776 sd->child = NULL; in cpu_attach_domain()
783 tmp = rq->sd; in cpu_attach_domain()
784 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
805 * of this group that's also in the balance mask.
821 * Given a node-distance table, for example:
831 * 0 ----- 1
835 * 3 ----- 2
839 * construct the mask of all nodes reachable in @level hops.
843 * NUMA-2 0-3 0-3 0-3 0-3
844 * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2}
846 * NUMA-1 0-1,3 0-2 1-3 0,2-3
849 * NUMA-0 0 1 2 3
853 * When we iterate a domain in child domain chunks some nodes can be
854 * represented multiple times -- hence the "overlap" naming for this part of
857 * In order to minimize this overlap, we only build enough groups to cover the
858 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
862 * - the first group of each domain is its child domain; this
863 * gets us the first 0-1,3
864 * - the only uncovered node is 2, whose child domain is 1-3.
867 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
868 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
869 * end up at those groups (they would end up in group: 0-1,3).
872 * will contain those CPUs in the group that can reach this group given the
892 * 0 ----- 1
896 * 2 ----- 3
904 * NUMA-2 0-3 0-3
905 * groups: {0-2},{1-3} {1-3},{0-2}
907 * NUMA-1 0-2 0-3 0-3 1-3
909 * NUMA-0 0 1 2 3
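Both example hierarchies above follow mechanically from the node_distance() table: for each unique distance value ("level"), a node's mask contains every node whose distance from it does not exceed that value. The userspace sketch below reproduces that construction for the first 4-node ring; the distance table (10 local, 20 for one hop, 30 for two) is an assumption chosen to match the spans shown, and a plain bitmask stands in for a cpumask.

/* Userspace sketch: build "reachable within this distance" node masks from a
 * distance table, mirroring what sched_init_numa() does with cpumasks. */
#include <stdio.h>

#define NR_NODES 4

/* Illustrative distances for the 0-1-2-3 ring drawn above. */
static const int dist[NR_NODES][NR_NODES] = {
        { 10, 20, 30, 20 },
        { 20, 10, 20, 30 },
        { 30, 20, 10, 20 },
        { 20, 30, 20, 10 },
};

/* Mask of all nodes within 'limit' distance of 'node'. */
static unsigned int nodes_within(int node, int limit)
{
        unsigned int mask = 0;

        for (int j = 0; j < NR_NODES; j++)
                if (dist[node][j] <= limit)
                        mask |= 1u << j;
        return mask;
}

int main(void)
{
        static const int levels[] = { 10, 20, 30 };     /* unique distances */

        for (unsigned int l = 0; l < 3; l++)
                for (int n = 0; n < NR_NODES; n++)
                        printf("level %u (dist <= %d), node %d: mask 0x%x\n",
                               l, levels[l], n, nodes_within(n, levels[l]));
        return 0;
}

For node 0 at the one-hop level this yields {0,1,3}, matching the NUMA-1 span 0-1,3 shown above.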
927 struct sd_data *sdd = sd->private; in build_balance_mask()
934 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
937 * Can happen in the asymmetric case, where these siblings are in build_balance_mask()
941 if (!sibling->child) in build_balance_mask()
945 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) in build_balance_mask()
951 /* We must not have empty masks here */ in build_balance_mask()
956 * XXX: This creates per-node group entries; since the load-balancer will
957 * immediately access remote memory to construct this group's load-balance
973 if (sd->child) { in build_group_from_child_sched_domain()
974 cpumask_copy(sg_span, sched_domain_span(sd->child)); in build_group_from_child_sched_domain()
975 sg->flags = sd->child->flags; in build_group_from_child_sched_domain()
980 atomic_inc(&sg->ref); in build_group_from_child_sched_domain()
988 struct sd_data *sdd = sd->private; in init_overlap_sched_group()
995 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
996 if (atomic_inc_return(&sg->sgc->ref) == 1) in init_overlap_sched_group()
1002 * Initialize sgc->capacity such that even if we mess up the in init_overlap_sched_group()
1007 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
1008 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
1009 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
1019 while (sibling->child && in find_descended_sibling()
1020 !cpumask_subset(sched_domain_span(sibling->child), in find_descended_sibling()
1022 sibling = sibling->child; in find_descended_sibling()
1027 * scheduling because they will be degenerated in cpu_attach_domain in find_descended_sibling()
1029 while (sibling->child && in find_descended_sibling()
1030 cpumask_equal(sched_domain_span(sibling->child), in find_descended_sibling()
1032 sibling = sibling->child; in find_descended_sibling()
1043 struct sd_data *sdd = sd->private; in build_overlap_sched_groups()
1055 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
1058 * Asymmetric node setups can result in situations where the in build_overlap_sched_groups()
1062 * In that case build_sched_domains() will have terminated the in build_overlap_sched_groups()
1085 * 0 --- 1 --- 2 --- 3 in build_overlap_sched_groups()
1087 * NUMA-3 0-3 N/A N/A 0-3 in build_overlap_sched_groups()
1088 * groups: {0-2},{1-3} {1-3},{0-2} in build_overlap_sched_groups()
1090 * NUMA-2 0-2 0-3 0-3 1-3 in build_overlap_sched_groups()
1091 * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2} in build_overlap_sched_groups()
1093 * NUMA-1 0-1 0-2 1-3 2-3 in build_overlap_sched_groups()
1096 * NUMA-0 0 1 2 3 in build_overlap_sched_groups()
1098 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the in build_overlap_sched_groups()
1101 if (sibling->child && in build_overlap_sched_groups()
1102 !cpumask_subset(sched_domain_span(sibling->child), span)) in build_overlap_sched_groups()
1117 last->next = sg; in build_overlap_sched_groups()
1119 last->next = first; in build_overlap_sched_groups()
1121 sd->groups = first; in build_overlap_sched_groups()
1128 return -ENOMEM; in build_overlap_sched_groups()
1133 * Package topology (also see the load-balance blurb in fair.c)
1138 * - Simultaneous multithreading (SMT)
1139 * - Multi-Core Cache (MC)
1140 * - Package (PKG)
1146 * sched_domain -> sched_group -> sched_group_capacity
1148 * `-' `-'
1150 * The sched_domains are per-CPU and have a two way link (parent & child) and
1154 * denoting the domains of the level below (or individual CPUs in case of the
1166 * - or -
1168 * PKG 0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1169 * MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1170 * SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1183 * - The first is the balance_cpu (see should_we_balance() and the
1184 * load-balance blurb in fair.c); for each group we only want 1 CPU to
1187 * - The second is the sched_group_capacity; we want all identical groups
1193 * for each CPU in the hierarchy.
1196 * mask is redundant and set all 1s; all CPUs in a group will end up at _that_
1197 * group), we can simply pick the first CPU in each group.
1200 * [*] in other words, the first group of each domain is its child domain.
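As a rough mental model of the structure described above (a per-CPU chain of domains, each owning a circular list of groups whose first group covers the child domain's span), here is a toy userspace mock-up for the 8-CPU SMT/MC/PKG example. The struct and field names are invented and heavily simplified; they are not the kernel's sched_domain/sched_group definitions.

/* Toy model of the sched_domain -> sched_group linkage for one CPU of the
 * 8-CPU SMT/MC/PKG example above; not the kernel's real structures. */
#include <stdio.h>

struct toy_group {
        unsigned int span;              /* bitmask of CPUs in the group */
        struct toy_group *next;         /* circular list */
};

struct toy_domain {
        const char *name;
        unsigned int span;
        struct toy_domain *parent, *child;
        struct toy_group *groups;       /* first group == child's span */
};

static void walk(const struct toy_domain *sd)
{
        for (; sd; sd = sd->parent) {
                const struct toy_group *sg = sd->groups;

                printf("%-3s span=0x%02x groups:", sd->name, sd->span);
                do {
                        printf(" 0x%02x", sg->span);
                        sg = sg->next;
                } while (sg != sd->groups);
                printf("\n");
        }
}

int main(void)
{
        /* CPU0's view: SMT {0,1}, MC {0-3} split in SMT pairs, PKG {0-7}. */
        struct toy_group smt_g[2] = { { .span = 0x01 }, { .span = 0x02 } };
        struct toy_group mc_g[2]  = { { .span = 0x03 }, { .span = 0x0c } };
        struct toy_group pkg_g[2] = { { .span = 0x0f }, { .span = 0xf0 } };
        struct toy_domain smt = { .name = "SMT", .span = 0x03, .groups = smt_g };
        struct toy_domain mc  = { .name = "MC",  .span = 0x0f, .groups = mc_g,  .child = &smt };
        struct toy_domain pkg = { .name = "PKG", .span = 0xff, .groups = pkg_g, .child = &mc };

        smt_g[0].next = &smt_g[1]; smt_g[1].next = &smt_g[0];
        mc_g[0].next  = &mc_g[1];  mc_g[1].next  = &mc_g[0];
        pkg_g[0].next = &pkg_g[1]; pkg_g[1].next = &pkg_g[0];
        smt.parent = &mc;
        mc.parent  = &pkg;

        walk(&smt);     /* prints 0x03, 0x0f, 0xff with their groups */
        return 0;
}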
1205 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1206 struct sched_domain *child = sd->child; in get_group()
1213 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1214 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1217 already_visited = atomic_inc_return(&sg->ref) > 1; in get_group()
1219 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); in get_group()
1228 sg->flags = child->flags; in get_group()
1234 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1235 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1236 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
1243 * covered by the given span, will set each group's ->cpumask correctly,
1244 * and will initialize their ->sgc.
1252 struct sd_data *sdd = sd->private; in build_sched_groups()
1275 last->next = sg; in build_sched_groups()
1278 last->next = first; in build_sched_groups()
1279 sd->groups = first; in build_sched_groups()
1288 * distributing the load between different sched groups in a sched domain.
1289 * Typically cpu_capacity for all the groups in a sched domain will be the same
1290 * unless there are asymmetries in the topology. If there are asymmetries,
1296 struct sched_group *sg = sd->groups; in init_sched_groups_capacity()
1302 int cpu, cores = 0, max_cpu = -1; in init_sched_groups_capacity()
1304 sg->group_weight = cpumask_weight(sched_group_span(sg)); in init_sched_groups_capacity()
1313 sg->cores = cores; in init_sched_groups_capacity()
1315 if (!(sd->flags & SD_ASYM_PACKING)) in init_sched_groups_capacity()
1324 sg->asym_prefer_cpu = max_cpu; in init_sched_groups_capacity()
1327 sg = sg->next; in init_sched_groups_capacity()
1328 } while (sg != sd->groups); in init_sched_groups_capacity()
1345 * Verify whether there is any CPU capacity asymmetry in a given sched domain.
1399 if (capacity == entry->capacity) in asym_cpu_capacity_update_data()
1401 else if (!insert_entry && capacity > entry->capacity) in asym_cpu_capacity_update_data()
1408 entry->capacity = capacity; in asym_cpu_capacity_update_data()
1412 list_add_tail_rcu(&entry->link, &asym_cap_list); in asym_cpu_capacity_update_data()
1414 list_add_rcu(&entry->link, &insert_entry->link); in asym_cpu_capacity_update_data()
1420 * Build-up/update list of CPUs grouped by their capacities
1437 list_del_rcu(&entry->link); in asym_cpu_capacity_scan()
1438 call_rcu(&entry->rcu, free_asym_cap_entry); in asym_cpu_capacity_scan()
1448 list_del_rcu(&entry->link); in asym_cpu_capacity_scan()
1449 call_rcu(&entry->rcu, free_asym_cap_entry); in asym_cpu_capacity_scan()
1455 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1458 static int default_relax_domain_level = -1;
1475 if (!attr || attr->relax_domain_level < 0) { in set_domain_attribute()
1480 request = attr->relax_domain_level; in set_domain_attribute()
1482 if (sd->level >= request) { in set_domain_attribute()
1484 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
1496 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
1497 free_rootdomain(&d->rd->rcu); in __free_domain_allocs()
1500 free_percpu(d->sd); in __free_domain_allocs()
1517 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
1518 if (!d->sd) in __visit_domain_allocation_hell()
1520 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
1521 if (!d->rd) in __visit_domain_allocation_hell()
1534 struct sd_data *sdd = sd->private; in claim_allocations()
1536 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1537 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1539 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1540 *per_cpu_ptr(sdd->sds, cpu) = NULL; in claim_allocations()
1542 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1543 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
1545 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1546 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
1561 * SD_flags allowed in topology descriptions.
1564 * behaviour. Behaviour is artificial and mapped in the below sd_init()
1575 * SD_ASYM_PACKING - describes SMT quirks
1589 struct sd_data *sdd = &tl->data; in sd_init()
1590 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init()
1598 sched_domains_curr_level = tl->numa_level; in sd_init()
1601 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
1603 if (tl->sd_flags) in sd_init()
1604 sd_flags = (*tl->sd_flags)(); in sd_init()
1606 "wrong sd_flags in topology description\n")) in sd_init()
1635 .name = tl->name, in sd_init()
1639 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); in sd_init()
1642 sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); in sd_init()
1644 WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == in sd_init()
1652 if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) in sd_init()
1653 sd->child->flags &= ~SD_PREFER_SIBLING; in sd_init()
1655 if (sd->flags & SD_SHARE_CPUCAPACITY) { in sd_init()
1656 sd->imbalance_pct = 110; in sd_init()
1658 } else if (sd->flags & SD_SHARE_LLC) { in sd_init()
1659 sd->imbalance_pct = 117; in sd_init()
1660 sd->cache_nice_tries = 1; in sd_init()
1663 } else if (sd->flags & SD_NUMA) { in sd_init()
1664 sd->cache_nice_tries = 2; in sd_init()
1666 sd->flags &= ~SD_PREFER_SIBLING; in sd_init()
1667 sd->flags |= SD_SERIALIZE; in sd_init()
1668 if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { in sd_init()
1669 sd->flags &= ~(SD_BALANCE_EXEC | in sd_init()
1676 sd->cache_nice_tries = 1; in sd_init()
1683 if (sd->flags & SD_SHARE_LLC) { in sd_init()
1684 sd->shared = *per_cpu_ptr(sdd->sds, sd_id); in sd_init()
1685 atomic_inc(&sd->shared->ref); in sd_init()
1686 atomic_set(&sd->shared->nr_busy_cpus, sd_weight); in sd_init()
1689 sd->private = sdd; in sd_init()
1695 * Topology list, bottom-up.
1718 for (tl = sched_domain_topology; tl->mask; tl++)
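The loop above walks a table of topology levels terminated by an entry whose mask callback is NULL (SMT, MC and PKG by default, with NUMA levels appended at runtime). Below is a minimal sketch of that NULL-terminated, table-driven pattern; the level names and mask callbacks are placeholders, not the kernel's.

/* Sketch of walking a NULL-terminated topology-level table. */
#include <stdio.h>

struct toy_tl {
        unsigned int (*mask)(int cpu);  /* span of this level for @cpu */
        const char *name;
};

static unsigned int smt_mask(int cpu) { return 0x3u << (cpu & ~1); }
static unsigned int pkg_mask(int cpu) { (void)cpu; return 0xffu; }

static const struct toy_tl topology[] = {
        { smt_mask, "SMT" },
        { pkg_mask, "PKG" },
        { NULL, NULL },                 /* terminator, like tl->mask == NULL */
};

int main(void)
{
        for (const struct toy_tl *tl = topology; tl->mask; tl++)
                printf("%s: cpu2 span=0x%02x\n", tl->name, tl->mask(2));
        return 0;
}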
1798 * topology lies in whether communication between not directly
1804 * - If the maximum distance between any nodes is 1 hop, the system
1806 * - If for two nodes A and B, located N > 1 hops away from each other,
1856 struct cpumask ***masks; in sched_init_numa() local
1859 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the in sched_init_numa()
1860 * unique distances in the node_distance() table. in sched_init_numa()
1912 * in other functions. in sched_init_numa()
1918 masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL); in sched_init_numa()
1919 if (!masks) in sched_init_numa()
1927 masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); in sched_init_numa()
1928 if (!masks[i]) in sched_init_numa()
1938 masks[i][j] = mask; in sched_init_numa()
1942 sched_numa_warn("Node-distance not symmetric"); in sched_init_numa()
1951 rcu_assign_pointer(sched_domains_numa_masks, masks); in sched_init_numa()
1993 WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]); in sched_init_numa()
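The sketch below illustrates the de-duplicating sort mentioned in the comment above: it extracts the sorted set of unique distances from a node_distance()-style table, and those values then become the NUMA levels. It is a self-contained userspace approximation with an illustrative distance table, not the kernel's implementation.

/* Userspace sketch of finding the unique distances in a distance table. */
#include <stdio.h>

#define NR_NODES 4

static const int dist[NR_NODES][NR_NODES] = {
        { 10, 20, 30, 20 },
        { 20, 10, 20, 30 },
        { 30, 20, 10, 20 },
        { 20, 30, 20, 10 },
};

int main(void)
{
        int levels[NR_NODES * NR_NODES];
        int nr_levels = 0;

        /* Record each distance once, keeping the list sorted; the O(n^2)
         * scan is fine for the handful of unique distances seen in practice. */
        for (int i = 0; i < NR_NODES; i++) {
                for (int j = 0; j < NR_NODES; j++) {
                        int d = dist[i][j], k;

                        for (k = 0; k < nr_levels && levels[k] < d; k++)
                                ;
                        if (k < nr_levels && levels[k] == d)
                                continue;       /* already recorded */
                        for (int m = nr_levels; m > k; m--)
                                levels[m] = levels[m - 1];
                        levels[k] = d;
                        nr_levels++;
                }
        }

        for (int k = 0; k < nr_levels; k++)
                printf("level %d: distance %d\n", k, levels[k]);
        return 0;       /* prints 10, 20, 30 for the table above */
}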
2002 struct cpumask ***masks; in sched_reset_numa() local
2010 masks = sched_domains_numa_masks; in sched_reset_numa()
2012 if (distances || masks) { in sched_reset_numa()
2017 for (i = 0; i < nr_levels && masks; i++) { in sched_reset_numa()
2018 if (!masks[i]) in sched_reset_numa()
2021 kfree(masks[i][j]); in sched_reset_numa()
2022 kfree(masks[i]); in sched_reset_numa()
2024 kfree(masks); in sched_reset_numa()
2062 /* Set ourselves in the remote node's masks */ in sched_domains_numa_masks_set()
2082 * sched_numa_find_closest() - given the NUMA topology, find the cpu
2092 struct cpumask ***masks; in sched_numa_find_closest() local
2095 masks = rcu_dereference(sched_domains_numa_masks); in sched_numa_find_closest()
2096 if (!masks) in sched_numa_find_closest()
2099 if (!masks[i][j]) in sched_numa_find_closest()
2101 cpu = cpumask_any_and(cpus, masks[i][j]); in sched_numa_find_closest()
2115 struct cpumask ***masks; member
2126 if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu) in hop_cmp()
2129 if (b == k->masks) { in hop_cmp()
2130 k->w = 0; in hop_cmp()
2134 prev_hop = *((struct cpumask ***)b - 1); in hop_cmp()
2135 k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]); in hop_cmp()
2136 if (k->w <= k->cpu) in hop_cmp()
2139 return -1; in hop_cmp()
2143 * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth closest CPU
2163 /* CPU-less node entries are uninitialized in sched_domains_numa_masks */ in sched_numa_find_nth_cpu()
2167 k.masks = rcu_dereference(sched_domains_numa_masks); in sched_numa_find_nth_cpu()
2168 if (!k.masks) in sched_numa_find_nth_cpu()
2171 hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp); in sched_numa_find_nth_cpu()
2172 hop = hop_masks - k.masks; in sched_numa_find_nth_cpu()
2175 cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) : in sched_numa_find_nth_cpu()
2176 cpumask_nth_and(cpu, cpus, k.masks[0][node]); in sched_numa_find_nth_cpu()
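The function above relies on sched_domains_numa_masks being cumulative per hop, so the Nth closest CPU can be located by finding the first hop whose mask already covers more than N of the requested CPUs and then indexing into the newly added "ring". The sketch below captures that idea in userspace; it scans the hop array linearly instead of bsearch()ing it as hop_cmp() does, and the masks and helper names are invented.

/* Userspace sketch of finding the Nth closest CPU from cumulative
 * per-hop masks, in the spirit of sched_numa_find_nth_cpu() above. */
#include <stdio.h>

#define NR_HOPS 3

/* Cumulative masks for one node: CPUs reachable within 0, 1, 2 hops. */
static const unsigned int hop_mask[NR_HOPS] = { 0x03, 0x0f, 0xff };

static int nth_set_bit(unsigned int mask, int n)
{
        for (int bit = 0; bit < 32; bit++)
                if ((mask & (1u << bit)) && n-- == 0)
                        return bit;
        return -1;
}

/* Find the Nth CPU of 'cpus', ordered by hop distance from the node. */
static int find_nth_closest(unsigned int cpus, int n)
{
        int prev_w = 0;

        for (int hop = 0; hop < NR_HOPS; hop++) {
                int w = __builtin_popcount(cpus & hop_mask[hop]);

                if (n < w) {
                        /* The Nth CPU lies in the hop/hop-1 "ring". */
                        unsigned int ring = cpus & hop_mask[hop] &
                                            (hop ? ~hop_mask[hop - 1] : ~0u);
                        return nth_set_bit(ring, n - prev_w);
                }
                prev_w = w;
        }
        return -1;      /* fewer than n+1 requested CPUs are reachable */
}

int main(void)
{
        /* Ask for the 3rd (0-based) CPU of mask 0xaa, closest first. */
        printf("cpu = %d\n", find_nth_closest(0xaa, 3));
        return 0;
}

For the mask 0xaa (CPUs 1,3,5,7) and N=3 this returns CPU 7, the farthest of the four.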
2184 * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
2193 * read-side section, copy it if required beyond that.
2195 * Note that not all hops are equal in distance; see sched_init_numa() for how
2196 * distances and masks are handled.
2198 * during the lifetime of the system (offline nodes are taken out of the masks).
2202 struct cpumask ***masks; in sched_numa_hop_mask() local
2205 return ERR_PTR(-EINVAL); in sched_numa_hop_mask()
2207 masks = rcu_dereference(sched_domains_numa_masks); in sched_numa_hop_mask()
2208 if (!masks) in sched_numa_hop_mask()
2209 return ERR_PTR(-EBUSY); in sched_numa_hop_mask()
2211 return masks[hops][node]; in sched_numa_hop_mask()
2223 struct sd_data *sdd = &tl->data; in __sdt_alloc()
2225 sdd->sd = alloc_percpu(struct sched_domain *); in __sdt_alloc()
2226 if (!sdd->sd) in __sdt_alloc()
2227 return -ENOMEM; in __sdt_alloc()
2229 sdd->sds = alloc_percpu(struct sched_domain_shared *); in __sdt_alloc()
2230 if (!sdd->sds) in __sdt_alloc()
2231 return -ENOMEM; in __sdt_alloc()
2233 sdd->sg = alloc_percpu(struct sched_group *); in __sdt_alloc()
2234 if (!sdd->sg) in __sdt_alloc()
2235 return -ENOMEM; in __sdt_alloc()
2237 sdd->sgc = alloc_percpu(struct sched_group_capacity *); in __sdt_alloc()
2238 if (!sdd->sgc) in __sdt_alloc()
2239 return -ENOMEM; in __sdt_alloc()
2250 return -ENOMEM; in __sdt_alloc()
2252 *per_cpu_ptr(sdd->sd, j) = sd; in __sdt_alloc()
2257 return -ENOMEM; in __sdt_alloc()
2259 *per_cpu_ptr(sdd->sds, j) = sds; in __sdt_alloc()
2264 return -ENOMEM; in __sdt_alloc()
2266 sg->next = sg; in __sdt_alloc()
2268 *per_cpu_ptr(sdd->sg, j) = sg; in __sdt_alloc()
2273 return -ENOMEM; in __sdt_alloc()
2275 sgc->id = j; in __sdt_alloc()
2277 *per_cpu_ptr(sdd->sgc, j) = sgc; in __sdt_alloc()
2290 struct sd_data *sdd = &tl->data; in __sdt_free()
2295 if (sdd->sd) { in __sdt_free()
2296 sd = *per_cpu_ptr(sdd->sd, j); in __sdt_free()
2297 if (sd && (sd->flags & SD_OVERLAP)) in __sdt_free()
2298 free_sched_groups(sd->groups, 0); in __sdt_free()
2299 kfree(*per_cpu_ptr(sdd->sd, j)); in __sdt_free()
2302 if (sdd->sds) in __sdt_free()
2303 kfree(*per_cpu_ptr(sdd->sds, j)); in __sdt_free()
2304 if (sdd->sg) in __sdt_free()
2305 kfree(*per_cpu_ptr(sdd->sg, j)); in __sdt_free()
2306 if (sdd->sgc) in __sdt_free()
2307 kfree(*per_cpu_ptr(sdd->sgc, j)); in __sdt_free()
2309 free_percpu(sdd->sd); in __sdt_free()
2310 sdd->sd = NULL; in __sdt_free()
2311 free_percpu(sdd->sds); in __sdt_free()
2312 sdd->sds = NULL; in __sdt_free()
2313 free_percpu(sdd->sg); in __sdt_free()
2314 sdd->sg = NULL; in __sdt_free()
2315 free_percpu(sdd->sgc); in __sdt_free()
2316 sdd->sgc = NULL; in __sdt_free()
2327 sd->level = child->level + 1; in build_sched_domain()
2328 sched_domain_level_max = max(sched_domain_level_max, sd->level); in build_sched_domain()
2329 child->parent = sd; in build_sched_domain()
2335 child->name, sd->name); in build_sched_domain()
2349 * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
2350 * any two given CPUs at this (non-NUMA) topology level.
2358 if (tl->flags & SDTL_OVERLAP) in topology_span_sane()
2362 * Non-NUMA levels cannot partially overlap - they must be either in topology_span_sane()
2364 * breaking the sched_group lists - i.e. a later get_group() pass in topology_span_sane()
2369 * We should 'and' all those masks with 'cpu_map' to exactly in topology_span_sane()
2374 if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && in topology_span_sane()
2375 cpumask_intersects(tl->mask(cpu), tl->mask(i))) in topology_span_sane()
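The check above enforces that, at any non-NUMA level, the spans reported for two CPUs are either identical or fully disjoint; a partial overlap would corrupt the per-CPU sched_group lists built later. Here is a compact userspace rendition of that pairwise invariant over plain bitmasks, purely for illustration.

/* Userspace sketch of the "equal or disjoint" span invariant checked by
 * topology_span_sane(); spans here are plain bitmasks. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool spans_sane(const unsigned int span[NR_CPUS])
{
        for (int i = 0; i < NR_CPUS; i++)
                for (int j = i + 1; j < NR_CPUS; j++)
                        if (span[i] != span[j] && (span[i] & span[j]))
                                return false;   /* partial overlap */
        return true;
}

int main(void)
{
        unsigned int good[NR_CPUS] = { 0x3, 0x3, 0xc, 0xc };    /* {0,1} {2,3} */
        unsigned int bad[NR_CPUS]  = { 0x3, 0x6, 0x6, 0xc };    /* overlaps   */

        printf("good: %d, bad: %d\n", spans_sane(good), spans_sane(bad));
        return 0;
}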
2393 int i, ret = -ENOMEM; in build_sched_domains()
2416 has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; in build_sched_domains()
2420 if (tl->flags & SDTL_OVERLAP) in build_sched_domains()
2421 sd->flags |= SD_OVERLAP; in build_sched_domains()
2429 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2430 sd->span_weight = cpumask_weight(sched_domain_span(sd)); in build_sched_domains()
2431 if (sd->flags & SD_OVERLAP) { in build_sched_domains()
2449 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2450 struct sched_domain *child = sd->child; in build_sched_domains()
2452 if (!(sd->flags & SD_SHARE_LLC) && child && in build_sched_domains()
2453 (child->flags & SD_SHARE_LLC)) { in build_sched_domains()
2460 * arbitrary cutoff based on two factors -- SMT and in build_sched_domains()
2461 * memory channels. For SMT-2, the intent is to in build_sched_domains()
2463 * SMT-4 or SMT-8 *may* benefit from a different in build_sched_domains()
2477 nr_llcs = sd->span_weight / child->span_weight; in build_sched_domains()
2479 imb = sd->span_weight >> 3; in build_sched_domains()
2483 sd->imb_numa_nr = imb; in build_sched_domains()
2486 top_p = sd->parent; in build_sched_domains()
2487 while (top_p && !(top_p->flags & SD_NUMA)) { in build_sched_domains()
2488 top_p = top_p->parent; in build_sched_domains()
2490 imb_span = top_p ? top_p->span_weight : sd->span_weight; in build_sched_domains()
2492 int factor = max(1U, (sd->span_weight / imb_span)); in build_sched_domains()
2494 sd->imb_numa_nr = imb * factor; in build_sched_domains()
2500 for (i = nr_cpumask_bits-1; i >= 0; i--) { in build_sched_domains()
2504 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2542 /* Number of sched domains in 'doms_cur': */
2545 /* Attributes of custom domains in 'doms_cur' */
2592 * CPUs, but could be used to exclude other special cases in the future.
2615 * Detach sched domains from a group of CPUs specified in cpu_map
2654 * cpumasks in the array doms_new[] of cpumasks. This compares
2659 * The masks don't intersect (don't overlap). We should set up one
2660 * sched domain for each mask. CPUs not in any of the cpumasks will
2661 * not be load balanced. If the same cpumask appears both in the
2662 * current 'doms_cur' domains and in the new 'doms_new', we can leave
2665 * The passed in 'doms_new' should be allocated using
2668 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2713 /* No match - a current sched domain not in new doms_new[] */ in partition_sched_domains_locked()
2734 /* No match - add a new doms_new */ in partition_sched_domains_locked()
2745 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
2750 /* No match - add perf domains for a new rd */ in partition_sched_domains_locked()
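The matching loops above keep a sched domain (or a root domain's perf domains) only when an identical cpumask, with identical attributes, appears in both the old and the new set; everything else is detached and rebuilt. A rough userspace sketch of that comparison follows; domain attributes are ignored and "domains" are reduced to plain bitmasks, so this is only an outline of the control flow, not the real routine.

/* Userspace sketch of the doms_cur/doms_new matching done by
 * partition_sched_domains_locked(); "domains" are just bitmasks here. */
#include <stdio.h>

static void repartition(const unsigned int *cur, int ncur,
                        const unsigned int *new, int nnew)
{
        /* Destroy current domains that have no identical match in new. */
        for (int i = 0; i < ncur; i++) {
                int j;

                for (j = 0; j < nnew; j++)
                        if (cur[i] == new[j])
                                break;
                if (j == nnew)
                        printf("detach 0x%02x\n", cur[i]);
        }

        /* Build new domains that have no identical match in current. */
        for (int j = 0; j < nnew; j++) {
                int i;

                for (i = 0; i < ncur; i++)
                        if (new[j] == cur[i])
                                break;
                if (i == ncur)
                        printf("build  0x%02x\n", new[j]);
        }
}

int main(void)
{
        unsigned int cur[] = { 0x0f, 0xf0 };
        unsigned int new[] = { 0x0f, 0xc0, 0x30 };

        repartition(cur, 2, new, 3);    /* keeps 0x0f, rebuilds the rest */
        return 0;
}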