1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Scheduler topology setup/handling methods 4 */ 5 6 #include <linux/sched/isolation.h> 7 #include <linux/bsearch.h> 8 #include "sched.h" 9 10 DEFINE_MUTEX(sched_domains_mutex); 11 void sched_domains_mutex_lock(void) 12 { 13 mutex_lock(&sched_domains_mutex); 14 } 15 void sched_domains_mutex_unlock(void) 16 { 17 mutex_unlock(&sched_domains_mutex); 18 } 19 20 /* Protected by sched_domains_mutex: */ 21 static cpumask_var_t sched_domains_tmpmask; 22 static cpumask_var_t sched_domains_tmpmask2; 23 24 static int __init sched_debug_setup(char *str) 25 { 26 sched_debug_verbose = true; 27 28 return 0; 29 } 30 early_param("sched_verbose", sched_debug_setup); 31 32 static inline bool sched_debug(void) 33 { 34 return sched_debug_verbose; 35 } 36 37 #define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name }, 38 const struct sd_flag_debug sd_flag_debug[] = { 39 #include <linux/sched/sd_flags.h> 40 }; 41 #undef SD_FLAG 42 43 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 44 struct cpumask *groupmask) 45 { 46 struct sched_group *group = sd->groups; 47 unsigned long flags = sd->flags; 48 unsigned int idx; 49 50 cpumask_clear(groupmask); 51 52 printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); 53 printk(KERN_CONT "span=%*pbl level=%s\n", 54 cpumask_pr_args(sched_domain_span(sd)), sd->name); 55 56 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 57 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); 58 } 59 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { 60 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); 61 } 62 63 for_each_set_bit(idx, &flags, __SD_FLAG_CNT) { 64 unsigned int flag = BIT(idx); 65 unsigned int meta_flags = sd_flag_debug[idx].meta_flags; 66 67 if ((meta_flags & SDF_SHARED_CHILD) && sd->child && 68 !(sd->child->flags & flag)) 69 printk(KERN_ERR "ERROR: flag %s set here but not in child\n", 70 sd_flag_debug[idx].name); 71 72 if ((meta_flags & SDF_SHARED_PARENT) && sd->parent && 73 !(sd->parent->flags & flag)) 74 printk(KERN_ERR "ERROR: flag %s set here but not in parent\n", 75 sd_flag_debug[idx].name); 76 } 77 78 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 79 do { 80 if (!group) { 81 printk("\n"); 82 printk(KERN_ERR "ERROR: group is NULL\n"); 83 break; 84 } 85 86 if (cpumask_empty(sched_group_span(group))) { 87 printk(KERN_CONT "\n"); 88 printk(KERN_ERR "ERROR: empty group\n"); 89 break; 90 } 91 92 if (!(sd->flags & SD_OVERLAP) && 93 cpumask_intersects(groupmask, sched_group_span(group))) { 94 printk(KERN_CONT "\n"); 95 printk(KERN_ERR "ERROR: repeated CPUs\n"); 96 break; 97 } 98 99 cpumask_or(groupmask, groupmask, sched_group_span(group)); 100 101 printk(KERN_CONT " %d:{ span=%*pbl", 102 group->sgc->id, 103 cpumask_pr_args(sched_group_span(group))); 104 105 if ((sd->flags & SD_OVERLAP) && 106 !cpumask_equal(group_balance_mask(group), sched_group_span(group))) { 107 printk(KERN_CONT " mask=%*pbl", 108 cpumask_pr_args(group_balance_mask(group))); 109 } 110 111 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) 112 printk(KERN_CONT " cap=%lu", group->sgc->capacity); 113 114 if (group == sd->groups && sd->child && 115 !cpumask_equal(sched_domain_span(sd->child), 116 sched_group_span(group))) { 117 printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n"); 118 } 119 120 printk(KERN_CONT " }"); 121 122 group = group->next; 123 124 if (group != sd->groups) 125 printk(KERN_CONT ","); 126 127 } while (group != 
sd->groups); 128 printk(KERN_CONT "\n"); 129 130 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 131 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 132 133 if (sd->parent && 134 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 135 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); 136 return 0; 137 } 138 139 static void sched_domain_debug(struct sched_domain *sd, int cpu) 140 { 141 int level = 0; 142 143 if (!sched_debug_verbose) 144 return; 145 146 if (!sd) { 147 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 148 return; 149 } 150 151 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); 152 153 for (;;) { 154 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 155 break; 156 level++; 157 sd = sd->parent; 158 if (!sd) 159 break; 160 } 161 } 162 163 /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */ 164 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) | 165 static const unsigned int SD_DEGENERATE_GROUPS_MASK = 166 #include <linux/sched/sd_flags.h> 167 0; 168 #undef SD_FLAG 169 170 static int sd_degenerate(struct sched_domain *sd) 171 { 172 if (cpumask_weight(sched_domain_span(sd)) == 1) 173 return 1; 174 175 /* Following flags need at least 2 groups */ 176 if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) && 177 (sd->groups != sd->groups->next)) 178 return 0; 179 180 /* Following flags don't use groups */ 181 if (sd->flags & (SD_WAKE_AFFINE)) 182 return 0; 183 184 return 1; 185 } 186 187 static int 188 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 189 { 190 unsigned long cflags = sd->flags, pflags = parent->flags; 191 192 if (sd_degenerate(parent)) 193 return 1; 194 195 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 196 return 0; 197 198 /* Flags needing groups don't count if only 1 group in parent */ 199 if (parent->groups == parent->groups->next) 200 pflags &= ~SD_DEGENERATE_GROUPS_MASK; 201 202 if (~cflags & pflags) 203 return 0; 204 205 return 1; 206 } 207 208 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 209 DEFINE_STATIC_KEY_FALSE(sched_energy_present); 210 static unsigned int sysctl_sched_energy_aware = 1; 211 static DEFINE_MUTEX(sched_energy_mutex); 212 static bool sched_energy_update; 213 214 static bool sched_is_eas_possible(const struct cpumask *cpu_mask) 215 { 216 bool any_asym_capacity = false; 217 int i; 218 219 /* EAS is enabled for asymmetric CPU capacity topologies. 
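	 * That is detected via the sd_asym_cpucapacity pointer set up in
	 * update_top_cache_domain(): it is only non-NULL when some level of
	 * the hierarchy carries SD_ASYM_CPUCAPACITY_FULL. As an illustrative
	 * example, on a two-cluster system where, say, CPUs 0-3 report a
	 * smaller arch_scale_cpu_capacity() than CPUs 4-7, the package level
	 * gets that flag and EAS can proceed; on a fully symmetric SMP box
	 * the pointer stays NULL and we bail out here.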
*/ 220 for_each_cpu(i, cpu_mask) { 221 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, i))) { 222 any_asym_capacity = true; 223 break; 224 } 225 } 226 if (!any_asym_capacity) { 227 if (sched_debug()) { 228 pr_info("rd %*pbl: Checking EAS, CPUs do not have asymmetric capacities\n", 229 cpumask_pr_args(cpu_mask)); 230 } 231 return false; 232 } 233 234 /* EAS definitely does *not* handle SMT */ 235 if (sched_smt_active()) { 236 if (sched_debug()) { 237 pr_info("rd %*pbl: Checking EAS, SMT is not supported\n", 238 cpumask_pr_args(cpu_mask)); 239 } 240 return false; 241 } 242 243 if (!arch_scale_freq_invariant()) { 244 if (sched_debug()) { 245 pr_info("rd %*pbl: Checking EAS: frequency-invariant load tracking not yet supported", 246 cpumask_pr_args(cpu_mask)); 247 } 248 return false; 249 } 250 251 if (!cpufreq_ready_for_eas(cpu_mask)) { 252 if (sched_debug()) { 253 pr_info("rd %*pbl: Checking EAS: cpufreq is not ready\n", 254 cpumask_pr_args(cpu_mask)); 255 } 256 return false; 257 } 258 259 return true; 260 } 261 262 void rebuild_sched_domains_energy(void) 263 { 264 mutex_lock(&sched_energy_mutex); 265 sched_energy_update = true; 266 rebuild_sched_domains(); 267 sched_energy_update = false; 268 mutex_unlock(&sched_energy_mutex); 269 } 270 271 #ifdef CONFIG_PROC_SYSCTL 272 static int sched_energy_aware_handler(const struct ctl_table *table, int write, 273 void *buffer, size_t *lenp, loff_t *ppos) 274 { 275 int ret, state; 276 277 if (write && !capable(CAP_SYS_ADMIN)) 278 return -EPERM; 279 280 if (!sched_is_eas_possible(cpu_active_mask)) { 281 if (write) { 282 return -EOPNOTSUPP; 283 } else { 284 *lenp = 0; 285 return 0; 286 } 287 } 288 289 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 290 if (!ret && write) { 291 state = static_branch_unlikely(&sched_energy_present); 292 if (state != sysctl_sched_energy_aware) 293 rebuild_sched_domains_energy(); 294 } 295 296 return ret; 297 } 298 299 static const struct ctl_table sched_energy_aware_sysctls[] = { 300 { 301 .procname = "sched_energy_aware", 302 .data = &sysctl_sched_energy_aware, 303 .maxlen = sizeof(unsigned int), 304 .mode = 0644, 305 .proc_handler = sched_energy_aware_handler, 306 .extra1 = SYSCTL_ZERO, 307 .extra2 = SYSCTL_ONE, 308 }, 309 }; 310 311 static int __init sched_energy_aware_sysctl_init(void) 312 { 313 register_sysctl_init("kernel", sched_energy_aware_sysctls); 314 return 0; 315 } 316 317 late_initcall(sched_energy_aware_sysctl_init); 318 #endif /* CONFIG_PROC_SYSCTL */ 319 320 static void free_pd(struct perf_domain *pd) 321 { 322 struct perf_domain *tmp; 323 324 while (pd) { 325 tmp = pd->next; 326 kfree(pd); 327 pd = tmp; 328 } 329 } 330 331 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu) 332 { 333 while (pd) { 334 if (cpumask_test_cpu(cpu, perf_domain_span(pd))) 335 return pd; 336 pd = pd->next; 337 } 338 339 return NULL; 340 } 341 342 static struct perf_domain *pd_init(int cpu) 343 { 344 struct em_perf_domain *obj = em_cpu_get(cpu); 345 struct perf_domain *pd; 346 347 if (!obj) { 348 if (sched_debug()) 349 pr_info("%s: no EM found for CPU%d\n", __func__, cpu); 350 return NULL; 351 } 352 353 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 354 if (!pd) 355 return NULL; 356 pd->em_pd = obj; 357 358 return pd; 359 } 360 361 static void perf_domain_debug(const struct cpumask *cpu_map, 362 struct perf_domain *pd) 363 { 364 if (!sched_debug() || !pd) 365 return; 366 367 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); 368 369 while (pd) { 370 printk(KERN_CONT " pd%d:{ cpus=%*pbl 
nr_pstate=%d }", 371 cpumask_first(perf_domain_span(pd)), 372 cpumask_pr_args(perf_domain_span(pd)), 373 em_pd_nr_perf_states(pd->em_pd)); 374 pd = pd->next; 375 } 376 377 printk(KERN_CONT "\n"); 378 } 379 380 static void destroy_perf_domain_rcu(struct rcu_head *rp) 381 { 382 struct perf_domain *pd; 383 384 pd = container_of(rp, struct perf_domain, rcu); 385 free_pd(pd); 386 } 387 388 static void sched_energy_set(bool has_eas) 389 { 390 if (!has_eas && static_branch_unlikely(&sched_energy_present)) { 391 if (sched_debug()) 392 pr_info("%s: stopping EAS\n", __func__); 393 static_branch_disable_cpuslocked(&sched_energy_present); 394 } else if (has_eas && !static_branch_unlikely(&sched_energy_present)) { 395 if (sched_debug()) 396 pr_info("%s: starting EAS\n", __func__); 397 static_branch_enable_cpuslocked(&sched_energy_present); 398 } 399 } 400 401 /* 402 * EAS can be used on a root domain if it meets all the following conditions: 403 * 1. an Energy Model (EM) is available; 404 * 2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy. 405 * 3. no SMT is detected. 406 * 4. schedutil is driving the frequency of all CPUs of the rd; 407 * 5. frequency invariance support is present; 408 */ 409 static bool build_perf_domains(const struct cpumask *cpu_map) 410 { 411 int i; 412 struct perf_domain *pd = NULL, *tmp; 413 int cpu = cpumask_first(cpu_map); 414 struct root_domain *rd = cpu_rq(cpu)->rd; 415 416 if (!sysctl_sched_energy_aware) 417 goto free; 418 419 if (!sched_is_eas_possible(cpu_map)) 420 goto free; 421 422 for_each_cpu(i, cpu_map) { 423 /* Skip already covered CPUs. */ 424 if (find_pd(pd, i)) 425 continue; 426 427 /* Create the new pd and add it to the local list. */ 428 tmp = pd_init(i); 429 if (!tmp) 430 goto free; 431 tmp->next = pd; 432 pd = tmp; 433 } 434 435 perf_domain_debug(cpu_map, pd); 436 437 /* Attach the new list of performance domains to the root domain. 
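	 * This follows the usual RCU publish pattern: readers (for example
	 * the wake-up path walking rd->pd under rcu_read_lock()) see either
	 * the old list or the new one, and the old list is only freed by
	 * destroy_perf_domain_rcu() after a grace period via the call_rcu()
	 * below.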
*/ 438 tmp = rd->pd; 439 rcu_assign_pointer(rd->pd, pd); 440 if (tmp) 441 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); 442 443 return !!pd; 444 445 free: 446 free_pd(pd); 447 tmp = rd->pd; 448 rcu_assign_pointer(rd->pd, NULL); 449 if (tmp) 450 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); 451 452 return false; 453 } 454 #else /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL): */ 455 static void free_pd(struct perf_domain *pd) { } 456 #endif /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 457 458 static void free_rootdomain(struct rcu_head *rcu) 459 { 460 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 461 462 cpupri_cleanup(&rd->cpupri); 463 cpudl_cleanup(&rd->cpudl); 464 free_cpumask_var(rd->dlo_mask); 465 free_cpumask_var(rd->rto_mask); 466 free_cpumask_var(rd->online); 467 free_cpumask_var(rd->span); 468 free_pd(rd->pd); 469 kfree(rd); 470 } 471 472 void rq_attach_root(struct rq *rq, struct root_domain *rd) 473 { 474 struct root_domain *old_rd = NULL; 475 struct rq_flags rf; 476 477 rq_lock_irqsave(rq, &rf); 478 479 if (rq->rd) { 480 old_rd = rq->rd; 481 482 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 483 set_rq_offline(rq); 484 485 cpumask_clear_cpu(rq->cpu, old_rd->span); 486 487 /* 488 * If we don't want to free the old_rd yet then 489 * set old_rd to NULL to skip the freeing later 490 * in this function: 491 */ 492 if (!atomic_dec_and_test(&old_rd->refcount)) 493 old_rd = NULL; 494 } 495 496 atomic_inc(&rd->refcount); 497 rq->rd = rd; 498 499 cpumask_set_cpu(rq->cpu, rd->span); 500 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 501 set_rq_online(rq); 502 503 /* 504 * Because the rq is not a task, dl_add_task_root_domain() did not 505 * move the fair server bw to the rd if it already started. 506 * Add it now. 507 */ 508 if (rq->fair_server.dl_server) 509 __dl_server_attach_root(&rq->fair_server, rq); 510 511 rq_unlock_irqrestore(rq, &rf); 512 513 if (old_rd) 514 call_rcu(&old_rd->rcu, free_rootdomain); 515 } 516 517 void sched_get_rd(struct root_domain *rd) 518 { 519 atomic_inc(&rd->refcount); 520 } 521 522 void sched_put_rd(struct root_domain *rd) 523 { 524 if (!atomic_dec_and_test(&rd->refcount)) 525 return; 526 527 call_rcu(&rd->rcu, free_rootdomain); 528 } 529 530 static int init_rootdomain(struct root_domain *rd) 531 { 532 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) 533 goto out; 534 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) 535 goto free_span; 536 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 537 goto free_online; 538 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 539 goto free_dlo_mask; 540 541 #ifdef HAVE_RT_PUSH_IPI 542 rd->rto_cpu = -1; 543 raw_spin_lock_init(&rd->rto_lock); 544 rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func); 545 #endif 546 547 rd->visit_cookie = 0; 548 init_dl_bw(&rd->dl_bw); 549 if (cpudl_init(&rd->cpudl) != 0) 550 goto free_rto_mask; 551 552 if (cpupri_init(&rd->cpupri) != 0) 553 goto free_cpudl; 554 return 0; 555 556 free_cpudl: 557 cpudl_cleanup(&rd->cpudl); 558 free_rto_mask: 559 free_cpumask_var(rd->rto_mask); 560 free_dlo_mask: 561 free_cpumask_var(rd->dlo_mask); 562 free_online: 563 free_cpumask_var(rd->online); 564 free_span: 565 free_cpumask_var(rd->span); 566 out: 567 return -ENOMEM; 568 } 569 570 /* 571 * By default the system creates a single root-domain with all CPUs as 572 * members (mimicking the global state we have today). 
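 * Partitioning the system, for instance through exclusive cpusets, later
 * creates additional root domains, each with its own span and its own
 * RT/DL state (cpupri, cpudl, dl_bw); rq_attach_root() above is what moves
 * a runqueue from one root domain to another.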
573 */ 574 struct root_domain def_root_domain; 575 576 void __init init_defrootdomain(void) 577 { 578 init_rootdomain(&def_root_domain); 579 580 atomic_set(&def_root_domain.refcount, 1); 581 } 582 583 static struct root_domain *alloc_rootdomain(void) 584 { 585 struct root_domain *rd; 586 587 rd = kzalloc(sizeof(*rd), GFP_KERNEL); 588 if (!rd) 589 return NULL; 590 591 if (init_rootdomain(rd) != 0) { 592 kfree(rd); 593 return NULL; 594 } 595 596 return rd; 597 } 598 599 static void free_sched_groups(struct sched_group *sg, int free_sgc) 600 { 601 struct sched_group *tmp, *first; 602 603 if (!sg) 604 return; 605 606 first = sg; 607 do { 608 tmp = sg->next; 609 610 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 611 kfree(sg->sgc); 612 613 if (atomic_dec_and_test(&sg->ref)) 614 kfree(sg); 615 sg = tmp; 616 } while (sg != first); 617 } 618 619 static void destroy_sched_domain(struct sched_domain *sd) 620 { 621 /* 622 * A normal sched domain may have multiple group references, an 623 * overlapping domain, having private groups, only one. Iterate, 624 * dropping group/capacity references, freeing where none remain. 625 */ 626 free_sched_groups(sd->groups, 1); 627 628 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) 629 kfree(sd->shared); 630 kfree(sd); 631 } 632 633 static void destroy_sched_domains_rcu(struct rcu_head *rcu) 634 { 635 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 636 637 while (sd) { 638 struct sched_domain *parent = sd->parent; 639 destroy_sched_domain(sd); 640 sd = parent; 641 } 642 } 643 644 static void destroy_sched_domains(struct sched_domain *sd) 645 { 646 if (sd) 647 call_rcu(&sd->rcu, destroy_sched_domains_rcu); 648 } 649 650 /* 651 * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set 652 * (Last Level Cache Domain) for this allows us to avoid some pointer chasing 653 * select_idle_sibling(). 654 * 655 * Also keep a unique ID per domain (we use the first CPU number in the cpumask 656 * of the domain), this allows us to quickly tell if two CPUs are in the same 657 * cache domain, see cpus_share_cache(). 658 */ 659 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); 660 DEFINE_PER_CPU(int, sd_llc_size); 661 DEFINE_PER_CPU(int, sd_llc_id); 662 DEFINE_PER_CPU(int, sd_share_id); 663 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 664 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); 665 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 666 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 667 668 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); 669 DEFINE_STATIC_KEY_FALSE(sched_cluster_active); 670 671 static void update_top_cache_domain(int cpu) 672 { 673 struct sched_domain_shared *sds = NULL; 674 struct sched_domain *sd; 675 int id = cpu; 676 int size = 1; 677 678 sd = highest_flag_domain(cpu, SD_SHARE_LLC); 679 if (sd) { 680 id = cpumask_first(sched_domain_span(sd)); 681 size = cpumask_weight(sched_domain_span(sd)); 682 sds = sd->shared; 683 } 684 685 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 686 per_cpu(sd_llc_size, cpu) = size; 687 per_cpu(sd_llc_id, cpu) = id; 688 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); 689 690 sd = lowest_flag_domain(cpu, SD_CLUSTER); 691 if (sd) 692 id = cpumask_first(sched_domain_span(sd)); 693 694 /* 695 * This assignment should be placed after the sd_llc_id as 696 * we want this id equals to cluster id on cluster machines 697 * but equals to LLC id on non-Cluster machines. 
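	 * As an illustrative example: with CPUs 0-7 sharing an LLC and CPUs
	 * 0-3/4-7 forming two clusters inside it, sd_llc_id is 0 for all
	 * eight CPUs while sd_share_id becomes 0 for CPUs 0-3 and 4 for CPUs
	 * 4-7. Without an SD_CLUSTER level, 'id' keeps the LLC value computed
	 * above and sd_share_id simply mirrors sd_llc_id.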
 */
	per_cpu(sd_share_id, cpu) = id;

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;

			if (parent->parent) {
				parent->parent->child = tmp;
				parent->parent->groups->flags = tmp->flags;
			}

			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd) {
			struct sched_group *sg = sd->groups;

			/*
			 * sched groups hold the flags of the child sched
			 * domain for convenience. Clear such flags since
			 * the child is being destroyed. Walk the whole
			 * circular list, not just the first group.
			 */
			do {
				sg->flags = 0;
				sg = sg->next;
			} while (sg != sd->groups);

			sd->child = NULL;
		}
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

struct s_data {
	struct sched_domain * __percpu *sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask is made up of all those CPUs that could actually end up
 * at this group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}


/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
827 * 828 * For the above NUMA topology that gives 3 levels: 829 * 830 * NUMA-2 0-3 0-3 0-3 0-3 831 * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2} 832 * 833 * NUMA-1 0-1,3 0-2 1-3 0,2-3 834 * groups: {0},{1},{3} {0},{1},{2} {1},{2},{3} {0},{2},{3} 835 * 836 * NUMA-0 0 1 2 3 837 * 838 * 839 * As can be seen; things don't nicely line up as with the regular topology. 840 * When we iterate a domain in child domain chunks some nodes can be 841 * represented multiple times -- hence the "overlap" naming for this part of 842 * the topology. 843 * 844 * In order to minimize this overlap, we only build enough groups to cover the 845 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3. 846 * 847 * Because: 848 * 849 * - the first group of each domain is its child domain; this 850 * gets us the first 0-1,3 851 * - the only uncovered node is 2, who's child domain is 1-3. 852 * 853 * However, because of the overlap, computing a unique CPU for each group is 854 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both 855 * groups include the CPUs of Node-0, while those CPUs would not in fact ever 856 * end up at those groups (they would end up in group: 0-1,3). 857 * 858 * To correct this we have to introduce the group balance mask. This mask 859 * will contain those CPUs in the group that can reach this group given the 860 * (child) domain tree. 861 * 862 * With this we can once again compute balance_cpu and sched_group_capacity 863 * relations. 864 * 865 * XXX include words on how balance_cpu is unique and therefore can be 866 * used for sched_group_capacity links. 867 * 868 * 869 * Another 'interesting' topology is: 870 * 871 * node 0 1 2 3 872 * 0: 10 20 20 30 873 * 1: 20 10 20 20 874 * 2: 20 20 10 20 875 * 3: 30 20 20 10 876 * 877 * Which looks a little like: 878 * 879 * 0 ----- 1 880 * | / | 881 * | / | 882 * | / | 883 * 2 ----- 3 884 * 885 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3 886 * are not. 887 * 888 * This leads to a few particularly weird cases where the sched_domain's are 889 * not of the same number for each CPU. Consider: 890 * 891 * NUMA-2 0-3 0-3 892 * groups: {0-2},{1-3} {1-3},{0-2} 893 * 894 * NUMA-1 0-2 0-3 0-3 1-3 895 * 896 * NUMA-0 0 1 2 3 897 * 898 */ 899 900 901 /* 902 * Build the balance mask; it contains only those CPUs that can arrive at this 903 * group and should be considered to continue balancing. 904 * 905 * We do this during the group creation pass, therefore the group information 906 * isn't complete yet, however since each group represents a (child) domain we 907 * can fully construct this using the sched_domain bits (which are already 908 * complete). 909 */ 910 static void 911 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) 912 { 913 const struct cpumask *sg_span = sched_group_span(sg); 914 struct sd_data *sdd = sd->private; 915 struct sched_domain *sibling; 916 int i; 917 918 cpumask_clear(mask); 919 920 for_each_cpu(i, sg_span) { 921 sibling = *per_cpu_ptr(sdd->sd, i); 922 923 /* 924 * Can happen in the asymmetric case, where these siblings are 925 * unused. The mask will not be empty because those CPUs that 926 * do have the top domain _should_ span the domain. 
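		 * Taking the ring example from the blurb above: for node 1's
		 * NUMA-2 group spanning nodes 0-2, only node 1's CPUs pass
		 * the equality check below (their child NUMA-1 domain spans
		 * exactly 0-2), so the balance mask ends up holding just
		 * node 1's CPUs; node 0's CPUs would instead balance in
		 * their own 0-1,3 group.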
927 */ 928 if (!sibling->child) 929 continue; 930 931 /* If we would not end up here, we can't continue from here */ 932 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) 933 continue; 934 935 cpumask_set_cpu(i, mask); 936 } 937 938 /* We must not have empty masks here */ 939 WARN_ON_ONCE(cpumask_empty(mask)); 940 } 941 942 /* 943 * XXX: This creates per-node group entries; since the load-balancer will 944 * immediately access remote memory to construct this group's load-balance 945 * statistics having the groups node local is of dubious benefit. 946 */ 947 static struct sched_group * 948 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) 949 { 950 struct sched_group *sg; 951 struct cpumask *sg_span; 952 953 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 954 GFP_KERNEL, cpu_to_node(cpu)); 955 956 if (!sg) 957 return NULL; 958 959 sg_span = sched_group_span(sg); 960 if (sd->child) { 961 cpumask_copy(sg_span, sched_domain_span(sd->child)); 962 sg->flags = sd->child->flags; 963 } else { 964 cpumask_copy(sg_span, sched_domain_span(sd)); 965 } 966 967 atomic_inc(&sg->ref); 968 return sg; 969 } 970 971 static void init_overlap_sched_group(struct sched_domain *sd, 972 struct sched_group *sg) 973 { 974 struct cpumask *mask = sched_domains_tmpmask2; 975 struct sd_data *sdd = sd->private; 976 struct cpumask *sg_span; 977 int cpu; 978 979 build_balance_mask(sd, sg, mask); 980 cpu = cpumask_first(mask); 981 982 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); 983 if (atomic_inc_return(&sg->sgc->ref) == 1) 984 cpumask_copy(group_balance_mask(sg), mask); 985 else 986 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask)); 987 988 /* 989 * Initialize sgc->capacity such that even if we mess up the 990 * domains and no possible iteration will get us here, we won't 991 * die on a /0 trap. 
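	 * The value set below is only a placeholder: every CPU is assumed to
	 * contribute a full SCHED_CAPACITY_SCALE (1024) until
	 * update_group_capacity() fills in the real per-group capacity from
	 * the attached topology.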
992 */ 993 sg_span = sched_group_span(sg); 994 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 995 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; 996 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; 997 } 998 999 static struct sched_domain * 1000 find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling) 1001 { 1002 /* 1003 * The proper descendant would be the one whose child won't span out 1004 * of sd 1005 */ 1006 while (sibling->child && 1007 !cpumask_subset(sched_domain_span(sibling->child), 1008 sched_domain_span(sd))) 1009 sibling = sibling->child; 1010 1011 /* 1012 * As we are referencing sgc across different topology level, we need 1013 * to go down to skip those sched_domains which don't contribute to 1014 * scheduling because they will be degenerated in cpu_attach_domain 1015 */ 1016 while (sibling->child && 1017 cpumask_equal(sched_domain_span(sibling->child), 1018 sched_domain_span(sibling))) 1019 sibling = sibling->child; 1020 1021 return sibling; 1022 } 1023 1024 static int 1025 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 1026 { 1027 struct sched_group *first = NULL, *last = NULL, *sg; 1028 const struct cpumask *span = sched_domain_span(sd); 1029 struct cpumask *covered = sched_domains_tmpmask; 1030 struct sd_data *sdd = sd->private; 1031 struct sched_domain *sibling; 1032 int i; 1033 1034 cpumask_clear(covered); 1035 1036 for_each_cpu_wrap(i, span, cpu) { 1037 struct cpumask *sg_span; 1038 1039 if (cpumask_test_cpu(i, covered)) 1040 continue; 1041 1042 sibling = *per_cpu_ptr(sdd->sd, i); 1043 1044 /* 1045 * Asymmetric node setups can result in situations where the 1046 * domain tree is of unequal depth, make sure to skip domains 1047 * that already cover the entire range. 1048 * 1049 * In that case build_sched_domains() will have terminated the 1050 * iteration early and our sibling sd spans will be empty. 1051 * Domains should always include the CPU they're built on, so 1052 * check that. 1053 */ 1054 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 1055 continue; 1056 1057 /* 1058 * Usually we build sched_group by sibling's child sched_domain 1059 * But for machines whose NUMA diameter are 3 or above, we move 1060 * to build sched_group by sibling's proper descendant's child 1061 * domain because sibling's child sched_domain will span out of 1062 * the sched_domain being built as below. 1063 * 1064 * Smallest diameter=3 topology is: 1065 * 1066 * node 0 1 2 3 1067 * 0: 10 20 30 40 1068 * 1: 20 10 20 30 1069 * 2: 30 20 10 20 1070 * 3: 40 30 20 10 1071 * 1072 * 0 --- 1 --- 2 --- 3 1073 * 1074 * NUMA-3 0-3 N/A N/A 0-3 1075 * groups: {0-2},{1-3} {1-3},{0-2} 1076 * 1077 * NUMA-2 0-2 0-3 0-3 1-3 1078 * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2} 1079 * 1080 * NUMA-1 0-1 0-2 1-3 2-3 1081 * groups: {0},{1} {1},{2},{0} {2},{3},{1} {3},{2} 1082 * 1083 * NUMA-0 0 1 2 3 1084 * 1085 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the 1086 * group span isn't a subset of the domain span. 
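		 * In that case find_descended_sibling() walks down the
		 * sibling's hierarchy until it reaches a level whose child no
		 * longer leaks outside the span being built (skipping levels
		 * that would be degenerated anyway), and the group is built
		 * from that descendant instead.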
1087 */ 1088 if (sibling->child && 1089 !cpumask_subset(sched_domain_span(sibling->child), span)) 1090 sibling = find_descended_sibling(sd, sibling); 1091 1092 sg = build_group_from_child_sched_domain(sibling, cpu); 1093 if (!sg) 1094 goto fail; 1095 1096 sg_span = sched_group_span(sg); 1097 cpumask_or(covered, covered, sg_span); 1098 1099 init_overlap_sched_group(sibling, sg); 1100 1101 if (!first) 1102 first = sg; 1103 if (last) 1104 last->next = sg; 1105 last = sg; 1106 last->next = first; 1107 } 1108 sd->groups = first; 1109 1110 return 0; 1111 1112 fail: 1113 free_sched_groups(first, 0); 1114 1115 return -ENOMEM; 1116 } 1117 1118 1119 /* 1120 * Package topology (also see the load-balance blurb in fair.c) 1121 * 1122 * The scheduler builds a tree structure to represent a number of important 1123 * topology features. By default (default_topology[]) these include: 1124 * 1125 * - Simultaneous multithreading (SMT) 1126 * - Multi-Core Cache (MC) 1127 * - Package (PKG) 1128 * 1129 * Where the last one more or less denotes everything up to a NUMA node. 1130 * 1131 * The tree consists of 3 primary data structures: 1132 * 1133 * sched_domain -> sched_group -> sched_group_capacity 1134 * ^ ^ ^ ^ 1135 * `-' `-' 1136 * 1137 * The sched_domains are per-CPU and have a two way link (parent & child) and 1138 * denote the ever growing mask of CPUs belonging to that level of topology. 1139 * 1140 * Each sched_domain has a circular (double) linked list of sched_group's, each 1141 * denoting the domains of the level below (or individual CPUs in case of the 1142 * first domain level). The sched_group linked by a sched_domain includes the 1143 * CPU of that sched_domain [*]. 1144 * 1145 * Take for instance a 2 threaded, 2 core, 2 cache cluster part: 1146 * 1147 * CPU 0 1 2 3 4 5 6 7 1148 * 1149 * PKG [ ] 1150 * MC [ ] [ ] 1151 * SMT [ ] [ ] [ ] [ ] 1152 * 1153 * - or - 1154 * 1155 * PKG 0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7 1156 * MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7 1157 * SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7 1158 * 1159 * CPU 0 1 2 3 4 5 6 7 1160 * 1161 * One way to think about it is: sched_domain moves you up and down among these 1162 * topology levels, while sched_group moves you sideways through it, at child 1163 * domain granularity. 1164 * 1165 * sched_group_capacity ensures each unique sched_group has shared storage. 1166 * 1167 * There are two related construction problems, both require a CPU that 1168 * uniquely identify each group (for a given domain): 1169 * 1170 * - The first is the balance_cpu (see should_we_balance() and the 1171 * load-balance blurb in fair.c); for each group we only want 1 CPU to 1172 * continue balancing at a higher domain. 1173 * 1174 * - The second is the sched_group_capacity; we want all identical groups 1175 * to share a single sched_group_capacity. 1176 * 1177 * Since these topologies are exclusive by construction. That is, its 1178 * impossible for an SMT thread to belong to multiple cores, and cores to 1179 * be part of multiple caches. There is a very clear and unique location 1180 * for each CPU in the hierarchy. 1181 * 1182 * Therefore computing a unique CPU for each group is trivial (the iteration 1183 * mask is redundant and set all 1s; all CPUs in a group will end up at _that_ 1184 * group), we can simply pick the first CPU in each group. 1185 * 1186 * 1187 * [*] in other words, the first group of each domain is its child domain. 
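 * Worked example for the 2-thread, 2-core, 2-cache part above: CPU0's MC
 * domain spans 0-3 and gets the groups {0,1} and {2,3}, each being the span
 * of a child SMT domain. get_group() below represents every group by the
 * first CPU of that child span (here 0 and 2), so all CPUs of a core share
 * a single sched_group/sched_group_capacity pair and the balance mask is
 * simply the group span.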
1188 */ 1189 1190 static struct sched_group *get_group(int cpu, struct sd_data *sdd) 1191 { 1192 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1193 struct sched_domain *child = sd->child; 1194 struct sched_group *sg; 1195 bool already_visited; 1196 1197 if (child) 1198 cpu = cpumask_first(sched_domain_span(child)); 1199 1200 sg = *per_cpu_ptr(sdd->sg, cpu); 1201 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); 1202 1203 /* Increase refcounts for claim_allocations: */ 1204 already_visited = atomic_inc_return(&sg->ref) > 1; 1205 /* sgc visits should follow a similar trend as sg */ 1206 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); 1207 1208 /* If we have already visited that group, it's already initialized. */ 1209 if (already_visited) 1210 return sg; 1211 1212 if (child) { 1213 cpumask_copy(sched_group_span(sg), sched_domain_span(child)); 1214 cpumask_copy(group_balance_mask(sg), sched_group_span(sg)); 1215 sg->flags = child->flags; 1216 } else { 1217 cpumask_set_cpu(cpu, sched_group_span(sg)); 1218 cpumask_set_cpu(cpu, group_balance_mask(sg)); 1219 } 1220 1221 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); 1222 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; 1223 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; 1224 1225 return sg; 1226 } 1227 1228 /* 1229 * build_sched_groups will build a circular linked list of the groups 1230 * covered by the given span, will set each group's ->cpumask correctly, 1231 * and will initialize their ->sgc. 1232 * 1233 * Assumes the sched_domain tree is fully constructed 1234 */ 1235 static int 1236 build_sched_groups(struct sched_domain *sd, int cpu) 1237 { 1238 struct sched_group *first = NULL, *last = NULL; 1239 struct sd_data *sdd = sd->private; 1240 const struct cpumask *span = sched_domain_span(sd); 1241 struct cpumask *covered; 1242 int i; 1243 1244 lockdep_assert_held(&sched_domains_mutex); 1245 covered = sched_domains_tmpmask; 1246 1247 cpumask_clear(covered); 1248 1249 for_each_cpu_wrap(i, span, cpu) { 1250 struct sched_group *sg; 1251 1252 if (cpumask_test_cpu(i, covered)) 1253 continue; 1254 1255 sg = get_group(i, sdd); 1256 1257 cpumask_or(covered, covered, sched_group_span(sg)); 1258 1259 if (!first) 1260 first = sg; 1261 if (last) 1262 last->next = sg; 1263 last = sg; 1264 } 1265 last->next = first; 1266 sd->groups = first; 1267 1268 return 0; 1269 } 1270 1271 /* 1272 * Initialize sched groups cpu_capacity. 1273 * 1274 * cpu_capacity indicates the capacity of sched group, which is used while 1275 * distributing the load between different sched groups in a sched domain. 1276 * Typically cpu_capacity for all the groups in a sched domain will be same 1277 * unless there are asymmetries in the topology. If there are asymmetries, 1278 * group having more cpu_capacity will pickup more load compared to the 1279 * group having less cpu_capacity. 
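 * As a rough illustrative example: a domain with one group of four CPUs at
 * capacity 1024 and another group of four CPUs at capacity 512 ends up with
 * group capacities of about 4096 and 2048, so the load balancer aims for
 * roughly a 2:1 split of load between the two groups.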
1280 */ 1281 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 1282 { 1283 struct sched_group *sg = sd->groups; 1284 struct cpumask *mask = sched_domains_tmpmask2; 1285 1286 WARN_ON(!sg); 1287 1288 do { 1289 int cpu, cores = 0, max_cpu = -1; 1290 1291 sg->group_weight = cpumask_weight(sched_group_span(sg)); 1292 1293 cpumask_copy(mask, sched_group_span(sg)); 1294 for_each_cpu(cpu, mask) { 1295 cores++; 1296 #ifdef CONFIG_SCHED_SMT 1297 cpumask_andnot(mask, mask, cpu_smt_mask(cpu)); 1298 #endif 1299 } 1300 sg->cores = cores; 1301 1302 if (!(sd->flags & SD_ASYM_PACKING)) 1303 goto next; 1304 1305 for_each_cpu(cpu, sched_group_span(sg)) { 1306 if (max_cpu < 0) 1307 max_cpu = cpu; 1308 else if (sched_asym_prefer(cpu, max_cpu)) 1309 max_cpu = cpu; 1310 } 1311 sg->asym_prefer_cpu = max_cpu; 1312 1313 next: 1314 sg = sg->next; 1315 } while (sg != sd->groups); 1316 1317 if (cpu != group_balance_cpu(sg)) 1318 return; 1319 1320 update_group_capacity(sd, cpu); 1321 } 1322 1323 /* Update the "asym_prefer_cpu" when arch_asym_cpu_priority() changes. */ 1324 void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio) 1325 { 1326 int asym_prefer_cpu = cpu; 1327 struct sched_domain *sd; 1328 1329 guard(rcu)(); 1330 1331 for_each_domain(cpu, sd) { 1332 struct sched_group *sg; 1333 int group_cpu; 1334 1335 if (!(sd->flags & SD_ASYM_PACKING)) 1336 continue; 1337 1338 /* 1339 * Groups of overlapping domain are replicated per NUMA 1340 * node and will require updating "asym_prefer_cpu" on 1341 * each local copy. 1342 * 1343 * If you are hitting this warning, consider moving 1344 * "sg->asym_prefer_cpu" to "sg->sgc->asym_prefer_cpu" 1345 * which is shared by all the overlapping groups. 1346 */ 1347 WARN_ON_ONCE(sd->flags & SD_OVERLAP); 1348 1349 sg = sd->groups; 1350 if (cpu != sg->asym_prefer_cpu) { 1351 /* 1352 * Since the parent is a superset of the current group, 1353 * if the cpu is not the "asym_prefer_cpu" at the 1354 * current level, it cannot be the preferred CPU at a 1355 * higher levels either. 1356 */ 1357 if (!sched_asym_prefer(cpu, sg->asym_prefer_cpu)) 1358 return; 1359 1360 WRITE_ONCE(sg->asym_prefer_cpu, cpu); 1361 continue; 1362 } 1363 1364 /* Ranking has improved; CPU is still the preferred one. */ 1365 if (new_prio >= old_prio) 1366 continue; 1367 1368 for_each_cpu(group_cpu, sched_group_span(sg)) { 1369 if (sched_asym_prefer(group_cpu, asym_prefer_cpu)) 1370 asym_prefer_cpu = group_cpu; 1371 } 1372 1373 WRITE_ONCE(sg->asym_prefer_cpu, asym_prefer_cpu); 1374 } 1375 } 1376 1377 /* 1378 * Set of available CPUs grouped by their corresponding capacities 1379 * Each list entry contains a CPU mask reflecting CPUs that share the same 1380 * capacity. 1381 * The lifespan of data is unlimited. 1382 */ 1383 LIST_HEAD(asym_cap_list); 1384 1385 /* 1386 * Verify whether there is any CPU capacity asymmetry in a given sched domain. 1387 * Provides sd_flags reflecting the asymmetry scope. 1388 */ 1389 static inline int 1390 asym_cpu_capacity_classify(const struct cpumask *sd_span, 1391 const struct cpumask *cpu_map) 1392 { 1393 struct asym_cap_data *entry; 1394 int count = 0, miss = 0; 1395 1396 /* 1397 * Count how many unique CPU capacities this domain spans across 1398 * (compare sched_domain CPUs mask with ones representing available 1399 * CPUs capacities). Take into account CPUs that might be offline: 1400 * skip those. 
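	 * The outcome below: a domain covering fewer than two distinct
	 * capacity values is treated as symmetric (no flags); a domain that
	 * spans some but not all capacity values present in cpu_map
	 * (miss != 0) is partially asymmetric, for example a domain covering
	 * only the big and medium CPUs of a three-capacity system; only a
	 * domain spanning every capacity value gets SD_ASYM_CPUCAPACITY_FULL.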
1401 */ 1402 list_for_each_entry(entry, &asym_cap_list, link) { 1403 if (cpumask_intersects(sd_span, cpu_capacity_span(entry))) 1404 ++count; 1405 else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) 1406 ++miss; 1407 } 1408 1409 WARN_ON_ONCE(!count && !list_empty(&asym_cap_list)); 1410 1411 /* No asymmetry detected */ 1412 if (count < 2) 1413 return 0; 1414 /* Some of the available CPU capacity values have not been detected */ 1415 if (miss) 1416 return SD_ASYM_CPUCAPACITY; 1417 1418 /* Full asymmetry */ 1419 return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL; 1420 1421 } 1422 1423 static void free_asym_cap_entry(struct rcu_head *head) 1424 { 1425 struct asym_cap_data *entry = container_of(head, struct asym_cap_data, rcu); 1426 kfree(entry); 1427 } 1428 1429 static inline void asym_cpu_capacity_update_data(int cpu) 1430 { 1431 unsigned long capacity = arch_scale_cpu_capacity(cpu); 1432 struct asym_cap_data *insert_entry = NULL; 1433 struct asym_cap_data *entry; 1434 1435 /* 1436 * Search if capacity already exits. If not, track which the entry 1437 * where we should insert to keep the list ordered descending. 1438 */ 1439 list_for_each_entry(entry, &asym_cap_list, link) { 1440 if (capacity == entry->capacity) 1441 goto done; 1442 else if (!insert_entry && capacity > entry->capacity) 1443 insert_entry = list_prev_entry(entry, link); 1444 } 1445 1446 entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL); 1447 if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n")) 1448 return; 1449 entry->capacity = capacity; 1450 1451 /* If NULL then the new capacity is the smallest, add last. */ 1452 if (!insert_entry) 1453 list_add_tail_rcu(&entry->link, &asym_cap_list); 1454 else 1455 list_add_rcu(&entry->link, &insert_entry->link); 1456 done: 1457 __cpumask_set_cpu(cpu, cpu_capacity_span(entry)); 1458 } 1459 1460 /* 1461 * Build-up/update list of CPUs grouped by their capacities 1462 * An update requires explicit request to rebuild sched domains 1463 * with state indicating CPU topology changes. 1464 */ 1465 static void asym_cpu_capacity_scan(void) 1466 { 1467 struct asym_cap_data *entry, *next; 1468 int cpu; 1469 1470 list_for_each_entry(entry, &asym_cap_list, link) 1471 cpumask_clear(cpu_capacity_span(entry)); 1472 1473 for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) 1474 asym_cpu_capacity_update_data(cpu); 1475 1476 list_for_each_entry_safe(entry, next, &asym_cap_list, link) { 1477 if (cpumask_empty(cpu_capacity_span(entry))) { 1478 list_del_rcu(&entry->link); 1479 call_rcu(&entry->rcu, free_asym_cap_entry); 1480 } 1481 } 1482 1483 /* 1484 * Only one capacity value has been detected i.e. this system is symmetric. 1485 * No need to keep this data around. 
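	 * For example, on an SMP machine where every CPU reports the same
	 * arch_scale_cpu_capacity(), the scan above leaves exactly one
	 * entry; dropping it keeps asym_cap_list empty so that
	 * asym_cpu_capacity_classify() trivially reports no asymmetry.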
1486 */ 1487 if (list_is_singular(&asym_cap_list)) { 1488 entry = list_first_entry(&asym_cap_list, typeof(*entry), link); 1489 list_del_rcu(&entry->link); 1490 call_rcu(&entry->rcu, free_asym_cap_entry); 1491 } 1492 } 1493 1494 /* 1495 * Initializers for schedule domains 1496 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 1497 */ 1498 1499 static int default_relax_domain_level = -1; 1500 int sched_domain_level_max; 1501 1502 static int __init setup_relax_domain_level(char *str) 1503 { 1504 if (kstrtoint(str, 0, &default_relax_domain_level)) 1505 pr_warn("Unable to set relax_domain_level\n"); 1506 1507 return 1; 1508 } 1509 __setup("relax_domain_level=", setup_relax_domain_level); 1510 1511 static void set_domain_attribute(struct sched_domain *sd, 1512 struct sched_domain_attr *attr) 1513 { 1514 int request; 1515 1516 if (!attr || attr->relax_domain_level < 0) { 1517 if (default_relax_domain_level < 0) 1518 return; 1519 request = default_relax_domain_level; 1520 } else 1521 request = attr->relax_domain_level; 1522 1523 if (sd->level >= request) { 1524 /* Turn off idle balance on this domain: */ 1525 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 1526 } 1527 } 1528 1529 static void __sdt_free(const struct cpumask *cpu_map); 1530 static int __sdt_alloc(const struct cpumask *cpu_map); 1531 1532 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 1533 const struct cpumask *cpu_map) 1534 { 1535 switch (what) { 1536 case sa_rootdomain: 1537 if (!atomic_read(&d->rd->refcount)) 1538 free_rootdomain(&d->rd->rcu); 1539 fallthrough; 1540 case sa_sd: 1541 free_percpu(d->sd); 1542 fallthrough; 1543 case sa_sd_storage: 1544 __sdt_free(cpu_map); 1545 fallthrough; 1546 case sa_none: 1547 break; 1548 } 1549 } 1550 1551 static enum s_alloc 1552 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) 1553 { 1554 memset(d, 0, sizeof(*d)); 1555 1556 if (__sdt_alloc(cpu_map)) 1557 return sa_sd_storage; 1558 d->sd = alloc_percpu(struct sched_domain *); 1559 if (!d->sd) 1560 return sa_sd_storage; 1561 d->rd = alloc_rootdomain(); 1562 if (!d->rd) 1563 return sa_sd; 1564 1565 return sa_rootdomain; 1566 } 1567 1568 /* 1569 * NULL the sd_data elements we've used to build the sched_domain and 1570 * sched_group structure so that the subsequent __free_domain_allocs() 1571 * will not free the data we're using. 1572 */ 1573 static void claim_allocations(int cpu, struct sched_domain *sd) 1574 { 1575 struct sd_data *sdd = sd->private; 1576 1577 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 1578 *per_cpu_ptr(sdd->sd, cpu) = NULL; 1579 1580 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) 1581 *per_cpu_ptr(sdd->sds, cpu) = NULL; 1582 1583 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 1584 *per_cpu_ptr(sdd->sg, cpu) = NULL; 1585 1586 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 1587 *per_cpu_ptr(sdd->sgc, cpu) = NULL; 1588 } 1589 1590 #ifdef CONFIG_NUMA 1591 enum numa_topology_type sched_numa_topology_type; 1592 1593 static int sched_domains_numa_levels; 1594 static int sched_domains_curr_level; 1595 1596 int sched_max_numa_distance; 1597 static int *sched_domains_numa_distance; 1598 static struct cpumask ***sched_domains_numa_masks; 1599 #endif /* CONFIG_NUMA */ 1600 1601 /* 1602 * SD_flags allowed in topology descriptions. 1603 * 1604 * These flags are purely descriptive of the topology and do not prescribe 1605 * behaviour. Behaviour is artificial and mapped in the below sd_init() 1606 * function. 
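 * For instance, a level that sets SD_SHARE_LLC merely states that its CPUs
 * share a last-level cache; it is sd_init() that maps this onto behaviour
 * (imbalance_pct = 117, cache_nice_tries = 1 and an attached
 * sched_domain_shared instance).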
For details, see include/linux/sched/sd_flags.h. 1607 * 1608 * SD_SHARE_CPUCAPACITY 1609 * SD_SHARE_LLC 1610 * SD_CLUSTER 1611 * SD_NUMA 1612 * 1613 * Odd one out, which beside describing the topology has a quirk also 1614 * prescribes the desired behaviour that goes along with it: 1615 * 1616 * SD_ASYM_PACKING - describes SMT quirks 1617 */ 1618 #define TOPOLOGY_SD_FLAGS \ 1619 (SD_SHARE_CPUCAPACITY | \ 1620 SD_CLUSTER | \ 1621 SD_SHARE_LLC | \ 1622 SD_NUMA | \ 1623 SD_ASYM_PACKING) 1624 1625 static struct sched_domain * 1626 sd_init(struct sched_domain_topology_level *tl, 1627 const struct cpumask *cpu_map, 1628 struct sched_domain *child, int cpu) 1629 { 1630 struct sd_data *sdd = &tl->data; 1631 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1632 int sd_id, sd_weight, sd_flags = 0; 1633 struct cpumask *sd_span; 1634 1635 #ifdef CONFIG_NUMA 1636 /* 1637 * Ugly hack to pass state to sd_numa_mask()... 1638 */ 1639 sched_domains_curr_level = tl->numa_level; 1640 #endif 1641 1642 sd_weight = cpumask_weight(tl->mask(cpu)); 1643 1644 if (tl->sd_flags) 1645 sd_flags = (*tl->sd_flags)(); 1646 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 1647 "wrong sd_flags in topology description\n")) 1648 sd_flags &= TOPOLOGY_SD_FLAGS; 1649 1650 *sd = (struct sched_domain){ 1651 .min_interval = sd_weight, 1652 .max_interval = 2*sd_weight, 1653 .busy_factor = 16, 1654 .imbalance_pct = 117, 1655 1656 .cache_nice_tries = 0, 1657 1658 .flags = 1*SD_BALANCE_NEWIDLE 1659 | 1*SD_BALANCE_EXEC 1660 | 1*SD_BALANCE_FORK 1661 | 0*SD_BALANCE_WAKE 1662 | 1*SD_WAKE_AFFINE 1663 | 0*SD_SHARE_CPUCAPACITY 1664 | 0*SD_SHARE_LLC 1665 | 0*SD_SERIALIZE 1666 | 1*SD_PREFER_SIBLING 1667 | 0*SD_NUMA 1668 | sd_flags 1669 , 1670 1671 .last_balance = jiffies, 1672 .balance_interval = sd_weight, 1673 .max_newidle_lb_cost = 0, 1674 .last_decay_max_lb_cost = jiffies, 1675 .child = child, 1676 .name = tl->name, 1677 }; 1678 1679 sd_span = sched_domain_span(sd); 1680 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); 1681 sd_id = cpumask_first(sd_span); 1682 1683 sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); 1684 1685 WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == 1686 (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY), 1687 "CPU capacity asymmetry not supported on SMT\n"); 1688 1689 /* 1690 * Convert topological properties into behaviour. 1691 */ 1692 /* Don't attempt to spread across CPUs of different capacities. */ 1693 if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) 1694 sd->child->flags &= ~SD_PREFER_SIBLING; 1695 1696 if (sd->flags & SD_SHARE_CPUCAPACITY) { 1697 sd->imbalance_pct = 110; 1698 1699 } else if (sd->flags & SD_SHARE_LLC) { 1700 sd->imbalance_pct = 117; 1701 sd->cache_nice_tries = 1; 1702 1703 #ifdef CONFIG_NUMA 1704 } else if (sd->flags & SD_NUMA) { 1705 sd->cache_nice_tries = 2; 1706 1707 sd->flags &= ~SD_PREFER_SIBLING; 1708 sd->flags |= SD_SERIALIZE; 1709 if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { 1710 sd->flags &= ~(SD_BALANCE_EXEC | 1711 SD_BALANCE_FORK | 1712 SD_WAKE_AFFINE); 1713 } 1714 1715 #endif /* CONFIG_NUMA */ 1716 } else { 1717 sd->cache_nice_tries = 1; 1718 } 1719 1720 /* 1721 * For all levels sharing cache; connect a sched_domain_shared 1722 * instance. 1723 */ 1724 if (sd->flags & SD_SHARE_LLC) { 1725 sd->shared = *per_cpu_ptr(sdd->sds, sd_id); 1726 atomic_inc(&sd->shared->ref); 1727 atomic_set(&sd->shared->nr_busy_cpus, sd_weight); 1728 } 1729 1730 sd->private = sdd; 1731 1732 return sd; 1733 } 1734 1735 /* 1736 * Topology list, bottom-up. 
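 * Each entry provides a mask function naming the CPUs that share that
 * level's resource; on a typical SMT machine the resulting per-CPU
 * hierarchy reads SMT -> CLS -> MC -> PKG (depending on the CONFIG_SCHED_*
 * options), with NUMA levels appended at runtime by sched_init_numa().
 * Levels that turn out to be redundant are collapsed later by the
 * degeneration logic in cpu_attach_domain().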
1737 */ 1738 static struct sched_domain_topology_level default_topology[] = { 1739 #ifdef CONFIG_SCHED_SMT 1740 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 1741 #endif 1742 1743 #ifdef CONFIG_SCHED_CLUSTER 1744 { cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) }, 1745 #endif 1746 1747 #ifdef CONFIG_SCHED_MC 1748 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 1749 #endif 1750 { cpu_cpu_mask, SD_INIT_NAME(PKG) }, 1751 { NULL, }, 1752 }; 1753 1754 static struct sched_domain_topology_level *sched_domain_topology = 1755 default_topology; 1756 static struct sched_domain_topology_level *sched_domain_topology_saved; 1757 1758 #define for_each_sd_topology(tl) \ 1759 for (tl = sched_domain_topology; tl->mask; tl++) 1760 1761 void __init set_sched_topology(struct sched_domain_topology_level *tl) 1762 { 1763 if (WARN_ON_ONCE(sched_smp_initialized)) 1764 return; 1765 1766 sched_domain_topology = tl; 1767 sched_domain_topology_saved = NULL; 1768 } 1769 1770 #ifdef CONFIG_NUMA 1771 1772 static const struct cpumask *sd_numa_mask(int cpu) 1773 { 1774 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 1775 } 1776 1777 static void sched_numa_warn(const char *str) 1778 { 1779 static int done = false; 1780 int i,j; 1781 1782 if (done) 1783 return; 1784 1785 done = true; 1786 1787 printk(KERN_WARNING "ERROR: %s\n\n", str); 1788 1789 for (i = 0; i < nr_node_ids; i++) { 1790 printk(KERN_WARNING " "); 1791 for (j = 0; j < nr_node_ids; j++) { 1792 if (!node_state(i, N_CPU) || !node_state(j, N_CPU)) 1793 printk(KERN_CONT "(%02d) ", node_distance(i,j)); 1794 else 1795 printk(KERN_CONT " %02d ", node_distance(i,j)); 1796 } 1797 printk(KERN_CONT "\n"); 1798 } 1799 printk(KERN_WARNING "\n"); 1800 } 1801 1802 bool find_numa_distance(int distance) 1803 { 1804 bool found = false; 1805 int i, *distances; 1806 1807 if (distance == node_distance(0, 0)) 1808 return true; 1809 1810 rcu_read_lock(); 1811 distances = rcu_dereference(sched_domains_numa_distance); 1812 if (!distances) 1813 goto unlock; 1814 for (i = 0; i < sched_domains_numa_levels; i++) { 1815 if (distances[i] == distance) { 1816 found = true; 1817 break; 1818 } 1819 } 1820 unlock: 1821 rcu_read_unlock(); 1822 1823 return found; 1824 } 1825 1826 #define for_each_cpu_node_but(n, nbut) \ 1827 for_each_node_state(n, N_CPU) \ 1828 if (n == nbut) \ 1829 continue; \ 1830 else 1831 1832 /* 1833 * A system can have three types of NUMA topology: 1834 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 1835 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 1836 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 1837 * 1838 * The difference between a glueless mesh topology and a backplane 1839 * topology lies in whether communication between not directly 1840 * connected nodes goes through intermediary nodes (where programs 1841 * could run), or through backplane controllers. This affects 1842 * placement of programs. 1843 * 1844 * The type of topology can be discerned with the following tests: 1845 * - If the maximum distance between any nodes is 1 hop, the system 1846 * is directly connected. 1847 * - If for two nodes A and B, located N > 1 hops away from each other, 1848 * there is an intermediary node C, which is < N hops away from both 1849 * nodes A and B, the system is a glueless mesh. 
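 * - Otherwise, i.e. when some pair of nodes at maximum distance has no such
 *   intermediary node, the system is a backplane topology.
 *
 * For the 4-node ring example above (maximum distance 30, i.e. 2 hops) the
 * furthest pair 0<->2 has node 1 (and node 3) closer to both ends, so such
 * a system classifies as a glueless mesh.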
1850 */ 1851 static void init_numa_topology_type(int offline_node) 1852 { 1853 int a, b, c, n; 1854 1855 n = sched_max_numa_distance; 1856 1857 if (sched_domains_numa_levels <= 2) { 1858 sched_numa_topology_type = NUMA_DIRECT; 1859 return; 1860 } 1861 1862 for_each_cpu_node_but(a, offline_node) { 1863 for_each_cpu_node_but(b, offline_node) { 1864 /* Find two nodes furthest removed from each other. */ 1865 if (node_distance(a, b) < n) 1866 continue; 1867 1868 /* Is there an intermediary node between a and b? */ 1869 for_each_cpu_node_but(c, offline_node) { 1870 if (node_distance(a, c) < n && 1871 node_distance(b, c) < n) { 1872 sched_numa_topology_type = 1873 NUMA_GLUELESS_MESH; 1874 return; 1875 } 1876 } 1877 1878 sched_numa_topology_type = NUMA_BACKPLANE; 1879 return; 1880 } 1881 } 1882 1883 pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n"); 1884 sched_numa_topology_type = NUMA_DIRECT; 1885 } 1886 1887 1888 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS) 1889 1890 void sched_init_numa(int offline_node) 1891 { 1892 struct sched_domain_topology_level *tl; 1893 unsigned long *distance_map; 1894 int nr_levels = 0; 1895 int i, j; 1896 int *distances; 1897 struct cpumask ***masks; 1898 1899 /* 1900 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the 1901 * unique distances in the node_distance() table. 1902 */ 1903 distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL); 1904 if (!distance_map) 1905 return; 1906 1907 bitmap_zero(distance_map, NR_DISTANCE_VALUES); 1908 for_each_cpu_node_but(i, offline_node) { 1909 for_each_cpu_node_but(j, offline_node) { 1910 int distance = node_distance(i, j); 1911 1912 if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) { 1913 sched_numa_warn("Invalid distance value range"); 1914 bitmap_free(distance_map); 1915 return; 1916 } 1917 1918 bitmap_set(distance_map, distance, 1); 1919 } 1920 } 1921 /* 1922 * We can now figure out how many unique distance values there are and 1923 * allocate memory accordingly. 1924 */ 1925 nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES); 1926 1927 distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL); 1928 if (!distances) { 1929 bitmap_free(distance_map); 1930 return; 1931 } 1932 1933 for (i = 0, j = 0; i < nr_levels; i++, j++) { 1934 j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j); 1935 distances[i] = j; 1936 } 1937 rcu_assign_pointer(sched_domains_numa_distance, distances); 1938 1939 bitmap_free(distance_map); 1940 1941 /* 1942 * 'nr_levels' contains the number of unique distances 1943 * 1944 * The sched_domains_numa_distance[] array includes the actual distance 1945 * numbers. 1946 */ 1947 1948 /* 1949 * Here, we should temporarily reset sched_domains_numa_levels to 0. 1950 * If it fails to allocate memory for array sched_domains_numa_masks[][], 1951 * the array will contain less then 'nr_levels' members. This could be 1952 * dangerous when we use it to iterate array sched_domains_numa_masks[][] 1953 * in other functions. 1954 * 1955 * We reset it to 'nr_levels' at the end of this function. 1956 */ 1957 sched_domains_numa_levels = 0; 1958 1959 masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL); 1960 if (!masks) 1961 return; 1962 1963 /* 1964 * Now for each level, construct a mask per node which contains all 1965 * CPUs of nodes that are that many hops away from us. 
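	 * For the ring example from the NUMA blurb (distances 10/20/30,
	 * hence nr_levels == 3), node 0 ends up with masks[0][0] covering
	 * the CPUs of node 0, masks[1][0] covering nodes {0,1,3} (distance
	 * <= 20) and masks[2][0] covering all four nodes.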
1966 */ 1967 for (i = 0; i < nr_levels; i++) { 1968 masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 1969 if (!masks[i]) 1970 return; 1971 1972 for_each_cpu_node_but(j, offline_node) { 1973 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 1974 int k; 1975 1976 if (!mask) 1977 return; 1978 1979 masks[i][j] = mask; 1980 1981 for_each_cpu_node_but(k, offline_node) { 1982 if (sched_debug() && (node_distance(j, k) != node_distance(k, j))) 1983 sched_numa_warn("Node-distance not symmetric"); 1984 1985 if (node_distance(j, k) > sched_domains_numa_distance[i]) 1986 continue; 1987 1988 cpumask_or(mask, mask, cpumask_of_node(k)); 1989 } 1990 } 1991 } 1992 rcu_assign_pointer(sched_domains_numa_masks, masks); 1993 1994 /* Compute default topology size */ 1995 for (i = 0; sched_domain_topology[i].mask; i++); 1996 1997 tl = kzalloc((i + nr_levels + 1) * 1998 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 1999 if (!tl) 2000 return; 2001 2002 /* 2003 * Copy the default topology bits.. 2004 */ 2005 for (i = 0; sched_domain_topology[i].mask; i++) 2006 tl[i] = sched_domain_topology[i]; 2007 2008 /* 2009 * Add the NUMA identity distance, aka single NODE. 2010 */ 2011 tl[i++] = (struct sched_domain_topology_level){ 2012 .mask = sd_numa_mask, 2013 .numa_level = 0, 2014 SD_INIT_NAME(NODE) 2015 }; 2016 2017 /* 2018 * .. and append 'j' levels of NUMA goodness. 2019 */ 2020 for (j = 1; j < nr_levels; i++, j++) { 2021 tl[i] = (struct sched_domain_topology_level){ 2022 .mask = sd_numa_mask, 2023 .sd_flags = cpu_numa_flags, 2024 .flags = SDTL_OVERLAP, 2025 .numa_level = j, 2026 SD_INIT_NAME(NUMA) 2027 }; 2028 } 2029 2030 sched_domain_topology_saved = sched_domain_topology; 2031 sched_domain_topology = tl; 2032 2033 sched_domains_numa_levels = nr_levels; 2034 WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]); 2035 2036 init_numa_topology_type(offline_node); 2037 } 2038 2039 2040 static void sched_reset_numa(void) 2041 { 2042 int nr_levels, *distances; 2043 struct cpumask ***masks; 2044 2045 nr_levels = sched_domains_numa_levels; 2046 sched_domains_numa_levels = 0; 2047 sched_max_numa_distance = 0; 2048 sched_numa_topology_type = NUMA_DIRECT; 2049 distances = sched_domains_numa_distance; 2050 rcu_assign_pointer(sched_domains_numa_distance, NULL); 2051 masks = sched_domains_numa_masks; 2052 rcu_assign_pointer(sched_domains_numa_masks, NULL); 2053 if (distances || masks) { 2054 int i, j; 2055 2056 synchronize_rcu(); 2057 kfree(distances); 2058 for (i = 0; i < nr_levels && masks; i++) { 2059 if (!masks[i]) 2060 continue; 2061 for_each_node(j) 2062 kfree(masks[i][j]); 2063 kfree(masks[i]); 2064 } 2065 kfree(masks); 2066 } 2067 if (sched_domain_topology_saved) { 2068 kfree(sched_domain_topology); 2069 sched_domain_topology = sched_domain_topology_saved; 2070 sched_domain_topology_saved = NULL; 2071 } 2072 } 2073 2074 /* 2075 * Call with hotplug lock held 2076 */ 2077 void sched_update_numa(int cpu, bool online) 2078 { 2079 int node; 2080 2081 node = cpu_to_node(cpu); 2082 /* 2083 * Scheduler NUMA topology is updated when the first CPU of a 2084 * node is onlined or the last CPU of a node is offlined. 2085 */ 2086 if (cpumask_weight(cpumask_of_node(node)) != 1) 2087 return; 2088 2089 sched_reset_numa(); 2090 sched_init_numa(online ? 
NUMA_NO_NODE : node); 2091 } 2092 2093 void sched_domains_numa_masks_set(unsigned int cpu) 2094 { 2095 int node = cpu_to_node(cpu); 2096 int i, j; 2097 2098 for (i = 0; i < sched_domains_numa_levels; i++) { 2099 for (j = 0; j < nr_node_ids; j++) { 2100 if (!node_state(j, N_CPU)) 2101 continue; 2102 2103 /* Set ourselves in the remote node's masks */ 2104 if (node_distance(j, node) <= sched_domains_numa_distance[i]) 2105 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 2106 } 2107 } 2108 } 2109 2110 void sched_domains_numa_masks_clear(unsigned int cpu) 2111 { 2112 int i, j; 2113 2114 for (i = 0; i < sched_domains_numa_levels; i++) { 2115 for (j = 0; j < nr_node_ids; j++) { 2116 if (sched_domains_numa_masks[i][j]) 2117 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 2118 } 2119 } 2120 } 2121 2122 /* 2123 * sched_numa_find_closest() - given the NUMA topology, find the cpu 2124 * closest to @cpu from @cpumask. 2125 * cpumask: cpumask to find a cpu from 2126 * cpu: cpu to be close to 2127 * 2128 * returns: cpu, or nr_cpu_ids when nothing found. 2129 */ 2130 int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 2131 { 2132 int i, j = cpu_to_node(cpu), found = nr_cpu_ids; 2133 struct cpumask ***masks; 2134 2135 rcu_read_lock(); 2136 masks = rcu_dereference(sched_domains_numa_masks); 2137 if (!masks) 2138 goto unlock; 2139 for (i = 0; i < sched_domains_numa_levels; i++) { 2140 if (!masks[i][j]) 2141 break; 2142 cpu = cpumask_any_and_distribute(cpus, masks[i][j]); 2143 if (cpu < nr_cpu_ids) { 2144 found = cpu; 2145 break; 2146 } 2147 } 2148 unlock: 2149 rcu_read_unlock(); 2150 2151 return found; 2152 } 2153 2154 struct __cmp_key { 2155 const struct cpumask *cpus; 2156 struct cpumask ***masks; 2157 int node; 2158 int cpu; 2159 int w; 2160 }; 2161 2162 static int hop_cmp(const void *a, const void *b) 2163 { 2164 struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b; 2165 struct __cmp_key *k = (struct __cmp_key *)a; 2166 2167 if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu) 2168 return 1; 2169 2170 if (b == k->masks) { 2171 k->w = 0; 2172 return 0; 2173 } 2174 2175 prev_hop = *((struct cpumask ***)b - 1); 2176 k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]); 2177 if (k->w <= k->cpu) 2178 return 0; 2179 2180 return -1; 2181 } 2182 2183 /** 2184 * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth closest CPU 2185 * from @cpus to @cpu, taking into account distance 2186 * from a given @node. 2187 * @cpus: cpumask to find a cpu from 2188 * @cpu: CPU to start searching 2189 * @node: NUMA node to order CPUs by distance 2190 * 2191 * Return: cpu, or nr_cpu_ids when nothing found. 2192 */ 2193 int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) 2194 { 2195 struct __cmp_key k = { .cpus = cpus, .cpu = cpu }; 2196 struct cpumask ***hop_masks; 2197 int hop, ret = nr_cpu_ids; 2198 2199 if (node == NUMA_NO_NODE) 2200 return cpumask_nth_and(cpu, cpus, cpu_online_mask); 2201 2202 rcu_read_lock(); 2203 2204 /* CPU-less node entries are uninitialized in sched_domains_numa_masks */ 2205 node = numa_nearest_node(node, N_CPU); 2206 k.node = node; 2207 2208 k.masks = rcu_dereference(sched_domains_numa_masks); 2209 if (!k.masks) 2210 goto unlock; 2211 2212 hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp); 2213 hop = hop_masks - k.masks; 2214 2215 ret = hop ? 
2216 cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) : 2217 cpumask_nth_and(cpu, cpus, k.masks[0][node]); 2218 unlock: 2219 rcu_read_unlock(); 2220 return ret; 2221 } 2222 EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu); 2223 2224 /** 2225 * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from 2226 * @node 2227 * @node: The node to count hops from. 2228 * @hops: Include CPUs up to that many hops away. 0 means local node. 2229 * 2230 * Return: On success, a pointer to a cpumask of CPUs at most @hops away from 2231 * @node, an error value otherwise. 2232 * 2233 * Requires rcu_lock to be held. Returned cpumask is only valid within that 2234 * read-side section, copy it if required beyond that. 2235 * 2236 * Note that not all hops are equal in distance; see sched_init_numa() for how 2237 * distances and masks are handled. 2238 * Also note that this is a reflection of sched_domains_numa_masks, which may change 2239 * during the lifetime of the system (offline nodes are taken out of the masks). 2240 */ 2241 const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops) 2242 { 2243 struct cpumask ***masks; 2244 2245 if (node >= nr_node_ids || hops >= sched_domains_numa_levels) 2246 return ERR_PTR(-EINVAL); 2247 2248 masks = rcu_dereference(sched_domains_numa_masks); 2249 if (!masks) 2250 return ERR_PTR(-EBUSY); 2251 2252 return masks[hops][node]; 2253 } 2254 EXPORT_SYMBOL_GPL(sched_numa_hop_mask); 2255 2256 #endif /* CONFIG_NUMA */ 2257 2258 static int __sdt_alloc(const struct cpumask *cpu_map) 2259 { 2260 struct sched_domain_topology_level *tl; 2261 int j; 2262 2263 for_each_sd_topology(tl) { 2264 struct sd_data *sdd = &tl->data; 2265 2266 sdd->sd = alloc_percpu(struct sched_domain *); 2267 if (!sdd->sd) 2268 return -ENOMEM; 2269 2270 sdd->sds = alloc_percpu(struct sched_domain_shared *); 2271 if (!sdd->sds) 2272 return -ENOMEM; 2273 2274 sdd->sg = alloc_percpu(struct sched_group *); 2275 if (!sdd->sg) 2276 return -ENOMEM; 2277 2278 sdd->sgc = alloc_percpu(struct sched_group_capacity *); 2279 if (!sdd->sgc) 2280 return -ENOMEM; 2281 2282 for_each_cpu(j, cpu_map) { 2283 struct sched_domain *sd; 2284 struct sched_domain_shared *sds; 2285 struct sched_group *sg; 2286 struct sched_group_capacity *sgc; 2287 2288 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 2289 GFP_KERNEL, cpu_to_node(j)); 2290 if (!sd) 2291 return -ENOMEM; 2292 2293 *per_cpu_ptr(sdd->sd, j) = sd; 2294 2295 sds = kzalloc_node(sizeof(struct sched_domain_shared), 2296 GFP_KERNEL, cpu_to_node(j)); 2297 if (!sds) 2298 return -ENOMEM; 2299 2300 *per_cpu_ptr(sdd->sds, j) = sds; 2301 2302 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 2303 GFP_KERNEL, cpu_to_node(j)); 2304 if (!sg) 2305 return -ENOMEM; 2306 2307 sg->next = sg; 2308 2309 *per_cpu_ptr(sdd->sg, j) = sg; 2310 2311 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 2312 GFP_KERNEL, cpu_to_node(j)); 2313 if (!sgc) 2314 return -ENOMEM; 2315 2316 sgc->id = j; 2317 2318 *per_cpu_ptr(sdd->sgc, j) = sgc; 2319 } 2320 } 2321 2322 return 0; 2323 } 2324 2325 static void __sdt_free(const struct cpumask *cpu_map) 2326 { 2327 struct sched_domain_topology_level *tl; 2328 int j; 2329 2330 for_each_sd_topology(tl) { 2331 struct sd_data *sdd = &tl->data; 2332 2333 for_each_cpu(j, cpu_map) { 2334 struct sched_domain *sd; 2335 2336 if (sdd->sd) { 2337 sd = *per_cpu_ptr(sdd->sd, j); 2338 if (sd && (sd->flags & SD_OVERLAP)) 2339 free_sched_groups(sd->groups, 0); 2340 
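/*
 * Group lists of overlapping (NUMA) domains were built separately in
 * build_overlap_sched_groups() rather than taken from sdd->sg, which is
 * why they need the explicit free_sched_groups() above before the
 * domain itself is freed.
 */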
kfree(*per_cpu_ptr(sdd->sd, j)); 2341 } 2342 2343 if (sdd->sds) 2344 kfree(*per_cpu_ptr(sdd->sds, j)); 2345 if (sdd->sg) 2346 kfree(*per_cpu_ptr(sdd->sg, j)); 2347 if (sdd->sgc) 2348 kfree(*per_cpu_ptr(sdd->sgc, j)); 2349 } 2350 free_percpu(sdd->sd); 2351 sdd->sd = NULL; 2352 free_percpu(sdd->sds); 2353 sdd->sds = NULL; 2354 free_percpu(sdd->sg); 2355 sdd->sg = NULL; 2356 free_percpu(sdd->sgc); 2357 sdd->sgc = NULL; 2358 } 2359 } 2360 2361 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 2362 const struct cpumask *cpu_map, struct sched_domain_attr *attr, 2363 struct sched_domain *child, int cpu) 2364 { 2365 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); 2366 2367 if (child) { 2368 sd->level = child->level + 1; 2369 sched_domain_level_max = max(sched_domain_level_max, sd->level); 2370 child->parent = sd; 2371 2372 if (!cpumask_subset(sched_domain_span(child), 2373 sched_domain_span(sd))) { 2374 pr_err("BUG: arch topology borken\n"); 2375 pr_err(" the %s domain not a subset of the %s domain\n", 2376 child->name, sd->name); 2377 /* Fixup, ensure @sd has at least @child CPUs. */ 2378 cpumask_or(sched_domain_span(sd), 2379 sched_domain_span(sd), 2380 sched_domain_span(child)); 2381 } 2382 2383 } 2384 set_domain_attribute(sd, attr); 2385 2386 return sd; 2387 } 2388 2389 /* 2390 * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for 2391 * any two given CPUs on non-NUMA topology levels. 2392 */ 2393 static bool topology_span_sane(const struct cpumask *cpu_map) 2394 { 2395 struct sched_domain_topology_level *tl; 2396 struct cpumask *covered, *id_seen; 2397 int cpu; 2398 2399 lockdep_assert_held(&sched_domains_mutex); 2400 covered = sched_domains_tmpmask; 2401 id_seen = sched_domains_tmpmask2; 2402 2403 for_each_sd_topology(tl) { 2404 2405 /* NUMA levels are allowed to overlap */ 2406 if (tl->flags & SDTL_OVERLAP) 2407 continue; 2408 2409 cpumask_clear(covered); 2410 cpumask_clear(id_seen); 2411 2412 /* 2413 * Non-NUMA levels cannot partially overlap - they must be either 2414 * completely equal or completely disjoint. Otherwise we can end up 2415 * breaking the sched_group lists - i.e. a later get_group() pass 2416 * breaks the linking done for an earlier span. 
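 *
 * For instance (a purely hypothetical broken topology): if tl->mask(0)
 * spanned CPUs 0-2 while tl->mask(3) spanned CPUs 2-3, the two spans would
 * share CPU2 without being equal, so the check below rejects the topology.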
2417 */ 2418 for_each_cpu(cpu, cpu_map) { 2419 const struct cpumask *tl_cpu_mask = tl->mask(cpu); 2420 int id; 2421
2422 /* lowest bit set in this mask is used as a unique id */ 2423 id = cpumask_first(tl_cpu_mask); 2424
2425 if (cpumask_test_cpu(id, id_seen)) { 2426 /* First CPU has already been seen, ensure identical spans */ 2427 if (!cpumask_equal(tl->mask(id), tl_cpu_mask)) 2428 return false; 2429 } else { 2430 /* First CPU hasn't been seen before, ensure it's a completely new span */ 2431 if (cpumask_intersects(tl_cpu_mask, covered)) 2432 return false; 2433
2434 cpumask_or(covered, covered, tl_cpu_mask); 2435 cpumask_set_cpu(id, id_seen); 2436 } 2437 } 2438 } 2439 return true; 2440 } 2441
2442 /* 2443 * Build sched domains for a given set of CPUs and attach the sched domains 2444 * to the individual CPUs 2445 */ 2446 static int 2447 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) 2448 { 2449 enum s_alloc alloc_state = sa_none; 2450 struct sched_domain *sd; 2451 struct s_data d; 2452 struct rq *rq = NULL; 2453 int i, ret = -ENOMEM; 2454 bool has_asym = false; 2455 bool has_cluster = false; 2456
2457 if (WARN_ON(cpumask_empty(cpu_map))) 2458 goto error; 2459
2460 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 2461 if (alloc_state != sa_rootdomain) 2462 goto error; 2463
2464 /* Set up domains for CPUs specified by the cpu_map: */ 2465 for_each_cpu(i, cpu_map) { 2466 struct sched_domain_topology_level *tl; 2467
2468 sd = NULL; 2469 for_each_sd_topology(tl) { 2470
2471 sd = build_sched_domain(tl, cpu_map, attr, sd, i); 2472
2473 has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; 2474
2475 if (tl == sched_domain_topology) 2476 *per_cpu_ptr(d.sd, i) = sd; 2477 if (tl->flags & SDTL_OVERLAP) 2478 sd->flags |= SD_OVERLAP; 2479 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 2480 break; 2481 } 2482 } 2483
2484 if (WARN_ON(!topology_span_sane(cpu_map))) 2485 goto error; 2486
2487 /* Build the groups for the domains */ 2488 for_each_cpu(i, cpu_map) { 2489 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2490 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 2491 if (sd->flags & SD_OVERLAP) { 2492 if (build_overlap_sched_groups(sd, i)) 2493 goto error; 2494 } else { 2495 if (build_sched_groups(sd, i)) 2496 goto error; 2497 } 2498 } 2499 } 2500
2501 /* 2502 * Calculate an allowed NUMA imbalance such that LLCs do not get 2503 * imbalanced. 2504 */ 2505 for_each_cpu(i, cpu_map) { 2506 unsigned int imb = 0; 2507 unsigned int imb_span = 1; 2508
2509 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2510 struct sched_domain *child = sd->child; 2511
2512 if (!(sd->flags & SD_SHARE_LLC) && child && 2513 (child->flags & SD_SHARE_LLC)) { 2514 struct sched_domain __rcu *top_p; 2515 unsigned int nr_llcs; 2516
2517 /* 2518 * For a single LLC per node, allow an 2519 * imbalance up to 12.5% of the node. This is 2520 * an arbitrary cutoff based on two factors -- SMT and 2521 * memory channels. For SMT-2, the intent is to 2522 * avoid premature sharing of HT resources but 2523 * SMT-4 or SMT-8 *may* benefit from a different 2524 * cutoff. For memory channels, this is a very 2525 * rough estimate of how many channels may be 2526 * active and is based on recent CPUs with 2527 * many cores. 2528 * 2529 * For multiple LLCs, allow an imbalance 2530 * until multiple tasks would share an LLC 2531 * on one node while LLCs on another node 2532 * remain idle.
This assumes that there are 2533 * enough logical CPUs per LLC to avoid SMT 2534 * factors and that there is a correlation 2535 * between LLCs and memory channels. 2536 */ 2537 nr_llcs = sd->span_weight / child->span_weight; 2538 if (nr_llcs == 1) 2539 imb = sd->span_weight >> 3; 2540 else 2541 imb = nr_llcs; 2542 imb = max(1U, imb); 2543 sd->imb_numa_nr = imb; 2544
2545 /* Set span based on the first NUMA domain. */ 2546 top_p = sd->parent; 2547 while (top_p && !(top_p->flags & SD_NUMA)) { 2548 top_p = top_p->parent; 2549 } 2550 imb_span = top_p ? top_p->span_weight : sd->span_weight; 2551 } else { 2552 int factor = max(1U, (sd->span_weight / imb_span)); 2553
2554 sd->imb_numa_nr = imb * factor; 2555 } 2556 } 2557 } 2558
2559 /* Calculate CPU capacity for physical packages and nodes */ 2560 for (i = nr_cpumask_bits-1; i >= 0; i--) { 2561 if (!cpumask_test_cpu(i, cpu_map)) 2562 continue; 2563
2564 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2565 claim_allocations(i, sd); 2566 init_sched_groups_capacity(i, sd); 2567 } 2568 } 2569
2570 /* Attach the domains */ 2571 rcu_read_lock(); 2572 for_each_cpu(i, cpu_map) { 2573 rq = cpu_rq(i); 2574 sd = *per_cpu_ptr(d.sd, i); 2575
2576 cpu_attach_domain(sd, d.rd, i); 2577
2578 if (lowest_flag_domain(i, SD_CLUSTER)) 2579 has_cluster = true; 2580 } 2581 rcu_read_unlock(); 2582
2583 if (has_asym) 2584 static_branch_inc_cpuslocked(&sched_asym_cpucapacity); 2585
2586 if (has_cluster) 2587 static_branch_inc_cpuslocked(&sched_cluster_active); 2588
2589 if (rq && sched_debug_verbose) 2590 pr_info("root domain span: %*pbl\n", cpumask_pr_args(cpu_map)); 2591
2592 ret = 0; 2593 error: 2594 __free_domain_allocs(&d, alloc_state, cpu_map); 2595
2596 return ret; 2597 } 2598
2599 /* Current sched domains: */ 2600 static cpumask_var_t *doms_cur; 2601
2602 /* Number of sched domains in 'doms_cur': */ 2603 static int ndoms_cur; 2604
2605 /* Attributes of custom domains in 'doms_cur' */ 2606 static struct sched_domain_attr *dattr_cur; 2607
2608 /* 2609 * Special case: If a kmalloc() of a doms_cur partition (array of 2610 * cpumask) fails, then fall back to a single sched domain, 2611 * as determined by the single cpumask fallback_doms. 2612 */ 2613 static cpumask_var_t fallback_doms; 2614
2615 /* 2616 * arch_update_cpu_topology lets virtualized architectures update the 2617 * CPU core maps. It is supposed to return 1 if the topology changed 2618 * or 0 if it stayed the same. 2619 */ 2620 int __weak arch_update_cpu_topology(void) 2621 { 2622 return 0; 2623 } 2624
2625 cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 2626 { 2627 int i; 2628 cpumask_var_t *doms; 2629
2630 doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL); 2631 if (!doms) 2632 return NULL; 2633 for (i = 0; i < ndoms; i++) { 2634 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 2635 free_sched_domains(doms, i); 2636 return NULL; 2637 } 2638 } 2639 return doms; 2640 } 2641
2642 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 2643 { 2644 unsigned int i; 2645 for (i = 0; i < ndoms; i++) 2646 free_cpumask_var(doms[i]); 2647 kfree(doms); 2648 } 2649
2650 /* 2651 * Set up scheduler domains and groups. For now this just excludes isolated 2652 * CPUs, but could be used to exclude other special cases in the future.
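 *
 * For example, booting with "isolcpus=domain,2-3" drops CPUs 2-3 from the
 * HK_TYPE_DOMAIN housekeeping mask, so the cpumask_and() below leaves them
 * out of doms_cur[0] and they are never load balanced.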
2653 */ 2654 int __init sched_init_domains(const struct cpumask *cpu_map) 2655 { 2656 int err; 2657
2658 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL); 2659 zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL); 2660 zalloc_cpumask_var(&fallback_doms, GFP_KERNEL); 2661
2662 arch_update_cpu_topology(); 2663 asym_cpu_capacity_scan(); 2664 ndoms_cur = 1; 2665 doms_cur = alloc_sched_domains(ndoms_cur); 2666 if (!doms_cur) 2667 doms_cur = &fallback_doms; 2668 cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN)); 2669 err = build_sched_domains(doms_cur[0], NULL); 2670
2671 return err; 2672 } 2673
2674 /* 2675 * Detach sched domains from a group of CPUs specified in cpu_map. 2676 * These CPUs will now be attached to the NULL domain. 2677 */ 2678 static void detach_destroy_domains(const struct cpumask *cpu_map) 2679 { 2680 unsigned int cpu = cpumask_any(cpu_map); 2681 int i; 2682
2683 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) 2684 static_branch_dec_cpuslocked(&sched_asym_cpucapacity); 2685
2686 if (static_branch_unlikely(&sched_cluster_active)) 2687 static_branch_dec_cpuslocked(&sched_cluster_active); 2688
2689 rcu_read_lock(); 2690 for_each_cpu(i, cpu_map) 2691 cpu_attach_domain(NULL, &def_root_domain, i); 2692 rcu_read_unlock(); 2693 } 2694
2695 /* handle null as "default" */ 2696 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 2697 struct sched_domain_attr *new, int idx_new) 2698 { 2699 struct sched_domain_attr tmp; 2700
2701 /* Fast path: */ 2702 if (!new && !cur) 2703 return 1; 2704
2705 tmp = SD_ATTR_INIT; 2706
2707 return !memcmp(cur ? (cur + idx_cur) : &tmp, 2708 new ? (new + idx_new) : &tmp, 2709 sizeof(struct sched_domain_attr)); 2710 } 2711
2712 /* 2713 * Partition sched domains as specified by the 'ndoms_new' 2714 * cpumasks in the array doms_new[] of cpumasks. This compares 2715 * doms_new[] to the current sched domain partitioning, doms_cur[]. 2716 * It destroys each deleted domain and builds each new domain. 2717 * 2718 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 2719 * The masks don't intersect (don't overlap.) We should set up one 2720 * sched domain for each mask. CPUs not in any of the cpumasks will 2721 * not be load balanced. If the same cpumask appears both in the 2722 * current 'doms_cur' domains and in the new 'doms_new', we can leave 2723 * it as it is. 2724 * 2725 * The passed-in 'doms_new' should be allocated using 2726 * alloc_sched_domains(). This routine takes ownership of it and will 2727 * free_sched_domains it when done with it. If the caller failed the 2728 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 2729 * and partition_sched_domains() will fall back to the single partition 2730 * 'fallback_doms'; this also forces the domains to be rebuilt. 2731 * 2732 * If doms_new == NULL it will be replaced with cpu_online_mask. 2733 * ndoms_new == 0 is a special case for destroying existing domains, 2734 * and it will not create the default domain.
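 *
 * For example, if cpusets carve the active CPUs into two root-domain
 * partitions 0-3 and 4-7, the caller passes ndoms_new == 2 with
 * doms_new[0] == 0-3 and doms_new[1] == 4-7; tasks are then only load
 * balanced within their own partition.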
2735 * 2736 * Call with hotplug lock and sched_domains_mutex held 2737 */ 2738 static void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], 2739 struct sched_domain_attr *dattr_new) 2740 { 2741 bool __maybe_unused has_eas = false; 2742 int i, j, n; 2743 int new_topology; 2744 2745 lockdep_assert_held(&sched_domains_mutex); 2746 2747 /* Let the architecture update CPU core mappings: */ 2748 new_topology = arch_update_cpu_topology(); 2749 /* Trigger rebuilding CPU capacity asymmetry data */ 2750 if (new_topology) 2751 asym_cpu_capacity_scan(); 2752 2753 if (!doms_new) { 2754 WARN_ON_ONCE(dattr_new); 2755 n = 0; 2756 doms_new = alloc_sched_domains(1); 2757 if (doms_new) { 2758 n = 1; 2759 cpumask_and(doms_new[0], cpu_active_mask, 2760 housekeeping_cpumask(HK_TYPE_DOMAIN)); 2761 } 2762 } else { 2763 n = ndoms_new; 2764 } 2765 2766 /* Destroy deleted domains: */ 2767 for (i = 0; i < ndoms_cur; i++) { 2768 for (j = 0; j < n && !new_topology; j++) { 2769 if (cpumask_equal(doms_cur[i], doms_new[j]) && 2770 dattrs_equal(dattr_cur, i, dattr_new, j)) 2771 goto match1; 2772 } 2773 /* No match - a current sched domain not in new doms_new[] */ 2774 detach_destroy_domains(doms_cur[i]); 2775 match1: 2776 ; 2777 } 2778 2779 n = ndoms_cur; 2780 if (!doms_new) { 2781 n = 0; 2782 doms_new = &fallback_doms; 2783 cpumask_and(doms_new[0], cpu_active_mask, 2784 housekeeping_cpumask(HK_TYPE_DOMAIN)); 2785 } 2786 2787 /* Build new domains: */ 2788 for (i = 0; i < ndoms_new; i++) { 2789 for (j = 0; j < n && !new_topology; j++) { 2790 if (cpumask_equal(doms_new[i], doms_cur[j]) && 2791 dattrs_equal(dattr_new, i, dattr_cur, j)) 2792 goto match2; 2793 } 2794 /* No match - add a new doms_new */ 2795 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 2796 match2: 2797 ; 2798 } 2799 2800 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 2801 /* Build perf domains: */ 2802 for (i = 0; i < ndoms_new; i++) { 2803 for (j = 0; j < n && !sched_energy_update; j++) { 2804 if (cpumask_equal(doms_new[i], doms_cur[j]) && 2805 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { 2806 has_eas = true; 2807 goto match3; 2808 } 2809 } 2810 /* No match - add perf domains for a new rd */ 2811 has_eas |= build_perf_domains(doms_new[i]); 2812 match3: 2813 ; 2814 } 2815 sched_energy_set(has_eas); 2816 #endif 2817 2818 /* Remember the new sched domains: */ 2819 if (doms_cur != &fallback_doms) 2820 free_sched_domains(doms_cur, ndoms_cur); 2821 2822 kfree(dattr_cur); 2823 doms_cur = doms_new; 2824 dattr_cur = dattr_new; 2825 ndoms_cur = ndoms_new; 2826 2827 update_sched_domain_debugfs(); 2828 dl_rebuild_rd_accounting(); 2829 } 2830 2831 /* 2832 * Call with hotplug lock held 2833 */ 2834 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 2835 struct sched_domain_attr *dattr_new) 2836 { 2837 sched_domains_mutex_lock(); 2838 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); 2839 sched_domains_mutex_unlock(); 2840 } 2841
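/*
 * Illustrative usage sketch (hypothetical caller, loosely modelled on the
 * cpuset rebuild path; the helper name and the two-way split are made up
 * for the example): allocate the masks with alloc_sched_domains(), fill
 * them with non-overlapping spans, then hand ownership to
 * partition_sched_domains(), which tears down domains that disappeared
 * and builds the new ones.
 */
#if 0
static void example_repartition(const struct cpumask *set_a,
				const struct cpumask *set_b)
{
	/* Caller must hold the CPU hotplug lock; @set_a and @set_b must not overlap. */
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* doms_new == NULL && ndoms_new == 1 falls back to 'fallback_doms'. */
		partition_sched_domains(1, NULL, NULL);
		return;
	}

	cpumask_copy(doms[0], set_a);
	cpumask_copy(doms[1], set_b);

	/* Ownership of 'doms' passes to the scheduler; it frees the previous set. */
	partition_sched_domains(2, doms, NULL);
}
#endif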