1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Scheduler topology setup/handling methods 4 */ 5 6 #include <linux/bsearch.h> 7 8 DEFINE_MUTEX(sched_domains_mutex); 9 void sched_domains_mutex_lock(void) 10 { 11 mutex_lock(&sched_domains_mutex); 12 } 13 void sched_domains_mutex_unlock(void) 14 { 15 mutex_unlock(&sched_domains_mutex); 16 } 17 18 /* Protected by sched_domains_mutex: */ 19 static cpumask_var_t sched_domains_tmpmask; 20 static cpumask_var_t sched_domains_tmpmask2; 21 22 static int __init sched_debug_setup(char *str) 23 { 24 sched_debug_verbose = true; 25 26 return 0; 27 } 28 early_param("sched_verbose", sched_debug_setup); 29 30 static inline bool sched_debug(void) 31 { 32 return sched_debug_verbose; 33 } 34 35 #define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name }, 36 const struct sd_flag_debug sd_flag_debug[] = { 37 #include <linux/sched/sd_flags.h> 38 }; 39 #undef SD_FLAG 40 41 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 42 struct cpumask *groupmask) 43 { 44 struct sched_group *group = sd->groups; 45 unsigned long flags = sd->flags; 46 unsigned int idx; 47 48 cpumask_clear(groupmask); 49 50 printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); 51 printk(KERN_CONT "span=%*pbl level=%s\n", 52 cpumask_pr_args(sched_domain_span(sd)), sd->name); 53 54 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 55 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); 56 } 57 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { 58 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); 59 } 60 61 for_each_set_bit(idx, &flags, __SD_FLAG_CNT) { 62 unsigned int flag = BIT(idx); 63 unsigned int meta_flags = sd_flag_debug[idx].meta_flags; 64 65 if ((meta_flags & SDF_SHARED_CHILD) && sd->child && 66 !(sd->child->flags & flag)) 67 printk(KERN_ERR "ERROR: flag %s set here but not in child\n", 68 sd_flag_debug[idx].name); 69 70 if ((meta_flags & SDF_SHARED_PARENT) && sd->parent && 71 !(sd->parent->flags & flag)) 72 printk(KERN_ERR "ERROR: flag %s set here but not in parent\n", 73 sd_flag_debug[idx].name); 74 } 75 76 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 77 do { 78 if (!group) { 79 printk("\n"); 80 printk(KERN_ERR "ERROR: group is NULL\n"); 81 break; 82 } 83 84 if (cpumask_empty(sched_group_span(group))) { 85 printk(KERN_CONT "\n"); 86 printk(KERN_ERR "ERROR: empty group\n"); 87 break; 88 } 89 90 if (!(sd->flags & SD_OVERLAP) && 91 cpumask_intersects(groupmask, sched_group_span(group))) { 92 printk(KERN_CONT "\n"); 93 printk(KERN_ERR "ERROR: repeated CPUs\n"); 94 break; 95 } 96 97 cpumask_or(groupmask, groupmask, sched_group_span(group)); 98 99 printk(KERN_CONT " %d:{ span=%*pbl", 100 group->sgc->id, 101 cpumask_pr_args(sched_group_span(group))); 102 103 if ((sd->flags & SD_OVERLAP) && 104 !cpumask_equal(group_balance_mask(group), sched_group_span(group))) { 105 printk(KERN_CONT " mask=%*pbl", 106 cpumask_pr_args(group_balance_mask(group))); 107 } 108 109 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) 110 printk(KERN_CONT " cap=%lu", group->sgc->capacity); 111 112 if (group == sd->groups && sd->child && 113 !cpumask_equal(sched_domain_span(sd->child), 114 sched_group_span(group))) { 115 printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n"); 116 } 117 118 printk(KERN_CONT " }"); 119 120 group = group->next; 121 122 if (group != sd->groups) 123 printk(KERN_CONT ","); 124 125 } while (group != sd->groups); 126 printk(KERN_CONT "\n"); 127 128 if 
(!cpumask_equal(sched_domain_span(sd), groupmask)) 129 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 130 131 if (sd->parent && 132 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 133 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); 134 return 0; 135 } 136 137 static void sched_domain_debug(struct sched_domain *sd, int cpu) 138 { 139 int level = 0; 140 141 if (!sched_debug_verbose) 142 return; 143 144 if (!sd) { 145 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 146 return; 147 } 148 149 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); 150 151 for (;;) { 152 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 153 break; 154 level++; 155 sd = sd->parent; 156 if (!sd) 157 break; 158 } 159 } 160 161 /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */ 162 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) | 163 static const unsigned int SD_DEGENERATE_GROUPS_MASK = 164 #include <linux/sched/sd_flags.h> 165 0; 166 #undef SD_FLAG 167 168 static int sd_degenerate(struct sched_domain *sd) 169 { 170 if (cpumask_weight(sched_domain_span(sd)) == 1) 171 return 1; 172 173 /* Following flags need at least 2 groups */ 174 if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) && 175 (sd->groups != sd->groups->next)) 176 return 0; 177 178 /* Following flags don't use groups */ 179 if (sd->flags & (SD_WAKE_AFFINE)) 180 return 0; 181 182 return 1; 183 } 184 185 static int 186 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 187 { 188 unsigned long cflags = sd->flags, pflags = parent->flags; 189 190 if (sd_degenerate(parent)) 191 return 1; 192 193 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 194 return 0; 195 196 /* Flags needing groups don't count if only 1 group in parent */ 197 if (parent->groups == parent->groups->next) 198 pflags &= ~SD_DEGENERATE_GROUPS_MASK; 199 200 if (~cflags & pflags) 201 return 0; 202 203 return 1; 204 } 205 206 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 207 DEFINE_STATIC_KEY_FALSE(sched_energy_present); 208 static unsigned int sysctl_sched_energy_aware = 1; 209 static DEFINE_MUTEX(sched_energy_mutex); 210 static bool sched_energy_update; 211 212 static bool sched_is_eas_possible(const struct cpumask *cpu_mask) 213 { 214 bool any_asym_capacity = false; 215 struct cpufreq_policy *policy; 216 bool policy_is_ready; 217 int i; 218 219 /* EAS is enabled for asymmetric CPU capacity topologies. */ 220 for_each_cpu(i, cpu_mask) { 221 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, i))) { 222 any_asym_capacity = true; 223 break; 224 } 225 } 226 if (!any_asym_capacity) { 227 if (sched_debug()) { 228 pr_info("rd %*pbl: Checking EAS, CPUs do not have asymmetric capacities\n", 229 cpumask_pr_args(cpu_mask)); 230 } 231 return false; 232 } 233 234 /* EAS definitely does *not* handle SMT */ 235 if (sched_smt_active()) { 236 if (sched_debug()) { 237 pr_info("rd %*pbl: Checking EAS, SMT is not supported\n", 238 cpumask_pr_args(cpu_mask)); 239 } 240 return false; 241 } 242 243 if (!arch_scale_freq_invariant()) { 244 if (sched_debug()) { 245 pr_info("rd %*pbl: Checking EAS: frequency-invariant load tracking not yet supported", 246 cpumask_pr_args(cpu_mask)); 247 } 248 return false; 249 } 250 251 /* Do not attempt EAS if schedutil is not being used. 
*/ 252 for_each_cpu(i, cpu_mask) { 253 policy = cpufreq_cpu_get(i); 254 if (!policy) { 255 if (sched_debug()) { 256 pr_info("rd %*pbl: Checking EAS, cpufreq policy not set for CPU: %d", 257 cpumask_pr_args(cpu_mask), i); 258 } 259 return false; 260 } 261 policy_is_ready = sugov_is_governor(policy); 262 cpufreq_cpu_put(policy); 263 if (!policy_is_ready) { 264 if (sched_debug()) { 265 pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n", 266 cpumask_pr_args(cpu_mask)); 267 } 268 return false; 269 } 270 } 271 272 return true; 273 } 274 275 void rebuild_sched_domains_energy(void) 276 { 277 mutex_lock(&sched_energy_mutex); 278 sched_energy_update = true; 279 rebuild_sched_domains(); 280 sched_energy_update = false; 281 mutex_unlock(&sched_energy_mutex); 282 } 283 284 #ifdef CONFIG_PROC_SYSCTL 285 static int sched_energy_aware_handler(const struct ctl_table *table, int write, 286 void *buffer, size_t *lenp, loff_t *ppos) 287 { 288 int ret, state; 289 290 if (write && !capable(CAP_SYS_ADMIN)) 291 return -EPERM; 292 293 if (!sched_is_eas_possible(cpu_active_mask)) { 294 if (write) { 295 return -EOPNOTSUPP; 296 } else { 297 *lenp = 0; 298 return 0; 299 } 300 } 301 302 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 303 if (!ret && write) { 304 state = static_branch_unlikely(&sched_energy_present); 305 if (state != sysctl_sched_energy_aware) 306 rebuild_sched_domains_energy(); 307 } 308 309 return ret; 310 } 311 312 static const struct ctl_table sched_energy_aware_sysctls[] = { 313 { 314 .procname = "sched_energy_aware", 315 .data = &sysctl_sched_energy_aware, 316 .maxlen = sizeof(unsigned int), 317 .mode = 0644, 318 .proc_handler = sched_energy_aware_handler, 319 .extra1 = SYSCTL_ZERO, 320 .extra2 = SYSCTL_ONE, 321 }, 322 }; 323 324 static int __init sched_energy_aware_sysctl_init(void) 325 { 326 register_sysctl_init("kernel", sched_energy_aware_sysctls); 327 return 0; 328 } 329 330 late_initcall(sched_energy_aware_sysctl_init); 331 #endif 332 333 static void free_pd(struct perf_domain *pd) 334 { 335 struct perf_domain *tmp; 336 337 while (pd) { 338 tmp = pd->next; 339 kfree(pd); 340 pd = tmp; 341 } 342 } 343 344 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu) 345 { 346 while (pd) { 347 if (cpumask_test_cpu(cpu, perf_domain_span(pd))) 348 return pd; 349 pd = pd->next; 350 } 351 352 return NULL; 353 } 354 355 static struct perf_domain *pd_init(int cpu) 356 { 357 struct em_perf_domain *obj = em_cpu_get(cpu); 358 struct perf_domain *pd; 359 360 if (!obj) { 361 if (sched_debug()) 362 pr_info("%s: no EM found for CPU%d\n", __func__, cpu); 363 return NULL; 364 } 365 366 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 367 if (!pd) 368 return NULL; 369 pd->em_pd = obj; 370 371 return pd; 372 } 373 374 static void perf_domain_debug(const struct cpumask *cpu_map, 375 struct perf_domain *pd) 376 { 377 if (!sched_debug() || !pd) 378 return; 379 380 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); 381 382 while (pd) { 383 printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }", 384 cpumask_first(perf_domain_span(pd)), 385 cpumask_pr_args(perf_domain_span(pd)), 386 em_pd_nr_perf_states(pd->em_pd)); 387 pd = pd->next; 388 } 389 390 printk(KERN_CONT "\n"); 391 } 392 393 static void destroy_perf_domain_rcu(struct rcu_head *rp) 394 { 395 struct perf_domain *pd; 396 397 pd = container_of(rp, struct perf_domain, rcu); 398 free_pd(pd); 399 } 400 401 static void sched_energy_set(bool has_eas) 402 { 403 if (!has_eas && static_branch_unlikely(&sched_energy_present)) { 404 if 
(sched_debug()) 405 pr_info("%s: stopping EAS\n", __func__); 406 static_branch_disable_cpuslocked(&sched_energy_present); 407 } else if (has_eas && !static_branch_unlikely(&sched_energy_present)) { 408 if (sched_debug()) 409 pr_info("%s: starting EAS\n", __func__); 410 static_branch_enable_cpuslocked(&sched_energy_present); 411 } 412 } 413 414 /* 415 * EAS can be used on a root domain if it meets all the following conditions: 416 * 1. an Energy Model (EM) is available; 417 * 2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy. 418 * 3. no SMT is detected. 419 * 4. schedutil is driving the frequency of all CPUs of the rd; 420 * 5. frequency invariance support is present; 421 */ 422 static bool build_perf_domains(const struct cpumask *cpu_map) 423 { 424 int i; 425 struct perf_domain *pd = NULL, *tmp; 426 int cpu = cpumask_first(cpu_map); 427 struct root_domain *rd = cpu_rq(cpu)->rd; 428 429 if (!sysctl_sched_energy_aware) 430 goto free; 431 432 if (!sched_is_eas_possible(cpu_map)) 433 goto free; 434 435 for_each_cpu(i, cpu_map) { 436 /* Skip already covered CPUs. */ 437 if (find_pd(pd, i)) 438 continue; 439 440 /* Create the new pd and add it to the local list. */ 441 tmp = pd_init(i); 442 if (!tmp) 443 goto free; 444 tmp->next = pd; 445 pd = tmp; 446 } 447 448 perf_domain_debug(cpu_map, pd); 449 450 /* Attach the new list of performance domains to the root domain. */ 451 tmp = rd->pd; 452 rcu_assign_pointer(rd->pd, pd); 453 if (tmp) 454 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); 455 456 return !!pd; 457 458 free: 459 free_pd(pd); 460 tmp = rd->pd; 461 rcu_assign_pointer(rd->pd, NULL); 462 if (tmp) 463 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); 464 465 return false; 466 } 467 #else 468 static void free_pd(struct perf_domain *pd) { } 469 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/ 470 471 static void free_rootdomain(struct rcu_head *rcu) 472 { 473 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 474 475 cpupri_cleanup(&rd->cpupri); 476 cpudl_cleanup(&rd->cpudl); 477 free_cpumask_var(rd->dlo_mask); 478 free_cpumask_var(rd->rto_mask); 479 free_cpumask_var(rd->online); 480 free_cpumask_var(rd->span); 481 free_pd(rd->pd); 482 kfree(rd); 483 } 484 485 void rq_attach_root(struct rq *rq, struct root_domain *rd) 486 { 487 struct root_domain *old_rd = NULL; 488 struct rq_flags rf; 489 490 rq_lock_irqsave(rq, &rf); 491 492 if (rq->rd) { 493 old_rd = rq->rd; 494 495 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 496 set_rq_offline(rq); 497 498 cpumask_clear_cpu(rq->cpu, old_rd->span); 499 500 /* 501 * If we don't want to free the old_rd yet then 502 * set old_rd to NULL to skip the freeing later 503 * in this function: 504 */ 505 if (!atomic_dec_and_test(&old_rd->refcount)) 506 old_rd = NULL; 507 } 508 509 atomic_inc(&rd->refcount); 510 rq->rd = rd; 511 512 cpumask_set_cpu(rq->cpu, rd->span); 513 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 514 set_rq_online(rq); 515 516 /* 517 * Because the rq is not a task, dl_add_task_root_domain() did not 518 * move the fair server bw to the rd if it already started. 519 * Add it now. 
520 */ 521 if (rq->fair_server.dl_server) 522 __dl_server_attach_root(&rq->fair_server, rq); 523 524 rq_unlock_irqrestore(rq, &rf); 525 526 if (old_rd) 527 call_rcu(&old_rd->rcu, free_rootdomain); 528 } 529 530 void sched_get_rd(struct root_domain *rd) 531 { 532 atomic_inc(&rd->refcount); 533 } 534 535 void sched_put_rd(struct root_domain *rd) 536 { 537 if (!atomic_dec_and_test(&rd->refcount)) 538 return; 539 540 call_rcu(&rd->rcu, free_rootdomain); 541 } 542 543 static int init_rootdomain(struct root_domain *rd) 544 { 545 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) 546 goto out; 547 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) 548 goto free_span; 549 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 550 goto free_online; 551 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 552 goto free_dlo_mask; 553 554 #ifdef HAVE_RT_PUSH_IPI 555 rd->rto_cpu = -1; 556 raw_spin_lock_init(&rd->rto_lock); 557 rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func); 558 #endif 559 560 rd->visit_cookie = 0; 561 init_dl_bw(&rd->dl_bw); 562 if (cpudl_init(&rd->cpudl) != 0) 563 goto free_rto_mask; 564 565 if (cpupri_init(&rd->cpupri) != 0) 566 goto free_cpudl; 567 return 0; 568 569 free_cpudl: 570 cpudl_cleanup(&rd->cpudl); 571 free_rto_mask: 572 free_cpumask_var(rd->rto_mask); 573 free_dlo_mask: 574 free_cpumask_var(rd->dlo_mask); 575 free_online: 576 free_cpumask_var(rd->online); 577 free_span: 578 free_cpumask_var(rd->span); 579 out: 580 return -ENOMEM; 581 } 582 583 /* 584 * By default the system creates a single root-domain with all CPUs as 585 * members (mimicking the global state we have today). 586 */ 587 struct root_domain def_root_domain; 588 589 void __init init_defrootdomain(void) 590 { 591 init_rootdomain(&def_root_domain); 592 593 atomic_set(&def_root_domain.refcount, 1); 594 } 595 596 static struct root_domain *alloc_rootdomain(void) 597 { 598 struct root_domain *rd; 599 600 rd = kzalloc(sizeof(*rd), GFP_KERNEL); 601 if (!rd) 602 return NULL; 603 604 if (init_rootdomain(rd) != 0) { 605 kfree(rd); 606 return NULL; 607 } 608 609 return rd; 610 } 611 612 static void free_sched_groups(struct sched_group *sg, int free_sgc) 613 { 614 struct sched_group *tmp, *first; 615 616 if (!sg) 617 return; 618 619 first = sg; 620 do { 621 tmp = sg->next; 622 623 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 624 kfree(sg->sgc); 625 626 if (atomic_dec_and_test(&sg->ref)) 627 kfree(sg); 628 sg = tmp; 629 } while (sg != first); 630 } 631 632 static void destroy_sched_domain(struct sched_domain *sd) 633 { 634 /* 635 * A normal sched domain may have multiple group references, an 636 * overlapping domain, having private groups, only one. Iterate, 637 * dropping group/capacity references, freeing where none remain. 638 */ 639 free_sched_groups(sd->groups, 1); 640 641 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) 642 kfree(sd->shared); 643 kfree(sd); 644 } 645 646 static void destroy_sched_domains_rcu(struct rcu_head *rcu) 647 { 648 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 649 650 while (sd) { 651 struct sched_domain *parent = sd->parent; 652 destroy_sched_domain(sd); 653 sd = parent; 654 } 655 } 656 657 static void destroy_sched_domains(struct sched_domain *sd) 658 { 659 if (sd) 660 call_rcu(&sd->rcu, destroy_sched_domains_rcu); 661 } 662 663 /* 664 * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set 665 * (Last Level Cache Domain) for this allows us to avoid some pointer chasing 666 * select_idle_sibling(). 
667 * 668 * Also keep a unique ID per domain (we use the first CPU number in the cpumask 669 * of the domain), this allows us to quickly tell if two CPUs are in the same 670 * cache domain, see cpus_share_cache(). 671 */ 672 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); 673 DEFINE_PER_CPU(int, sd_llc_size); 674 DEFINE_PER_CPU(int, sd_llc_id); 675 DEFINE_PER_CPU(int, sd_share_id); 676 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 677 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); 678 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 679 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 680 681 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); 682 DEFINE_STATIC_KEY_FALSE(sched_cluster_active); 683 684 static void update_top_cache_domain(int cpu) 685 { 686 struct sched_domain_shared *sds = NULL; 687 struct sched_domain *sd; 688 int id = cpu; 689 int size = 1; 690 691 sd = highest_flag_domain(cpu, SD_SHARE_LLC); 692 if (sd) { 693 id = cpumask_first(sched_domain_span(sd)); 694 size = cpumask_weight(sched_domain_span(sd)); 695 sds = sd->shared; 696 } 697 698 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 699 per_cpu(sd_llc_size, cpu) = size; 700 per_cpu(sd_llc_id, cpu) = id; 701 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); 702 703 sd = lowest_flag_domain(cpu, SD_CLUSTER); 704 if (sd) 705 id = cpumask_first(sched_domain_span(sd)); 706 707 /* 708 * This assignment should be placed after the sd_llc_id as 709 * we want this id equals to cluster id on cluster machines 710 * but equals to LLC id on non-Cluster machines. 711 */ 712 per_cpu(sd_share_id, cpu) = id; 713 714 sd = lowest_flag_domain(cpu, SD_NUMA); 715 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 716 717 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 718 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd); 719 720 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL); 721 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd); 722 } 723 724 /* 725 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 726 * hold the hotplug lock. 727 */ 728 static void 729 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 730 { 731 struct rq *rq = cpu_rq(cpu); 732 struct sched_domain *tmp; 733 734 /* Remove the sched domains which do not contribute to scheduling. */ 735 for (tmp = sd; tmp; ) { 736 struct sched_domain *parent = tmp->parent; 737 if (!parent) 738 break; 739 740 if (sd_parent_degenerate(tmp, parent)) { 741 tmp->parent = parent->parent; 742 743 if (parent->parent) { 744 parent->parent->child = tmp; 745 parent->parent->groups->flags = tmp->flags; 746 } 747 748 /* 749 * Transfer SD_PREFER_SIBLING down in case of a 750 * degenerate parent; the spans match for this 751 * so the property transfers. 752 */ 753 if (parent->flags & SD_PREFER_SIBLING) 754 tmp->flags |= SD_PREFER_SIBLING; 755 destroy_sched_domain(parent); 756 } else 757 tmp = tmp->parent; 758 } 759 760 if (sd && sd_degenerate(sd)) { 761 tmp = sd; 762 sd = sd->parent; 763 destroy_sched_domain(tmp); 764 if (sd) { 765 struct sched_group *sg = sd->groups; 766 767 /* 768 * sched groups hold the flags of the child sched 769 * domain for convenience. Clear such flags since 770 * the child is being destroyed. 
771 */ 772 do { 773 sg->flags = 0; 774 } while (sg != sd->groups); 775 776 sd->child = NULL; 777 } 778 } 779 780 sched_domain_debug(sd, cpu); 781 782 rq_attach_root(rq, rd); 783 tmp = rq->sd; 784 rcu_assign_pointer(rq->sd, sd); 785 dirty_sched_domain_sysctl(cpu); 786 destroy_sched_domains(tmp); 787 788 update_top_cache_domain(cpu); 789 } 790 791 struct s_data { 792 struct sched_domain * __percpu *sd; 793 struct root_domain *rd; 794 }; 795 796 enum s_alloc { 797 sa_rootdomain, 798 sa_sd, 799 sa_sd_storage, 800 sa_none, 801 }; 802 803 /* 804 * Return the canonical balance CPU for this group, this is the first CPU 805 * of this group that's also in the balance mask. 806 * 807 * The balance mask are all those CPUs that could actually end up at this 808 * group. See build_balance_mask(). 809 * 810 * Also see should_we_balance(). 811 */ 812 int group_balance_cpu(struct sched_group *sg) 813 { 814 return cpumask_first(group_balance_mask(sg)); 815 } 816 817 818 /* 819 * NUMA topology (first read the regular topology blurb below) 820 * 821 * Given a node-distance table, for example: 822 * 823 * node 0 1 2 3 824 * 0: 10 20 30 20 825 * 1: 20 10 20 30 826 * 2: 30 20 10 20 827 * 3: 20 30 20 10 828 * 829 * which represents a 4 node ring topology like: 830 * 831 * 0 ----- 1 832 * | | 833 * | | 834 * | | 835 * 3 ----- 2 836 * 837 * We want to construct domains and groups to represent this. The way we go 838 * about doing this is to build the domains on 'hops'. For each NUMA level we 839 * construct the mask of all nodes reachable in @level hops. 840 * 841 * For the above NUMA topology that gives 3 levels: 842 * 843 * NUMA-2 0-3 0-3 0-3 0-3 844 * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2} 845 * 846 * NUMA-1 0-1,3 0-2 1-3 0,2-3 847 * groups: {0},{1},{3} {0},{1},{2} {1},{2},{3} {0},{2},{3} 848 * 849 * NUMA-0 0 1 2 3 850 * 851 * 852 * As can be seen; things don't nicely line up as with the regular topology. 853 * When we iterate a domain in child domain chunks some nodes can be 854 * represented multiple times -- hence the "overlap" naming for this part of 855 * the topology. 856 * 857 * In order to minimize this overlap, we only build enough groups to cover the 858 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3. 859 * 860 * Because: 861 * 862 * - the first group of each domain is its child domain; this 863 * gets us the first 0-1,3 864 * - the only uncovered node is 2, who's child domain is 1-3. 865 * 866 * However, because of the overlap, computing a unique CPU for each group is 867 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both 868 * groups include the CPUs of Node-0, while those CPUs would not in fact ever 869 * end up at those groups (they would end up in group: 0-1,3). 870 * 871 * To correct this we have to introduce the group balance mask. This mask 872 * will contain those CPUs in the group that can reach this group given the 873 * (child) domain tree. 874 * 875 * With this we can once again compute balance_cpu and sched_group_capacity 876 * relations. 877 * 878 * XXX include words on how balance_cpu is unique and therefore can be 879 * used for sched_group_capacity links. 
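 * (One way to see it: the balance mask only contains CPUs that can actually
 *  reach this group, so every CPU that builds an instance of this group
 *  computes the same mask, and hence the same cpumask_first() of it. That
 *  first CPU is the balance_cpu, and init_overlap_sched_group() hangs the
 *  shared sched_group_capacity off that same CPU's sgc.)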
880 * 881 * 882 * Another 'interesting' topology is: 883 * 884 * node 0 1 2 3 885 * 0: 10 20 20 30 886 * 1: 20 10 20 20 887 * 2: 20 20 10 20 888 * 3: 30 20 20 10 889 * 890 * Which looks a little like: 891 * 892 * 0 ----- 1 893 * | / | 894 * | / | 895 * | / | 896 * 2 ----- 3 897 * 898 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3 899 * are not. 900 * 901 * This leads to a few particularly weird cases where the sched_domain's are 902 * not of the same number for each CPU. Consider: 903 * 904 * NUMA-2 0-3 0-3 905 * groups: {0-2},{1-3} {1-3},{0-2} 906 * 907 * NUMA-1 0-2 0-3 0-3 1-3 908 * 909 * NUMA-0 0 1 2 3 910 * 911 */ 912 913 914 /* 915 * Build the balance mask; it contains only those CPUs that can arrive at this 916 * group and should be considered to continue balancing. 917 * 918 * We do this during the group creation pass, therefore the group information 919 * isn't complete yet, however since each group represents a (child) domain we 920 * can fully construct this using the sched_domain bits (which are already 921 * complete). 922 */ 923 static void 924 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) 925 { 926 const struct cpumask *sg_span = sched_group_span(sg); 927 struct sd_data *sdd = sd->private; 928 struct sched_domain *sibling; 929 int i; 930 931 cpumask_clear(mask); 932 933 for_each_cpu(i, sg_span) { 934 sibling = *per_cpu_ptr(sdd->sd, i); 935 936 /* 937 * Can happen in the asymmetric case, where these siblings are 938 * unused. The mask will not be empty because those CPUs that 939 * do have the top domain _should_ span the domain. 940 */ 941 if (!sibling->child) 942 continue; 943 944 /* If we would not end up here, we can't continue from here */ 945 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) 946 continue; 947 948 cpumask_set_cpu(i, mask); 949 } 950 951 /* We must not have empty masks here */ 952 WARN_ON_ONCE(cpumask_empty(mask)); 953 } 954 955 /* 956 * XXX: This creates per-node group entries; since the load-balancer will 957 * immediately access remote memory to construct this group's load-balance 958 * statistics having the groups node local is of dubious benefit. 959 */ 960 static struct sched_group * 961 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) 962 { 963 struct sched_group *sg; 964 struct cpumask *sg_span; 965 966 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 967 GFP_KERNEL, cpu_to_node(cpu)); 968 969 if (!sg) 970 return NULL; 971 972 sg_span = sched_group_span(sg); 973 if (sd->child) { 974 cpumask_copy(sg_span, sched_domain_span(sd->child)); 975 sg->flags = sd->child->flags; 976 } else { 977 cpumask_copy(sg_span, sched_domain_span(sd)); 978 } 979 980 atomic_inc(&sg->ref); 981 return sg; 982 } 983 984 static void init_overlap_sched_group(struct sched_domain *sd, 985 struct sched_group *sg) 986 { 987 struct cpumask *mask = sched_domains_tmpmask2; 988 struct sd_data *sdd = sd->private; 989 struct cpumask *sg_span; 990 int cpu; 991 992 build_balance_mask(sd, sg, mask); 993 cpu = cpumask_first(mask); 994 995 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); 996 if (atomic_inc_return(&sg->sgc->ref) == 1) 997 cpumask_copy(group_balance_mask(sg), mask); 998 else 999 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask)); 1000 1001 /* 1002 * Initialize sgc->capacity such that even if we mess up the 1003 * domains and no possible iteration will get us here, we won't 1004 * die on a /0 trap. 
1005 */ 1006 sg_span = sched_group_span(sg); 1007 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 1008 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; 1009 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; 1010 } 1011 1012 static struct sched_domain * 1013 find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling) 1014 { 1015 /* 1016 * The proper descendant would be the one whose child won't span out 1017 * of sd 1018 */ 1019 while (sibling->child && 1020 !cpumask_subset(sched_domain_span(sibling->child), 1021 sched_domain_span(sd))) 1022 sibling = sibling->child; 1023 1024 /* 1025 * As we are referencing sgc across different topology level, we need 1026 * to go down to skip those sched_domains which don't contribute to 1027 * scheduling because they will be degenerated in cpu_attach_domain 1028 */ 1029 while (sibling->child && 1030 cpumask_equal(sched_domain_span(sibling->child), 1031 sched_domain_span(sibling))) 1032 sibling = sibling->child; 1033 1034 return sibling; 1035 } 1036 1037 static int 1038 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 1039 { 1040 struct sched_group *first = NULL, *last = NULL, *sg; 1041 const struct cpumask *span = sched_domain_span(sd); 1042 struct cpumask *covered = sched_domains_tmpmask; 1043 struct sd_data *sdd = sd->private; 1044 struct sched_domain *sibling; 1045 int i; 1046 1047 cpumask_clear(covered); 1048 1049 for_each_cpu_wrap(i, span, cpu) { 1050 struct cpumask *sg_span; 1051 1052 if (cpumask_test_cpu(i, covered)) 1053 continue; 1054 1055 sibling = *per_cpu_ptr(sdd->sd, i); 1056 1057 /* 1058 * Asymmetric node setups can result in situations where the 1059 * domain tree is of unequal depth, make sure to skip domains 1060 * that already cover the entire range. 1061 * 1062 * In that case build_sched_domains() will have terminated the 1063 * iteration early and our sibling sd spans will be empty. 1064 * Domains should always include the CPU they're built on, so 1065 * check that. 1066 */ 1067 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 1068 continue; 1069 1070 /* 1071 * Usually we build sched_group by sibling's child sched_domain 1072 * But for machines whose NUMA diameter are 3 or above, we move 1073 * to build sched_group by sibling's proper descendant's child 1074 * domain because sibling's child sched_domain will span out of 1075 * the sched_domain being built as below. 1076 * 1077 * Smallest diameter=3 topology is: 1078 * 1079 * node 0 1 2 3 1080 * 0: 10 20 30 40 1081 * 1: 20 10 20 30 1082 * 2: 30 20 10 20 1083 * 3: 40 30 20 10 1084 * 1085 * 0 --- 1 --- 2 --- 3 1086 * 1087 * NUMA-3 0-3 N/A N/A 0-3 1088 * groups: {0-2},{1-3} {1-3},{0-2} 1089 * 1090 * NUMA-2 0-2 0-3 0-3 1-3 1091 * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2} 1092 * 1093 * NUMA-1 0-1 0-2 1-3 2-3 1094 * groups: {0},{1} {1},{2},{0} {2},{3},{1} {3},{2} 1095 * 1096 * NUMA-0 0 1 2 3 1097 * 1098 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the 1099 * group span isn't a subset of the domain span. 
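		 * Hence, when the sibling's child spans outside of the domain
		 * being built, find_descended_sibling() below walks down to a
		 * proper descendant whose child does stay within the span and
		 * the group is built from that instead.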
		 */
		if (sibling->child &&
		    !cpumask_subset(sched_domain_span(sibling->child), span))
			sibling = find_descended_sibling(sd, sibling);

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sibling, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}


/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (PKG)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-CPU and have a two-way link (parent & child) and
 * denote the ever-growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
 * denoting the domains of the level below (or individual CPUs in case of the
 * first domain level). The sched_group linked by a sched_domain includes the
 * CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * PKG  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * PKG  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC   0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among these
 * topology levels, while sched_group moves you sideways through it, at child
 * domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * These topologies are exclusive by construction: it's impossible for an
 * SMT thread to belong to multiple cores, or for a core to be part of
 * multiple caches. There is a very clear and unique location for each CPU
 * in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and set to all 1s; all CPUs in a group will end up at
 * _that_ group): we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
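 *
 * For example, continuing the 2 threaded, 2 core, 2 cache part above: CPU0's
 * MC domain has two groups, spanning 0-1 and 2-3 (the SMT child domains),
 * and their unique CPUs are simply 0 and 2, the first CPU of each span.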
1201 */ 1202 1203 static struct sched_group *get_group(int cpu, struct sd_data *sdd) 1204 { 1205 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1206 struct sched_domain *child = sd->child; 1207 struct sched_group *sg; 1208 bool already_visited; 1209 1210 if (child) 1211 cpu = cpumask_first(sched_domain_span(child)); 1212 1213 sg = *per_cpu_ptr(sdd->sg, cpu); 1214 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); 1215 1216 /* Increase refcounts for claim_allocations: */ 1217 already_visited = atomic_inc_return(&sg->ref) > 1; 1218 /* sgc visits should follow a similar trend as sg */ 1219 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); 1220 1221 /* If we have already visited that group, it's already initialized. */ 1222 if (already_visited) 1223 return sg; 1224 1225 if (child) { 1226 cpumask_copy(sched_group_span(sg), sched_domain_span(child)); 1227 cpumask_copy(group_balance_mask(sg), sched_group_span(sg)); 1228 sg->flags = child->flags; 1229 } else { 1230 cpumask_set_cpu(cpu, sched_group_span(sg)); 1231 cpumask_set_cpu(cpu, group_balance_mask(sg)); 1232 } 1233 1234 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); 1235 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; 1236 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; 1237 1238 return sg; 1239 } 1240 1241 /* 1242 * build_sched_groups will build a circular linked list of the groups 1243 * covered by the given span, will set each group's ->cpumask correctly, 1244 * and will initialize their ->sgc. 1245 * 1246 * Assumes the sched_domain tree is fully constructed 1247 */ 1248 static int 1249 build_sched_groups(struct sched_domain *sd, int cpu) 1250 { 1251 struct sched_group *first = NULL, *last = NULL; 1252 struct sd_data *sdd = sd->private; 1253 const struct cpumask *span = sched_domain_span(sd); 1254 struct cpumask *covered; 1255 int i; 1256 1257 lockdep_assert_held(&sched_domains_mutex); 1258 covered = sched_domains_tmpmask; 1259 1260 cpumask_clear(covered); 1261 1262 for_each_cpu_wrap(i, span, cpu) { 1263 struct sched_group *sg; 1264 1265 if (cpumask_test_cpu(i, covered)) 1266 continue; 1267 1268 sg = get_group(i, sdd); 1269 1270 cpumask_or(covered, covered, sched_group_span(sg)); 1271 1272 if (!first) 1273 first = sg; 1274 if (last) 1275 last->next = sg; 1276 last = sg; 1277 } 1278 last->next = first; 1279 sd->groups = first; 1280 1281 return 0; 1282 } 1283 1284 /* 1285 * Initialize sched groups cpu_capacity. 1286 * 1287 * cpu_capacity indicates the capacity of sched group, which is used while 1288 * distributing the load between different sched groups in a sched domain. 1289 * Typically cpu_capacity for all the groups in a sched domain will be same 1290 * unless there are asymmetries in the topology. If there are asymmetries, 1291 * group having more cpu_capacity will pickup more load compared to the 1292 * group having less cpu_capacity. 
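 *
 * For example (illustrative numbers): if the CPUs of one group have an
 * arch_scale_cpu_capacity() of about 512 while those of another run at the
 * full SCHED_CAPACITY_SCALE (1024), the two groups end up with different
 * cpu_capacity and the bigger group is expected to carry roughly twice the
 * load of the smaller one.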
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;
	struct cpumask *mask = sched_domains_tmpmask2;

	WARN_ON(!sg);

	do {
		int cpu, cores = 0, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_span(sg));

		cpumask_copy(mask, sched_group_span(sg));
		for_each_cpu(cpu, mask) {
			cores++;
#ifdef CONFIG_SCHED_SMT
			cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
#endif
		}
		sg->cores = cores;

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		for_each_cpu(cpu, sched_group_span(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}

/*
 * Set of available CPUs grouped by their corresponding capacities.
 * Each list entry contains a CPU mask reflecting CPUs that share the same
 * capacity.
 * The lifespan of the data is unlimited.
 */
LIST_HEAD(asym_cap_list);

/*
 * Verify whether there is any CPU capacity asymmetry in a given sched domain.
 * Provides sd_flags reflecting the asymmetry scope.
 */
static inline int
asym_cpu_capacity_classify(const struct cpumask *sd_span,
			   const struct cpumask *cpu_map)
{
	struct asym_cap_data *entry;
	int count = 0, miss = 0;

	/*
	 * Count how many unique CPU capacities this domain spans across
	 * (compare the sched_domain CPU mask with the masks representing the
	 * available CPU capacities). Take into account CPUs that might be
	 * offline: skip those.
	 */
	list_for_each_entry(entry, &asym_cap_list, link) {
		if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
			++count;
		else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
			++miss;
	}

	WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));

	/* No asymmetry detected */
	if (count < 2)
		return 0;
	/* Some of the available CPU capacity values have not been detected */
	if (miss)
		return SD_ASYM_CPUCAPACITY;

	/* Full asymmetry */
	return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
}

static void free_asym_cap_entry(struct rcu_head *head)
{
	struct asym_cap_data *entry = container_of(head, struct asym_cap_data, rcu);
	kfree(entry);
}

static inline void asym_cpu_capacity_update_data(int cpu)
{
	unsigned long capacity = arch_scale_cpu_capacity(cpu);
	struct asym_cap_data *insert_entry = NULL;
	struct asym_cap_data *entry;

	/*
	 * Check whether this capacity already exists. If not, track the entry
	 * after which we should insert the new one to keep the list ordered
	 * descending.
	 */
	list_for_each_entry(entry, &asym_cap_list, link) {
		if (capacity == entry->capacity)
			goto done;
		else if (!insert_entry && capacity > entry->capacity)
			insert_entry = list_prev_entry(entry, link);
	}

	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
		return;
	entry->capacity = capacity;

	/* If NULL then the new capacity is the smallest, add last.
*/ 1411 if (!insert_entry) 1412 list_add_tail_rcu(&entry->link, &asym_cap_list); 1413 else 1414 list_add_rcu(&entry->link, &insert_entry->link); 1415 done: 1416 __cpumask_set_cpu(cpu, cpu_capacity_span(entry)); 1417 } 1418 1419 /* 1420 * Build-up/update list of CPUs grouped by their capacities 1421 * An update requires explicit request to rebuild sched domains 1422 * with state indicating CPU topology changes. 1423 */ 1424 static void asym_cpu_capacity_scan(void) 1425 { 1426 struct asym_cap_data *entry, *next; 1427 int cpu; 1428 1429 list_for_each_entry(entry, &asym_cap_list, link) 1430 cpumask_clear(cpu_capacity_span(entry)); 1431 1432 for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) 1433 asym_cpu_capacity_update_data(cpu); 1434 1435 list_for_each_entry_safe(entry, next, &asym_cap_list, link) { 1436 if (cpumask_empty(cpu_capacity_span(entry))) { 1437 list_del_rcu(&entry->link); 1438 call_rcu(&entry->rcu, free_asym_cap_entry); 1439 } 1440 } 1441 1442 /* 1443 * Only one capacity value has been detected i.e. this system is symmetric. 1444 * No need to keep this data around. 1445 */ 1446 if (list_is_singular(&asym_cap_list)) { 1447 entry = list_first_entry(&asym_cap_list, typeof(*entry), link); 1448 list_del_rcu(&entry->link); 1449 call_rcu(&entry->rcu, free_asym_cap_entry); 1450 } 1451 } 1452 1453 /* 1454 * Initializers for schedule domains 1455 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 1456 */ 1457 1458 static int default_relax_domain_level = -1; 1459 int sched_domain_level_max; 1460 1461 static int __init setup_relax_domain_level(char *str) 1462 { 1463 if (kstrtoint(str, 0, &default_relax_domain_level)) 1464 pr_warn("Unable to set relax_domain_level\n"); 1465 1466 return 1; 1467 } 1468 __setup("relax_domain_level=", setup_relax_domain_level); 1469 1470 static void set_domain_attribute(struct sched_domain *sd, 1471 struct sched_domain_attr *attr) 1472 { 1473 int request; 1474 1475 if (!attr || attr->relax_domain_level < 0) { 1476 if (default_relax_domain_level < 0) 1477 return; 1478 request = default_relax_domain_level; 1479 } else 1480 request = attr->relax_domain_level; 1481 1482 if (sd->level >= request) { 1483 /* Turn off idle balance on this domain: */ 1484 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 1485 } 1486 } 1487 1488 static void __sdt_free(const struct cpumask *cpu_map); 1489 static int __sdt_alloc(const struct cpumask *cpu_map); 1490 1491 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 1492 const struct cpumask *cpu_map) 1493 { 1494 switch (what) { 1495 case sa_rootdomain: 1496 if (!atomic_read(&d->rd->refcount)) 1497 free_rootdomain(&d->rd->rcu); 1498 fallthrough; 1499 case sa_sd: 1500 free_percpu(d->sd); 1501 fallthrough; 1502 case sa_sd_storage: 1503 __sdt_free(cpu_map); 1504 fallthrough; 1505 case sa_none: 1506 break; 1507 } 1508 } 1509 1510 static enum s_alloc 1511 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) 1512 { 1513 memset(d, 0, sizeof(*d)); 1514 1515 if (__sdt_alloc(cpu_map)) 1516 return sa_sd_storage; 1517 d->sd = alloc_percpu(struct sched_domain *); 1518 if (!d->sd) 1519 return sa_sd_storage; 1520 d->rd = alloc_rootdomain(); 1521 if (!d->rd) 1522 return sa_sd; 1523 1524 return sa_rootdomain; 1525 } 1526 1527 /* 1528 * NULL the sd_data elements we've used to build the sched_domain and 1529 * sched_group structure so that the subsequent __free_domain_allocs() 1530 * will not free the data we're using. 
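 * (The per-CPU sched_domain pointer is always claimed here; sds, sg and sgc
 *  are only claimed once something actually took a reference on them,
 *  otherwise __sdt_free() is left to reclaim them.)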
1531 */ 1532 static void claim_allocations(int cpu, struct sched_domain *sd) 1533 { 1534 struct sd_data *sdd = sd->private; 1535 1536 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 1537 *per_cpu_ptr(sdd->sd, cpu) = NULL; 1538 1539 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) 1540 *per_cpu_ptr(sdd->sds, cpu) = NULL; 1541 1542 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 1543 *per_cpu_ptr(sdd->sg, cpu) = NULL; 1544 1545 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 1546 *per_cpu_ptr(sdd->sgc, cpu) = NULL; 1547 } 1548 1549 #ifdef CONFIG_NUMA 1550 enum numa_topology_type sched_numa_topology_type; 1551 1552 static int sched_domains_numa_levels; 1553 static int sched_domains_curr_level; 1554 1555 int sched_max_numa_distance; 1556 static int *sched_domains_numa_distance; 1557 static struct cpumask ***sched_domains_numa_masks; 1558 #endif 1559 1560 /* 1561 * SD_flags allowed in topology descriptions. 1562 * 1563 * These flags are purely descriptive of the topology and do not prescribe 1564 * behaviour. Behaviour is artificial and mapped in the below sd_init() 1565 * function. For details, see include/linux/sched/sd_flags.h. 1566 * 1567 * SD_SHARE_CPUCAPACITY 1568 * SD_SHARE_LLC 1569 * SD_CLUSTER 1570 * SD_NUMA 1571 * 1572 * Odd one out, which beside describing the topology has a quirk also 1573 * prescribes the desired behaviour that goes along with it: 1574 * 1575 * SD_ASYM_PACKING - describes SMT quirks 1576 */ 1577 #define TOPOLOGY_SD_FLAGS \ 1578 (SD_SHARE_CPUCAPACITY | \ 1579 SD_CLUSTER | \ 1580 SD_SHARE_LLC | \ 1581 SD_NUMA | \ 1582 SD_ASYM_PACKING) 1583 1584 static struct sched_domain * 1585 sd_init(struct sched_domain_topology_level *tl, 1586 const struct cpumask *cpu_map, 1587 struct sched_domain *child, int cpu) 1588 { 1589 struct sd_data *sdd = &tl->data; 1590 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1591 int sd_id, sd_weight, sd_flags = 0; 1592 struct cpumask *sd_span; 1593 1594 #ifdef CONFIG_NUMA 1595 /* 1596 * Ugly hack to pass state to sd_numa_mask()... 1597 */ 1598 sched_domains_curr_level = tl->numa_level; 1599 #endif 1600 1601 sd_weight = cpumask_weight(tl->mask(cpu)); 1602 1603 if (tl->sd_flags) 1604 sd_flags = (*tl->sd_flags)(); 1605 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 1606 "wrong sd_flags in topology description\n")) 1607 sd_flags &= TOPOLOGY_SD_FLAGS; 1608 1609 *sd = (struct sched_domain){ 1610 .min_interval = sd_weight, 1611 .max_interval = 2*sd_weight, 1612 .busy_factor = 16, 1613 .imbalance_pct = 117, 1614 1615 .cache_nice_tries = 0, 1616 1617 .flags = 1*SD_BALANCE_NEWIDLE 1618 | 1*SD_BALANCE_EXEC 1619 | 1*SD_BALANCE_FORK 1620 | 0*SD_BALANCE_WAKE 1621 | 1*SD_WAKE_AFFINE 1622 | 0*SD_SHARE_CPUCAPACITY 1623 | 0*SD_SHARE_LLC 1624 | 0*SD_SERIALIZE 1625 | 1*SD_PREFER_SIBLING 1626 | 0*SD_NUMA 1627 | sd_flags 1628 , 1629 1630 .last_balance = jiffies, 1631 .balance_interval = sd_weight, 1632 .max_newidle_lb_cost = 0, 1633 .last_decay_max_lb_cost = jiffies, 1634 .child = child, 1635 .name = tl->name, 1636 }; 1637 1638 sd_span = sched_domain_span(sd); 1639 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); 1640 sd_id = cpumask_first(sd_span); 1641 1642 sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); 1643 1644 WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == 1645 (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY), 1646 "CPU capacity asymmetry not supported on SMT\n"); 1647 1648 /* 1649 * Convert topological properties into behaviour. 1650 */ 1651 /* Don't attempt to spread across CPUs of different capacities. 
*/ 1652 if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) 1653 sd->child->flags &= ~SD_PREFER_SIBLING; 1654 1655 if (sd->flags & SD_SHARE_CPUCAPACITY) { 1656 sd->imbalance_pct = 110; 1657 1658 } else if (sd->flags & SD_SHARE_LLC) { 1659 sd->imbalance_pct = 117; 1660 sd->cache_nice_tries = 1; 1661 1662 #ifdef CONFIG_NUMA 1663 } else if (sd->flags & SD_NUMA) { 1664 sd->cache_nice_tries = 2; 1665 1666 sd->flags &= ~SD_PREFER_SIBLING; 1667 sd->flags |= SD_SERIALIZE; 1668 if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { 1669 sd->flags &= ~(SD_BALANCE_EXEC | 1670 SD_BALANCE_FORK | 1671 SD_WAKE_AFFINE); 1672 } 1673 1674 #endif 1675 } else { 1676 sd->cache_nice_tries = 1; 1677 } 1678 1679 /* 1680 * For all levels sharing cache; connect a sched_domain_shared 1681 * instance. 1682 */ 1683 if (sd->flags & SD_SHARE_LLC) { 1684 sd->shared = *per_cpu_ptr(sdd->sds, sd_id); 1685 atomic_inc(&sd->shared->ref); 1686 atomic_set(&sd->shared->nr_busy_cpus, sd_weight); 1687 } 1688 1689 sd->private = sdd; 1690 1691 return sd; 1692 } 1693 1694 /* 1695 * Topology list, bottom-up. 1696 */ 1697 static struct sched_domain_topology_level default_topology[] = { 1698 #ifdef CONFIG_SCHED_SMT 1699 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 1700 #endif 1701 1702 #ifdef CONFIG_SCHED_CLUSTER 1703 { cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) }, 1704 #endif 1705 1706 #ifdef CONFIG_SCHED_MC 1707 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 1708 #endif 1709 { cpu_cpu_mask, SD_INIT_NAME(PKG) }, 1710 { NULL, }, 1711 }; 1712 1713 static struct sched_domain_topology_level *sched_domain_topology = 1714 default_topology; 1715 static struct sched_domain_topology_level *sched_domain_topology_saved; 1716 1717 #define for_each_sd_topology(tl) \ 1718 for (tl = sched_domain_topology; tl->mask; tl++) 1719 1720 void __init set_sched_topology(struct sched_domain_topology_level *tl) 1721 { 1722 if (WARN_ON_ONCE(sched_smp_initialized)) 1723 return; 1724 1725 sched_domain_topology = tl; 1726 sched_domain_topology_saved = NULL; 1727 } 1728 1729 #ifdef CONFIG_NUMA 1730 1731 static const struct cpumask *sd_numa_mask(int cpu) 1732 { 1733 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 1734 } 1735 1736 static void sched_numa_warn(const char *str) 1737 { 1738 static int done = false; 1739 int i,j; 1740 1741 if (done) 1742 return; 1743 1744 done = true; 1745 1746 printk(KERN_WARNING "ERROR: %s\n\n", str); 1747 1748 for (i = 0; i < nr_node_ids; i++) { 1749 printk(KERN_WARNING " "); 1750 for (j = 0; j < nr_node_ids; j++) { 1751 if (!node_state(i, N_CPU) || !node_state(j, N_CPU)) 1752 printk(KERN_CONT "(%02d) ", node_distance(i,j)); 1753 else 1754 printk(KERN_CONT " %02d ", node_distance(i,j)); 1755 } 1756 printk(KERN_CONT "\n"); 1757 } 1758 printk(KERN_WARNING "\n"); 1759 } 1760 1761 bool find_numa_distance(int distance) 1762 { 1763 bool found = false; 1764 int i, *distances; 1765 1766 if (distance == node_distance(0, 0)) 1767 return true; 1768 1769 rcu_read_lock(); 1770 distances = rcu_dereference(sched_domains_numa_distance); 1771 if (!distances) 1772 goto unlock; 1773 for (i = 0; i < sched_domains_numa_levels; i++) { 1774 if (distances[i] == distance) { 1775 found = true; 1776 break; 1777 } 1778 } 1779 unlock: 1780 rcu_read_unlock(); 1781 1782 return found; 1783 } 1784 1785 #define for_each_cpu_node_but(n, nbut) \ 1786 for_each_node_state(n, N_CPU) \ 1787 if (n == nbut) \ 1788 continue; \ 1789 else 1790 1791 /* 1792 * A system can have three types of NUMA 
topology: 1793 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 1794 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 1795 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 1796 * 1797 * The difference between a glueless mesh topology and a backplane 1798 * topology lies in whether communication between not directly 1799 * connected nodes goes through intermediary nodes (where programs 1800 * could run), or through backplane controllers. This affects 1801 * placement of programs. 1802 * 1803 * The type of topology can be discerned with the following tests: 1804 * - If the maximum distance between any nodes is 1 hop, the system 1805 * is directly connected. 1806 * - If for two nodes A and B, located N > 1 hops away from each other, 1807 * there is an intermediary node C, which is < N hops away from both 1808 * nodes A and B, the system is a glueless mesh. 1809 */ 1810 static void init_numa_topology_type(int offline_node) 1811 { 1812 int a, b, c, n; 1813 1814 n = sched_max_numa_distance; 1815 1816 if (sched_domains_numa_levels <= 2) { 1817 sched_numa_topology_type = NUMA_DIRECT; 1818 return; 1819 } 1820 1821 for_each_cpu_node_but(a, offline_node) { 1822 for_each_cpu_node_but(b, offline_node) { 1823 /* Find two nodes furthest removed from each other. */ 1824 if (node_distance(a, b) < n) 1825 continue; 1826 1827 /* Is there an intermediary node between a and b? */ 1828 for_each_cpu_node_but(c, offline_node) { 1829 if (node_distance(a, c) < n && 1830 node_distance(b, c) < n) { 1831 sched_numa_topology_type = 1832 NUMA_GLUELESS_MESH; 1833 return; 1834 } 1835 } 1836 1837 sched_numa_topology_type = NUMA_BACKPLANE; 1838 return; 1839 } 1840 } 1841 1842 pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n"); 1843 sched_numa_topology_type = NUMA_DIRECT; 1844 } 1845 1846 1847 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS) 1848 1849 void sched_init_numa(int offline_node) 1850 { 1851 struct sched_domain_topology_level *tl; 1852 unsigned long *distance_map; 1853 int nr_levels = 0; 1854 int i, j; 1855 int *distances; 1856 struct cpumask ***masks; 1857 1858 /* 1859 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the 1860 * unique distances in the node_distance() table. 1861 */ 1862 distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL); 1863 if (!distance_map) 1864 return; 1865 1866 bitmap_zero(distance_map, NR_DISTANCE_VALUES); 1867 for_each_cpu_node_but(i, offline_node) { 1868 for_each_cpu_node_but(j, offline_node) { 1869 int distance = node_distance(i, j); 1870 1871 if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) { 1872 sched_numa_warn("Invalid distance value range"); 1873 bitmap_free(distance_map); 1874 return; 1875 } 1876 1877 bitmap_set(distance_map, distance, 1); 1878 } 1879 } 1880 /* 1881 * We can now figure out how many unique distance values there are and 1882 * allocate memory accordingly. 
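	 *
	 * For the 4 node ring example further above, the distance table only
	 * contains the values 10, 20 and 30, so nr_levels ends up as 3 and
	 * sched_domains_numa_distance[] becomes { 10, 20, 30 }.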
1883 */ 1884 nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES); 1885 1886 distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL); 1887 if (!distances) { 1888 bitmap_free(distance_map); 1889 return; 1890 } 1891 1892 for (i = 0, j = 0; i < nr_levels; i++, j++) { 1893 j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j); 1894 distances[i] = j; 1895 } 1896 rcu_assign_pointer(sched_domains_numa_distance, distances); 1897 1898 bitmap_free(distance_map); 1899 1900 /* 1901 * 'nr_levels' contains the number of unique distances 1902 * 1903 * The sched_domains_numa_distance[] array includes the actual distance 1904 * numbers. 1905 */ 1906 1907 /* 1908 * Here, we should temporarily reset sched_domains_numa_levels to 0. 1909 * If it fails to allocate memory for array sched_domains_numa_masks[][], 1910 * the array will contain less then 'nr_levels' members. This could be 1911 * dangerous when we use it to iterate array sched_domains_numa_masks[][] 1912 * in other functions. 1913 * 1914 * We reset it to 'nr_levels' at the end of this function. 1915 */ 1916 sched_domains_numa_levels = 0; 1917 1918 masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL); 1919 if (!masks) 1920 return; 1921 1922 /* 1923 * Now for each level, construct a mask per node which contains all 1924 * CPUs of nodes that are that many hops away from us. 1925 */ 1926 for (i = 0; i < nr_levels; i++) { 1927 masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 1928 if (!masks[i]) 1929 return; 1930 1931 for_each_cpu_node_but(j, offline_node) { 1932 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 1933 int k; 1934 1935 if (!mask) 1936 return; 1937 1938 masks[i][j] = mask; 1939 1940 for_each_cpu_node_but(k, offline_node) { 1941 if (sched_debug() && (node_distance(j, k) != node_distance(k, j))) 1942 sched_numa_warn("Node-distance not symmetric"); 1943 1944 if (node_distance(j, k) > sched_domains_numa_distance[i]) 1945 continue; 1946 1947 cpumask_or(mask, mask, cpumask_of_node(k)); 1948 } 1949 } 1950 } 1951 rcu_assign_pointer(sched_domains_numa_masks, masks); 1952 1953 /* Compute default topology size */ 1954 for (i = 0; sched_domain_topology[i].mask; i++); 1955 1956 tl = kzalloc((i + nr_levels + 1) * 1957 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 1958 if (!tl) 1959 return; 1960 1961 /* 1962 * Copy the default topology bits.. 1963 */ 1964 for (i = 0; sched_domain_topology[i].mask; i++) 1965 tl[i] = sched_domain_topology[i]; 1966 1967 /* 1968 * Add the NUMA identity distance, aka single NODE. 1969 */ 1970 tl[i++] = (struct sched_domain_topology_level){ 1971 .mask = sd_numa_mask, 1972 .numa_level = 0, 1973 SD_INIT_NAME(NODE) 1974 }; 1975 1976 /* 1977 * .. and append 'j' levels of NUMA goodness. 
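	 * (The NODE identity level added just above is numa_level 0, so the
	 *  NUMA levels proper start at j = 1 here.)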
1978 */ 1979 for (j = 1; j < nr_levels; i++, j++) { 1980 tl[i] = (struct sched_domain_topology_level){ 1981 .mask = sd_numa_mask, 1982 .sd_flags = cpu_numa_flags, 1983 .flags = SDTL_OVERLAP, 1984 .numa_level = j, 1985 SD_INIT_NAME(NUMA) 1986 }; 1987 } 1988 1989 sched_domain_topology_saved = sched_domain_topology; 1990 sched_domain_topology = tl; 1991 1992 sched_domains_numa_levels = nr_levels; 1993 WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]); 1994 1995 init_numa_topology_type(offline_node); 1996 } 1997 1998 1999 static void sched_reset_numa(void) 2000 { 2001 int nr_levels, *distances; 2002 struct cpumask ***masks; 2003 2004 nr_levels = sched_domains_numa_levels; 2005 sched_domains_numa_levels = 0; 2006 sched_max_numa_distance = 0; 2007 sched_numa_topology_type = NUMA_DIRECT; 2008 distances = sched_domains_numa_distance; 2009 rcu_assign_pointer(sched_domains_numa_distance, NULL); 2010 masks = sched_domains_numa_masks; 2011 rcu_assign_pointer(sched_domains_numa_masks, NULL); 2012 if (distances || masks) { 2013 int i, j; 2014 2015 synchronize_rcu(); 2016 kfree(distances); 2017 for (i = 0; i < nr_levels && masks; i++) { 2018 if (!masks[i]) 2019 continue; 2020 for_each_node(j) 2021 kfree(masks[i][j]); 2022 kfree(masks[i]); 2023 } 2024 kfree(masks); 2025 } 2026 if (sched_domain_topology_saved) { 2027 kfree(sched_domain_topology); 2028 sched_domain_topology = sched_domain_topology_saved; 2029 sched_domain_topology_saved = NULL; 2030 } 2031 } 2032 2033 /* 2034 * Call with hotplug lock held 2035 */ 2036 void sched_update_numa(int cpu, bool online) 2037 { 2038 int node; 2039 2040 node = cpu_to_node(cpu); 2041 /* 2042 * Scheduler NUMA topology is updated when the first CPU of a 2043 * node is onlined or the last CPU of a node is offlined. 2044 */ 2045 if (cpumask_weight(cpumask_of_node(node)) != 1) 2046 return; 2047 2048 sched_reset_numa(); 2049 sched_init_numa(online ? NUMA_NO_NODE : node); 2050 } 2051 2052 void sched_domains_numa_masks_set(unsigned int cpu) 2053 { 2054 int node = cpu_to_node(cpu); 2055 int i, j; 2056 2057 for (i = 0; i < sched_domains_numa_levels; i++) { 2058 for (j = 0; j < nr_node_ids; j++) { 2059 if (!node_state(j, N_CPU)) 2060 continue; 2061 2062 /* Set ourselves in the remote node's masks */ 2063 if (node_distance(j, node) <= sched_domains_numa_distance[i]) 2064 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 2065 } 2066 } 2067 } 2068 2069 void sched_domains_numa_masks_clear(unsigned int cpu) 2070 { 2071 int i, j; 2072 2073 for (i = 0; i < sched_domains_numa_levels; i++) { 2074 for (j = 0; j < nr_node_ids; j++) { 2075 if (sched_domains_numa_masks[i][j]) 2076 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 2077 } 2078 } 2079 } 2080 2081 /* 2082 * sched_numa_find_closest() - given the NUMA topology, find the cpu 2083 * closest to @cpu from @cpumask. 2084 * cpumask: cpumask to find a cpu from 2085 * cpu: cpu to be close to 2086 * 2087 * returns: cpu, or nr_cpu_ids when nothing found. 
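 *
 * The search starts from @cpu's own node; each successive NUMA level widens
 * the candidate mask to nodes one more hop away, so the first hit comes from
 * the nearest node that intersects @cpus.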

void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (!node_state(j, N_CPU))
				continue;

			/* Set ourselves in the remote node's masks */
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (sched_domains_numa_masks[i][j])
				cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

/*
 * sched_numa_find_closest() - given the NUMA topology, find the CPU in @cpus
 * closest to @cpu.
 * @cpus: cpumask to find a CPU from
 * @cpu: CPU to be close to
 *
 * Return: the CPU, or nr_cpu_ids when nothing is found.
 */
int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
	int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
	struct cpumask ***masks;

	rcu_read_lock();
	masks = rcu_dereference(sched_domains_numa_masks);
	if (!masks)
		goto unlock;
	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (!masks[i][j])
			break;
		cpu = cpumask_any_and(cpus, masks[i][j]);
		if (cpu < nr_cpu_ids) {
			found = cpu;
			break;
		}
	}
unlock:
	rcu_read_unlock();

	return found;
}

struct __cmp_key {
	const struct cpumask *cpus;
	struct cpumask ***masks;
	int node;
	int cpu;
	int w;
};
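
/*
 * bsearch() comparator used by sched_numa_find_nth_cpu(): @b points into the
 * array of per-hop cpumask pointers, whose masks grow cumulatively with each
 * hop. We look for the first hop whose weight of (cpus & mask[node]) exceeds
 * the requested index k->cpu; k->w records the cumulative weight of the
 * previous hop so the caller can index into the CPUs added at that hop.
 */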
static int hop_cmp(const void *a, const void *b)
{
	struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
	struct __cmp_key *k = (struct __cmp_key *)a;

	if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
		return 1;

	if (b == k->masks) {
		k->w = 0;
		return 0;
	}

	prev_hop = *((struct cpumask ***)b - 1);
	k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
	if (k->w <= k->cpu)
		return 0;

	return -1;
}

/**
 * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth closest CPU
 *			       from @cpus, ordered by increasing NUMA distance
 *			       from @node.
 * @cpus: cpumask to find a CPU from
 * @cpu: index N of the CPU to find (0-based)
 * @node: NUMA node to order CPUs by distance from
 *
 * Return: the CPU, or nr_cpu_ids when nothing is found.
 */
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
	struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
	struct cpumask ***hop_masks;
	int hop, ret = nr_cpu_ids;

	if (node == NUMA_NO_NODE)
		return cpumask_nth_and(cpu, cpus, cpu_online_mask);

	rcu_read_lock();

	/* CPU-less node entries are uninitialized in sched_domains_numa_masks */
	node = numa_nearest_node(node, N_CPU);
	k.node = node;

	k.masks = rcu_dereference(sched_domains_numa_masks);
	if (!k.masks)
		goto unlock;

	hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
	hop = hop_masks - k.masks;

	ret = hop ?
		cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
		cpumask_nth_and(cpu, cpus, k.masks[0][node]);
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);

/**
 * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
 *			   @node.
 * @node: The node to count hops from.
 * @hops: Include CPUs up to that many hops away. 0 means local node.
 *
 * Return: On success, a pointer to a cpumask of CPUs at most @hops away from
 * @node, an error value otherwise.
 *
 * Requires rcu_read_lock() to be held. The returned cpumask is only valid
 * within that read-side section; copy it if required beyond that.
 *
 * Note that not all hops are equal in distance; see sched_init_numa() for how
 * distances and masks are handled.
 * Also note that this is a reflection of sched_domains_numa_masks, which may
 * change during the lifetime of the system (offline nodes are taken out of
 * the masks).
 */
const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
{
	struct cpumask ***masks;

	if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
		return ERR_PTR(-EINVAL);

	masks = rcu_dereference(sched_domains_numa_masks);
	if (!masks)
		return ERR_PTR(-EBUSY);

	return masks[hops][node];
}
EXPORT_SYMBOL_GPL(sched_numa_hop_mask);

#endif /* CONFIG_NUMA */
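
/*
 * __sdt_alloc()/__sdt_free() manage the per-topology-level, per-CPU backing
 * storage (sd_data) that build_sched_domains() draws from. Structures that
 * end up referenced by the built domains are "claimed" by claim_allocations()
 * (their per-CPU pointer is set to NULL), so __sdt_free() only releases the
 * ones that went unused.
 */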
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sds = alloc_percpu(struct sched_domain_shared *);
		if (!sdd->sds)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_domain_shared *sds;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sds = kzalloc_node(sizeof(struct sched_domain_shared),
					GFP_KERNEL, cpu_to_node(j));
			if (!sds)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sds, j) = sds;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

			sgc->id = j;

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sds)
				kfree(*per_cpu_ptr(sdd->sds, j));
			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sds);
		sdd->sds = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}

static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);

	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology borken\n");
			pr_err(" the %s domain not a subset of the %s domain\n",
					child->name, sd->name);
			/* Fixup, ensure @sd has at least @child CPUs. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}
	}
	set_domain_attribute(sd, attr);

	return sd;
}

/*
 * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
 * any two given CPUs at this (non-NUMA) topology level.
 */
static bool topology_span_sane(struct sched_domain_topology_level *tl,
			       const struct cpumask *cpu_map, int cpu)
{
	int i = cpu + 1;

	/* NUMA levels are allowed to overlap */
	if (tl->flags & SDTL_OVERLAP)
		return true;

	/*
	 * Non-NUMA levels cannot partially overlap - they must be either
	 * completely equal or completely disjoint. Otherwise we can end up
	 * breaking the sched_group lists - i.e. a later get_group() pass
	 * breaks the linking done for an earlier span.
	 */
	for_each_cpu_from(i, cpu_map) {
		/*
		 * We should 'and' all those masks with 'cpu_map' to exactly
		 * match the topology we're about to build, but that can only
		 * remove CPUs, which only lessens our ability to detect
		 * overlaps
		 */
		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
			return false;
	}

	return true;
}

/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs
 */
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state = sa_none;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;
	bool has_asym = false;
	bool has_cluster = false;

	if (WARN_ON(cpumask_empty(cpu_map)))
		goto error;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for CPUs specified by the cpu_map: */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for_each_sd_topology(tl) {

			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
				goto error;

			sd = build_sched_domain(tl, cpu_map, attr, sd, i);

			has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;

			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP)
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/*
	 * Calculate an allowed NUMA imbalance such that LLCs do not get
	 * imbalanced.
	 */
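	/*
	 * For example (illustrative numbers only): with two nodes of 64 CPUs
	 * each and a single LLC per node, the first domain above the LLC gets
	 * imb_numa_nr = 64 >> 3 = 8, and each wider NUMA domain above it gets
	 * that value scaled by how much its span exceeds the first NUMA
	 * level's span (so still 8 for the 128-CPU domain here).
	 */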
	for_each_cpu(i, cpu_map) {
		unsigned int imb = 0;
		unsigned int imb_span = 1;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			struct sched_domain *child = sd->child;

			if (!(sd->flags & SD_SHARE_LLC) && child &&
			    (child->flags & SD_SHARE_LLC)) {
				struct sched_domain __rcu *top_p;
				unsigned int nr_llcs;

				/*
				 * For a single LLC per node, allow an
				 * imbalance up to 12.5% of the node. This is
				 * an arbitrary cutoff based on two factors --
				 * SMT and memory channels. For SMT-2, the
				 * intent is to avoid premature sharing of HT
				 * resources but SMT-4 or SMT-8 *may* benefit
				 * from a different cutoff. For memory
				 * channels, this is a very rough estimate of
				 * how many channels may be active and is
				 * based on recent CPUs with many cores.
				 *
				 * For multiple LLCs, allow an imbalance
				 * until multiple tasks would share an LLC
				 * on one node while LLCs on another node
				 * remain idle. This assumes that there are
				 * enough logical CPUs per LLC to avoid SMT
				 * factors and that there is a correlation
				 * between LLCs and memory channels.
				 */
				nr_llcs = sd->span_weight / child->span_weight;
				if (nr_llcs == 1)
					imb = sd->span_weight >> 3;
				else
					imb = nr_llcs;
				imb = max(1U, imb);
				sd->imb_numa_nr = imb;

				/* Set span based on the first NUMA domain. */
				top_p = sd->parent;
				while (top_p && !(top_p->flags & SD_NUMA)) {
					top_p = top_p->parent;
				}
				imb_span = top_p ? top_p->span_weight : sd->span_weight;
			} else {
				int factor = max(1U, (sd->span_weight / imb_span));

				sd->imb_numa_nr = imb * factor;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		cpu_attach_domain(sd, d.rd, i);

		if (lowest_flag_domain(i, SD_CLUSTER))
			has_cluster = true;
	}
	rcu_read_unlock();

	if (has_asym)
		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);

	if (has_cluster)
		static_branch_inc_cpuslocked(&sched_cluster_active);

	if (rq && sched_debug_verbose)
		pr_info("root domain span: %*pbl\n", cpumask_pr_args(cpu_map));

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);

	return ret;
}
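
/*
 * The bookkeeping below describes the current sched domain partitioning;
 * partition_sched_domains_locked() diffs new requests against it. Like the
 * domains themselves, it is serialized by sched_domains_mutex.
 */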

/* Current sched domains: */
static cpumask_var_t *doms_cur;

/* Number of sched domains in 'doms_cur': */
static int ndoms_cur;

/* Attributes of custom domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;

/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;
	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups. For now this just excludes isolated
 * CPUs, but could be used to exclude other special cases in the future.
 */
int __init sched_init_domains(const struct cpumask *cpu_map)
{
	int err;

	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	arch_update_cpu_topology();
	asym_cpu_capacity_scan();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
	err = build_sched_domains(doms_cur[0], NULL);

	return err;
}
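
/*
 * Note: like the partitioning helpers below, detach_destroy_domains() relies
 * on the caller holding the hotplug lock - the *_cpuslocked static branch
 * updates depend on it.
 */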
/*
 * Detach sched domains from a group of CPUs specified in cpu_map.
 * These CPUs will now be attached to the NULL domain.
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	unsigned int cpu = cpumask_any(cpu_map);
	int i;

	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);

	if (static_branch_unlikely(&sched_cluster_active))
		static_branch_dec_cpuslocked(&sched_cluster_active);

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* Fast path: */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;

	return !memcmp(cur ? (cur + idx_cur) : &tmp,
		       new ? (new + idx_new) : &tmp,
		       sizeof(struct sched_domain_attr));
}

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap). We should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; this also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with a single domain spanning
 * the active housekeeping (non-isolated) CPUs.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock and sched_domains_mutex held.
 */
static void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new)
{
	bool __maybe_unused has_eas = false;
	int i, j, n;
	int new_topology;

	lockdep_assert_held(&sched_domains_mutex);

	/* Let the architecture update CPU core mappings: */
	new_topology = arch_update_cpu_topology();
	/* Trigger rebuilding CPU capacity asymmetry data */
	if (new_topology)
		asym_cpu_capacity_scan();

	if (!doms_new) {
		WARN_ON_ONCE(dattr_new);
		n = 0;
		doms_new = alloc_sched_domains(1);
		if (doms_new) {
			n = 1;
			cpumask_and(doms_new[0], cpu_active_mask,
				    housekeeping_cpumask(HK_TYPE_DOMAIN));
		}
	} else {
		n = ndoms_new;
	}

	/* Destroy deleted domains: */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
			    dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* No match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (!doms_new) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_and(doms_new[0], cpu_active_mask,
			    housekeeping_cpumask(HK_TYPE_DOMAIN));
	}

	/* Build new domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
			    dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* No match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
	/* Build perf domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !sched_energy_update; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
				has_eas = true;
				goto match3;
			}
		}
		/* No match - add perf domains for a new rd */
		has_eas |= build_perf_domains(doms_new[i]);
match3:
		;
	}
	sched_energy_set(has_eas);
#endif

	/* Remember the new sched domains: */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);

	kfree(dattr_cur);
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	update_sched_domain_debugfs();
	dl_rebuild_rd_accounting();
}

/*
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	sched_domains_mutex_lock();
	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
	sched_domains_mutex_unlock();
}